xref: /haiku/src/system/kernel/team.cpp (revision 46f2d5ea887ddb5df5e409113e840ebbb8790de0)
1 /*
2  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
3  * Distributed under the terms of the MIT License.
4  *
5  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
6  * Distributed under the terms of the NewOS License.
7  */
8 
9 /*!	Team functions */
10 
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/wait.h>
15 
16 #include <OS.h>
17 
18 #include <AutoDeleter.h>
19 
20 #include <elf.h>
21 #include <file_cache.h>
22 #include <heap.h>
23 #include <int.h>
24 #include <kernel.h>
25 #include <kimage.h>
26 #include <kscheduler.h>
27 #include <ksignal.h>
28 #include <port.h>
29 #include <realtime_sem.h>
30 #include <sem.h>
31 #include <syscall_process_info.h>
32 #include <syscall_restart.h>
33 #include <syscalls.h>
34 #include <team.h>
35 #include <tls.h>
36 #include <tracing.h>
37 #include <user_runtime.h>
38 #include <usergroup.h>
39 #include <vfs.h>
40 #include <vm.h>
41 #include <vm_address_space.h>
42 #include <util/AutoLock.h>
43 #include <util/khash.h>
44 
45 //#define TRACE_TEAM
46 #ifdef TRACE_TEAM
47 #	define TRACE(x) dprintf x
48 #else
49 #	define TRACE(x) ;
50 #endif
51 
52 
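// Lookup key for the sTeamHash and sGroupHash tables below; teams and
// process groups are hashed by their ID only.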
53 struct team_key {
54 	team_id id;
55 };
56 
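// Arguments handed to the new team's main thread. The args/env copies are
// owned by this structure and released via free_team_arg() once they have
// been relocated onto the user stack.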
57 struct team_arg {
58 	uint32	arg_count;
59 	char	**args;
60 	uint32	env_count;
61 	char	**env;
62 	port_id	error_port;
63 	uint32	error_token;
64 };
65 
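// Snapshot of the parent thread's user stack, TLS, and signal state, plus
// the arch-dependent CPU frame; consumed (and freed) in the child by
// fork_team_thread_start().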
66 struct fork_arg {
67 	area_id		user_stack_area;
68 	addr_t		user_stack_base;
69 	size_t		user_stack_size;
70 	addr_t		user_local_storage;
71 	sigset_t	sig_block_mask;
72 
73 	struct arch_fork_arg arch_info;
74 };
75 
76 
77 static hash_table *sTeamHash = NULL;
78 static hash_table *sGroupHash = NULL;
79 static struct team *sKernelTeam = NULL;
80 
81 // some arbitrarily chosen limits - should probably depend on the available
82 // memory (the limit is not yet enforced)
83 static int32 sMaxTeams = 2048;
84 static int32 sUsedTeams = 1;
85 
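// Protects the team hierarchy (sTeamHash, sGroupHash, the parent/child and
// group links); acquired via GRAB_TEAM_LOCK()/RELEASE_TEAM_LOCK() or an
// InterruptsSpinLocker on team_spinlock.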
86 spinlock team_spinlock = 0;
87 
88 
89 // #pragma mark - Tracing
90 
91 
92 #if TEAM_TRACING
93 namespace TeamTracing {
94 
95 class TeamForked : public AbstractTraceEntry {
96 	public:
97 		TeamForked(thread_id forkedThread)
98 			:
99 			fForkedThread(forkedThread)
100 		{
101 			Initialized();
102 		}
103 
104 		virtual void AddDump(TraceOutput& out)
105 		{
106 			out.Print("team forked, new thread %ld", fForkedThread);
107 		}
108 
109 	private:
110 		thread_id			fForkedThread;
111 };
112 
113 
114 class ExecTeam : public AbstractTraceEntry {
115 	public:
116 		ExecTeam(const char* path, int32 argCount, const char* const* args,
117 				int32 envCount, const char* const* env)
118 			:
119 			fArgCount(argCount),
120 			fArgs(NULL)
121 		{
122 			fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
123 				false);
124 
125 			// determine the buffer size we need for the args
126 			size_t argBufferSize = 0;
127 			for (int32 i = 0; i < argCount; i++)
128 				argBufferSize += strlen(args[i]) + 1;
129 
130 			// allocate a buffer
131 			fArgs = (char*)alloc_tracing_buffer(argBufferSize);
132 			if (fArgs) {
133 				char* buffer = fArgs;
134 				for (int32 i = 0; i < argCount; i++) {
135 					size_t argSize = strlen(args[i]) + 1;
136 					memcpy(buffer, args[i], argSize);
137 					buffer += argSize;
138 				}
139 			}
140 
141 			// ignore env for the time being
142 			(void)envCount;
143 			(void)env;
144 
145 			Initialized();
146 		}
147 
148 		virtual void AddDump(TraceOutput& out)
149 		{
150 			out.Print("team exec, \"%s\", args:", fPath);
151 
152 			char* args = fArgs;
153 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
154 				out.Print(" \"%s\"", args);
155 				args += strlen(args) + 1;
156 			}
157 		}
158 
159 	private:
160 		char*	fPath;
161 		int32	fArgCount;
162 		char*	fArgs;
163 };
164 
165 
166 static const char*
167 job_control_state_name(job_control_state state)
168 {
169 	switch (state) {
170 		case JOB_CONTROL_STATE_NONE:
171 			return "none";
172 		case JOB_CONTROL_STATE_STOPPED:
173 			return "stopped";
174 		case JOB_CONTROL_STATE_CONTINUED:
175 			return "continued";
176 		case JOB_CONTROL_STATE_DEAD:
177 			return "dead";
178 		default:
179 			return "invalid";
180 	}
181 }
182 
183 
184 class SetJobControlState : public AbstractTraceEntry {
185 	public:
186 		SetJobControlState(team_id team, job_control_state newState, int signal)
187 			:
188 			fTeam(team),
189 			fNewState(newState),
190 			fSignal(signal)
191 		{
192 			Initialized();
193 		}
194 
195 		virtual void AddDump(TraceOutput& out)
196 		{
197 			out.Print("team set job control state, team %ld, "
198 				"new state: %s, signal: %d",
199 				fTeam, job_control_state_name(fNewState), fSignal);
200 		}
201 
202 	private:
203 		team_id				fTeam;
204 		job_control_state	fNewState;
205 		int					fSignal;
206 };
207 
208 
209 class WaitForChild : public AbstractTraceEntry {
210 	public:
211 		WaitForChild(pid_t child, uint32 flags)
212 			:
213 			fChild(child),
214 			fFlags(flags)
215 		{
216 			Initialized();
217 		}
218 
219 		virtual void AddDump(TraceOutput& out)
220 		{
221 			out.Print("team wait for child, child: %ld, "
222 				"flags: 0x%lx", fChild, fFlags);
223 		}
224 
225 	private:
226 		pid_t	fChild;
227 		uint32	fFlags;
228 };
229 
230 
231 class WaitForChildDone : public AbstractTraceEntry {
232 	public:
233 		WaitForChildDone(const job_control_entry& entry)
234 			:
235 			fState(entry.state),
236 			fTeam(entry.thread),
237 			fStatus(entry.status),
238 			fReason(entry.reason),
239 			fSignal(entry.signal)
240 		{
241 			Initialized();
242 		}
243 
244 		WaitForChildDone(status_t error)
245 			:
246 			fTeam(error)
247 		{
248 			Initialized();
249 		}
250 
251 		virtual void AddDump(TraceOutput& out)
252 		{
253 			if (fTeam >= 0) {
254 				out.Print("team wait for child done, team: %ld, "
255 					"state: %s, status: 0x%lx, reason: 0x%x, signal: %d",
256 					fTeam, job_control_state_name(fState), fStatus, fReason,
257 					fSignal);
258 			} else {
259 				out.Print("team wait for child failed, error: "
260 					"0x%lx", fTeam);
261 			}
262 		}
263 
264 	private:
265 		job_control_state	fState;
266 		team_id				fTeam;
267 		status_t			fStatus;
268 		uint16				fReason;
269 		uint16				fSignal;
270 };
271 
272 }	// namespace TeamTracing
273 
274 #	define T(x) new(std::nothrow) TeamTracing::x;
275 #else
276 #	define T(x) ;
277 #endif
278 
279 
280 
281 //	#pragma mark - Private functions
282 
283 
284 static void
285 _dump_team_info(struct team *team)
286 {
287 	kprintf("TEAM: %p\n", team);
288 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
289 	kprintf("name:        '%s'\n", team->name);
290 	kprintf("args:        '%s'\n", team->args);
291 	kprintf("next:        %p\n", team->next);
292 	kprintf("parent:      %p", team->parent);
293 	if (team->parent != NULL) {
294 		kprintf(" (id = %ld)\n", team->parent->id);
295 	} else
296 		kprintf("\n");
297 
298 	kprintf("children:    %p\n", team->children);
299 	kprintf("num_threads: %d\n", team->num_threads);
300 	kprintf("state:       %d\n", team->state);
301 	kprintf("flags:       0x%lx\n", team->flags);
302 	kprintf("io_context:  %p\n", team->io_context);
303 	if (team->address_space)
304 		kprintf("address_space: %p\n", team->address_space);
305 	kprintf("main_thread: %p\n", team->main_thread);
306 	kprintf("thread_list: %p\n", team->thread_list);
307 	kprintf("group_id:    %ld\n", team->group_id);
308 	kprintf("session_id:  %ld\n", team->session_id);
309 }
310 
311 
312 static int
313 dump_team_info(int argc, char **argv)
314 {
315 	struct hash_iterator iterator;
316 	struct team *team;
317 	team_id id = -1;
318 	bool found = false;
319 
320 	if (argc < 2) {
321 		struct thread* thread = thread_get_current_thread();
322 		if (thread != NULL && thread->team != NULL)
323 			_dump_team_info(thread->team);
324 		else
325 			kprintf("No current team!\n");
326 		return 0;
327 	}
328 
329 	id = strtoul(argv[1], NULL, 0);
330 	if (IS_KERNEL_ADDRESS(id)) {
331 		// semi-hack
332 		_dump_team_info((struct team *)id);
333 		return 0;
334 	}
335 
336 	// walk through the team list, trying to match name or id
337 	hash_open(sTeamHash, &iterator);
338 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
339 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
340 			_dump_team_info(team);
341 			found = true;
342 			break;
343 		}
344 	}
345 	hash_close(sTeamHash, &iterator, false);
346 
347 	if (!found)
348 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
349 	return 0;
350 }
351 
352 
353 static int
354 dump_teams(int argc, char **argv)
355 {
356 	struct hash_iterator iterator;
357 	struct team *team;
358 
359 	kprintf("team           id  parent      name\n");
360 	hash_open(sTeamHash, &iterator);
361 
362 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
363 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
364 	}
365 
366 	hash_close(sTeamHash, &iterator, false);
367 	return 0;
368 }
369 
370 
371 /*!	Frees an array of strings in kernel space.
372 
373 	\param strings the array of strings to free (may be \c NULL)
374 	\param count number of strings in the array
375 */
376 static void
377 free_strings_array(char **strings, int32 count)
378 {
379 	int32 i;
380 
381 	if (strings == NULL)
382 		return;
383 
384 	for (i = 0; i < count; i++)
385 		free(strings[i]);
386 
387 	free(strings);
388 }
389 
390 
391 /*!	Copies an array of strings in kernel space.
392 
393 	\param in strings array to be copied
394 	\param count number of strings in the array
395 	\param _strings pointer set to the kernel copy
396 	\return \c B_OK on success, or an appropriate error code on
397 		failure.
398 */
399 static status_t
400 kernel_copy_strings_array(char * const *in, int32 count, char ***_strings)
401 {
402 	status_t status;
403 	char **strings;
404 	int32 i = 0;
405 
406 	strings = (char **)malloc((count + 1) * sizeof(char *));
407 	if (strings == NULL)
408 		return B_NO_MEMORY;
409 
410 	for (; i < count; i++) {
411 		strings[i] = strdup(in[i]);
412 		if (strings[i] == NULL) {
413 			status = B_NO_MEMORY;
414 			goto error;
415 		}
416 	}
417 
418 	strings[count] = NULL;
419 	*_strings = strings;
420 
421 	return B_OK;
422 
423 error:
424 	free_strings_array(strings, i);
425 	return status;
426 }
427 
428 
429 /*!	Copies an array of strings from user space to kernel space.
430 
431 	\param userStrings userspace strings array
432 	\param count number of strings in the array
433 	\param _strings pointer set to the kernel copy
434 	\return \c B_OK on success, or an appropriate error code on
435 		failure.
436 */
437 static status_t
438 user_copy_strings_array(char * const *userStrings, int32 count, char ***_strings)
439 {
440 	char *buffer;
441 	char **strings;
442 	status_t err;
443 	int32 i = 0;
444 
445 	if (!IS_USER_ADDRESS(userStrings))
446 		return B_BAD_ADDRESS;
447 
448 	// buffer for safely accessing the user string
449 	// TODO: maybe have a user_strdup() instead?
450 	buffer = (char *)malloc(4 * B_PAGE_SIZE);
451 	if (buffer == NULL)
452 		return B_NO_MEMORY;
453 
454 	strings = (char **)malloc((count + 1) * sizeof(char *));
455 	if (strings == NULL) {
456 		err = B_NO_MEMORY;
457 		goto error;
458 	}
459 
460 	if ((err = user_memcpy(strings, userStrings, count * sizeof(char *))) < B_OK)
461 		goto error;
462 
463 	// scan all strings and copy to kernel space
464 
465 	for (; i < count; i++) {
466 		err = user_strlcpy(buffer, strings[i], 4 * B_PAGE_SIZE);
467 		if (err < B_OK)
468 			goto error;
469 
470 		strings[i] = strdup(buffer);
471 		if (strings[i] == NULL) {
472 			err = B_NO_MEMORY;
473 			goto error;
474 		}
475 	}
476 
477 	strings[count] = NULL;
478 	*_strings = strings;
479 	free(buffer);
480 
481 	return B_OK;
482 
483 error:
484 	free_strings_array(strings, i);
485 	free(buffer);
486 
487 	TRACE(("user_copy_strings_array failed %ld\n", err));
488 	return err;
489 }
490 
491 
492 static status_t
493 copy_strings_array(char * const *strings, int32 count, char ***_strings,
494 	bool kernel)
495 {
496 	if (kernel)
497 		return kernel_copy_strings_array(strings, count, _strings);
498 
499 	return user_copy_strings_array(strings, count, _strings);
500 }
501 
502 
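// khash callbacks for sTeamHash: compare returns 0 on a match; the hash
// function is called either with an existing element (element != NULL) or
// with a bare lookup key.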
503 static int
504 team_struct_compare(void *_p, const void *_key)
505 {
506 	struct team *p = (struct team*)_p;
507 	const struct team_key *key = (const struct team_key*)_key;
508 
509 	if (p->id == key->id)
510 		return 0;
511 
512 	return 1;
513 }
514 
515 
516 static uint32
517 team_struct_hash(void *_p, const void *_key, uint32 range)
518 {
519 	struct team *p = (struct team*)_p;
520 	const struct team_key *key = (const struct team_key*)_key;
521 
522 	if (p != NULL)
523 		return p->id % range;
524 
525 	return (uint32)key->id % range;
526 }
527 
528 
529 static int
530 process_group_compare(void *_group, const void *_key)
531 {
532 	struct process_group *group = (struct process_group*)_group;
533 	const struct team_key *key = (const struct team_key*)_key;
534 
535 	if (group->id == key->id)
536 		return 0;
537 
538 	return 1;
539 }
540 
541 
542 static uint32
543 process_group_hash(void *_group, const void *_key, uint32 range)
544 {
545 	struct process_group *group = (struct process_group*)_group;
546 	const struct team_key *key = (const struct team_key*)_key;
547 
548 	if (group != NULL)
549 		return group->id % range;
550 
551 	return (uint32)key->id % range;
552 }
553 
554 
555 static void
556 insert_team_into_parent(struct team *parent, struct team *team)
557 {
558 	ASSERT(parent != NULL);
559 
560 	team->siblings_next = parent->children;
561 	parent->children = team;
562 	team->parent = parent;
563 }
564 
565 
566 /*!	Note: must have team lock held */
567 static void
568 remove_team_from_parent(struct team *parent, struct team *team)
569 {
570 	struct team *child, *last = NULL;
571 
572 	for (child = parent->children; child != NULL; child = child->siblings_next) {
573 		if (child == team) {
574 			if (last == NULL)
575 				parent->children = child->siblings_next;
576 			else
577 				last->siblings_next = child->siblings_next;
578 
579 			team->parent = NULL;
580 			break;
581 		}
582 		last = child;
583 	}
584 }
585 
586 
587 /*!	Reparent each of our children
588 	Note: must have team lock held
589 */
590 static void
591 reparent_children(struct team *team)
592 {
593 	struct team *child;
594 
595 	while ((child = team->children) != NULL) {
596 		// remove the child from the current proc and add to the parent
597 		remove_team_from_parent(team, child);
598 		insert_team_into_parent(sKernelTeam, child);
599 	}
600 
601 	// move job control entries too
602 	sKernelTeam->stopped_children->entries.MoveFrom(
603 		&team->stopped_children->entries);
604 	sKernelTeam->continued_children->entries.MoveFrom(
605 		&team->continued_children->entries);
606 
607 	// Note, we don't move the dead children entries. Those will be deleted
608 	// when the team structure is deleted.
609 }
610 
611 
612 static bool
613 is_session_leader(struct team *team)
614 {
615 	return team->session_id == team->id;
616 }
617 
618 
619 static bool
620 is_process_group_leader(struct team *team)
621 {
622 	return team->group_id == team->id;
623 }
624 
625 
626 static void
627 deferred_delete_process_group(struct process_group *group)
628 {
629 	if (group == NULL)
630 		return;
631 
632 	// remove_group_from_session() keeps this pointer around
633 	// only if the session can be freed as well
634 	if (group->session) {
635 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
636 			group->session->id));
637 		deferred_free(group->session);
638 	}
639 
640 	deferred_free(group);
641 }
642 
643 
644 /*!	Removes a group from a session. If it was the session's last group, the
645 	session pointer is kept in the group, so that both can be freed together
	by deferred_delete_process_group() later.
646 	You must hold the team lock when calling this function.
647 */
648 static void
649 remove_group_from_session(struct process_group *group)
650 {
651 	struct process_session *session = group->session;
652 
653 	// the group must be part of a session for this function to have any effect
654 	if (session == NULL)
655 		return;
656 
657 	hash_remove(sGroupHash, group);
658 
659 	// we cannot free the resource here, so we're keeping the group link
660 	// around - this way it'll be freed by deferred_delete_process_group()
661 	if (--session->group_count > 0)
662 		group->session = NULL;
663 }
664 
665 
666 /*!	Team lock must be held.
667 */
668 static void
669 acquire_process_group_ref(pid_t groupID)
670 {
671 	process_group* group = team_get_process_group_locked(NULL, groupID);
672 	if (group == NULL) {
673 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
674 		return;
675 	}
676 
677 	group->refs++;
678 }
679 
680 
681 /*!	Team lock must be held.
682 */
683 static void
684 release_process_group_ref(pid_t groupID)
685 {
686 	process_group* group = team_get_process_group_locked(NULL, groupID);
687 	if (group == NULL) {
688 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
689 		return;
690 	}
691 
692 	if (group->refs <= 0) {
693 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
694 		return;
695 	}
696 
697 	if (--group->refs > 0)
698 		return;
699 
700 	// group is no longer used
701 
702 	remove_group_from_session(group);
703 	deferred_delete_process_group(group);
704 }
705 
706 
707 /*!	You must hold the team lock when calling this function. */
708 static void
709 insert_group_into_session(struct process_session *session, struct process_group *group)
710 {
711 	if (group == NULL)
712 		return;
713 
714 	group->session = session;
715 	hash_insert(sGroupHash, group);
716 	session->group_count++;
717 }
718 
719 
720 /*!	You must hold the team lock when calling this function. */
721 static void
722 insert_team_into_group(struct process_group *group, struct team *team)
723 {
724 	team->group = group;
725 	team->group_id = group->id;
726 	team->session_id = group->session->id;
727 
728 	team->group_next = group->teams;
729 	group->teams = team;
730 	acquire_process_group_ref(group->id);
731 }
732 
733 
734 /*!	Removes the team from the group.
735 
736 	\param team the team that will be removed from its group
737 */
738 static void
739 remove_team_from_group(struct team *team)
740 {
741 	struct process_group *group = team->group;
742 	struct team *current, *last = NULL;
743 
744 	// the team must be in a group for this function to have any effect
745 	if (group == NULL)
746 		return;
747 
748 	for (current = group->teams; current != NULL; current = current->group_next) {
749 		if (current == team) {
750 			if (last == NULL)
751 				group->teams = current->group_next;
752 			else
753 				last->group_next = current->group_next;
754 
755 			team->group = NULL;
756 			break;
757 		}
758 		last = current;
759 	}
760 
761 	team->group = NULL;
762 	team->group_next = NULL;
763 
764 	release_process_group_ref(group->id);
765 }
766 
767 
768 static struct process_group *
769 create_process_group(pid_t id)
770 {
771 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
772 	if (group == NULL)
773 		return NULL;
774 
775 	group->id = id;
776 	group->refs = 0;
777 	group->session = NULL;
778 	group->teams = NULL;
779 	group->orphaned = true;
780 	return group;
781 }
782 
783 
784 static struct process_session *
785 create_process_session(pid_t id)
786 {
787 	struct process_session *session
788 		= (struct process_session *)malloc(sizeof(struct process_session));
789 	if (session == NULL)
790 		return NULL;
791 
792 	session->id = id;
793 	session->group_count = 0;
794 	session->controlling_tty = -1;
795 	session->foreground_group = -1;
796 
797 	return session;
798 }
799 
800 
801 static void
802 set_team_name(struct team* team, const char* name)
803 {
804 	if (const char* lastSlash = strrchr(name, '/'))
805 		name = lastSlash + 1;
806 
807 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
808 }
809 
810 
811 static struct team *
812 create_team_struct(const char *name, bool kernel)
813 {
814 	struct team *team = (struct team *)malloc(sizeof(struct team));
815 	if (team == NULL)
816 		return NULL;
817 	MemoryDeleter teamDeleter(team);
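	// Every early return below is covered by the deleters stacked up to that
	// point; they are all detached at the end, once the team structure is
	// fully initialized.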
818 
819 	team->next = team->siblings_next = team->children = team->parent = NULL;
820 	team->id = allocate_thread_id();
821 	set_team_name(team, name);
822 	team->args[0] = '\0';
823 	team->num_threads = 0;
824 	team->io_context = NULL;
825 	team->address_space = NULL;
826 	team->realtime_sem_context = NULL;
827 	team->thread_list = NULL;
828 	team->main_thread = NULL;
829 	team->loading_info = NULL;
830 	team->state = TEAM_STATE_BIRTH;
831 	team->flags = 0;
832 	team->death_sem = -1;
833 
834 	team->supplementary_groups = NULL;
835 	team->supplementary_group_count = 0;
836 
837 	team->dead_threads_kernel_time = 0;
838 	team->dead_threads_user_time = 0;
839 
840 	// dead threads
841 	list_init(&team->dead_threads);
842 	team->dead_threads_count = 0;
843 
844 	// dead children
845 	team->dead_children = new(nothrow) team_dead_children;
846 	if (team->dead_children == NULL)
847 		return NULL;
848 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
849 
850 	team->dead_children->count = 0;
851 	team->dead_children->kernel_time = 0;
852 	team->dead_children->user_time = 0;
853 
854 	// stopped children
855 	team->stopped_children = new(nothrow) team_job_control_children;
856 	if (team->stopped_children == NULL)
857 		return NULL;
858 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
859 		team->stopped_children);
860 
861 	// continued children
862 	team->continued_children = new(nothrow) team_job_control_children;
863 	if (team->continued_children == NULL)
864 		return NULL;
865 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
866 		team->continued_children);
867 
868 	// job control entry
869 	team->job_control_entry = new(nothrow) job_control_entry;
870 	if (team->job_control_entry == NULL)
871 		return NULL;
872 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
873 		team->job_control_entry);
874 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
875 	team->job_control_entry->thread = team->id;
876 	team->job_control_entry->team = team;
877 
878 	list_init(&team->image_list);
879 	list_init(&team->watcher_list);
880 
881 	clear_team_debug_info(&team->debug_info, true);
882 
883 	if (arch_team_init_team_struct(team, kernel) < 0)
884 		return NULL;
885 
886 	// publish dead/stopped/continued children condition vars
887 	team->dead_children->condition_variable.Publish(team->dead_children,
888 		"team children");
889 
890 	// keep all allocated structures
891 	jobControlEntryDeleter.Detach();
892 	continuedChildrenDeleter.Detach();
893 	stoppedChildrenDeleter.Detach();
894 	deadChildrenDeleter.Detach();
895 	teamDeleter.Detach();
896 
897 	return team;
898 }
899 
900 
901 static void
902 delete_team_struct(struct team *team)
903 {
904 	team->dead_children->condition_variable.Unpublish();
905 
906 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
907 			&team->dead_threads)) {
908 		free(threadDeathEntry);
909 	}
910 
911 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
912 		delete entry;
913 
914 	malloc_referenced_release(team->supplementary_groups);
915 
916 	delete team->job_control_entry;
917 		// usually already NULL and transferred to the parent
918 	delete team->continued_children;
919 	delete team->stopped_children;
920 	delete team->dead_children;
921 	free(team);
922 }
923 
924 
925 static uint32
926 get_arguments_data_size(char **args, int32 argc)
927 {
928 	uint32 size = 0;
929 	int32 count;
930 
931 	for (count = 0; count < argc; count++)
932 		size += strlen(args[count]) + 1;
933 
934 	return size + (argc + 1) * sizeof(char *) + sizeof(struct user_space_program_args);
935 }
936 
937 
938 static void
939 free_team_arg(struct team_arg *teamArg)
940 {
941 	free_strings_array(teamArg->args, teamArg->arg_count);
942 	free_strings_array(teamArg->env, teamArg->env_count);
943 
944 	free(teamArg);
945 }
946 
947 
948 static status_t
949 create_team_arg(struct team_arg **_teamArg, int32 argCount, char * const *args,
950 	int32 envCount, char * const *env, port_id port, uint32 token, bool kernel)
951 {
952 	status_t status;
953 	char **argsCopy;
954 	char **envCopy;
955 
956 	struct team_arg *teamArg = (struct team_arg *)malloc(sizeof(struct team_arg));
957 	if (teamArg == NULL)
958 		return B_NO_MEMORY;
959 
960 	// copy the args over
961 
962 	status = copy_strings_array(args, argCount, &argsCopy, kernel);
963 	if (status != B_OK) {
		free(teamArg);
			// don't leak the team_arg allocation on failure
964 		return status;
	}
965 
966 	status = copy_strings_array(env, envCount, &envCopy, kernel);
967 	if (status != B_OK) {
968 		free_strings_array(argsCopy, argCount);
		free(teamArg);
969 		return status;
970 	}
971 
972 	teamArg->arg_count = argCount;
973 	teamArg->args = argsCopy;
974 	teamArg->env_count = envCount;
975 	teamArg->env = envCopy;
976 	teamArg->error_port = port;
977 	teamArg->error_token = token;
978 
979 	*_teamArg = teamArg;
980 	return B_OK;
981 }
982 
983 
984 static int32
985 team_create_thread_start(void *args)
986 {
987 	status_t err;
988 	struct thread *t;
989 	struct team *team;
990 	struct team_arg *teamArgs = (struct team_arg*)args;
991 	const char *path;
992 	addr_t entry;
993 	char ustack_name[128];
994 	uint32 sizeLeft;
995 	char **userArgs;
996 	char **userEnv;
997 	char *userDest;
998 	struct user_space_program_args *programArgs;
999 	uint32 argCount, envCount, i;
1000 
1001 	t = thread_get_current_thread();
1002 	team = t->team;
1003 	cache_node_launched(teamArgs->arg_count, teamArgs->args);
1004 
1005 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1006 
1007 	// create an initial primary stack area
1008 
1009 	// Main stack area layout is currently as follows (starting from 0):
1010 	//
1011 	// size							| usage
1012 	// -----------------------------+--------------------------------
1013 	// USER_MAIN_THREAD_STACK_SIZE	| actual stack
1014 	// TLS_SIZE						| TLS data
1015 	// ENV_SIZE						| environment variables
1016 	// arguments size				| arguments passed to the team
1017 
1018 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
1019 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1020 
1021 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE + TLS_SIZE + ENV_SIZE +
1022 		get_arguments_data_size(teamArgs->args, teamArgs->arg_count));
1023 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1024 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE;
1025 		// the exact location at the end of the user stack area
1026 
1027 	sprintf(ustack_name, "%s_main_stack", team->name);
1028 	t->user_stack_area = create_area_etc(team, ustack_name, (void **)&t->user_stack_base,
1029 		B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
1030 	if (t->user_stack_area < 0) {
1031 		dprintf("team_create_thread_start: could not create default user stack region\n");
1032 
1033 		free_team_arg(teamArgs);
1034 		return t->user_stack_area;
1035 	}
1036 
1037 	// now that the TLS area is allocated, initialize TLS
1038 	arch_thread_init_tls(t);
1039 
1040 	argCount = teamArgs->arg_count;
1041 	envCount = teamArgs->env_count;
1042 
1043 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1044 		+ t->user_stack_size + TLS_SIZE + ENV_SIZE);
1045 	userArgs = (char **)(programArgs + 1);
1046 	userDest = (char *)(userArgs + argCount + 1);
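	// Above the TLS and environment blocks sits the user_space_program_args
	// structure, then the argv pointer array (argCount + 1 entries,
	// NULL-terminated), then the argument strings themselves.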
1047 
1048 	TRACE(("addr: stack base = 0x%lx, userArgs = %p, userDest = %p, sizeLeft = %lu\n",
1049 		t->user_stack_base, userArgs, userDest, sizeLeft));
1050 
1051 	sizeLeft = t->user_stack_base + sizeLeft - (addr_t)userDest;
1052 
1053 	for (i = 0; i < argCount; i++) {
1054 		ssize_t length = user_strlcpy(userDest, teamArgs->args[i], sizeLeft);
1055 		if (length < B_OK) {
1056 			argCount = 0;
1057 			break;
1058 		}
1059 
1060 		userArgs[i] = userDest;
1061 		userDest += ++length;
1062 		sizeLeft -= length;
1063 	}
1064 	userArgs[argCount] = NULL;
1065 
1066 	userEnv = (char **)(t->user_stack_base + t->user_stack_size + TLS_SIZE);
1067 	sizeLeft = ENV_SIZE;
1068 	userDest = (char *)userEnv + ENV_SIZE - 1;
1069 		// the environment variables are copied from back to front
1070 
1071 	TRACE(("team_create_thread_start: envc: %ld, env: %p\n",
1072 		teamArgs->env_count, (void *)teamArgs->env));
1073 
1074 	for (i = 0; i < envCount; i++) {
1075 		ssize_t length = strlen(teamArgs->env[i]) + 1;
1076 		userDest -= length;
1077 		if (userDest < (char *)&userEnv[envCount]) {
1078 			envCount = i;
1079 			break;
1080 		}
1081 
1082 		userEnv[i] = userDest;
1083 
1084 		if (user_memcpy(userDest, teamArgs->env[i], length) < B_OK) {
1085 			envCount = 0;
1086 			break;
1087 		}
1088 
1089 		sizeLeft -= length;
1090 	}
1091 	userEnv[envCount] = NULL;
1092 
1093 	path = teamArgs->args[0];
1094 	if (user_memcpy(programArgs->program_path, path,
1095 				sizeof(programArgs->program_path)) < B_OK
1096 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1097 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1098 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1099 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1100 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1101 				sizeof(port_id)) < B_OK
1102 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1103 				sizeof(uint32)) < B_OK) {
1104 		// the team deletion process will clean this mess
1105 		return B_BAD_ADDRESS;
1106 	}
1107 
1108 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1109 
1110 	// add args to info member
1111 	team->args[0] = 0;
1112 	strlcpy(team->args, path, sizeof(team->args));
1113 	for (i = 1; i < argCount; i++) {
1114 		strlcat(team->args, " ", sizeof(team->args));
1115 		strlcat(team->args, teamArgs->args[i], sizeof(team->args));
1116 	}
1117 
1118 	free_team_arg(teamArgs);
1119 		// the arguments are already on the user stack; we no longer need them in this form
1120 
1121 	// ToDo: don't use fixed paths!
1122 	err = elf_load_user_image("/boot/beos/system/runtime_loader", team, 0, &entry);
1123 	if (err < B_OK) {
1124 		// Luckily, we don't have to clean up the mess we created - that's
1125 		// done for us by the normal team deletion process
1126 		TRACE(("team_create_thread_start: elf_load_user_image() failed: %s\n", strerror(err)));
1127 		return err;
1128 	}
1129 
1130 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1131 
1132 	team->state = TEAM_STATE_NORMAL;
1133 
1134 	// jump to the entry point in user space
1135 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1136 		// only returns in case of error
1137 }
1138 
1139 
1140 /*!	The BeOS kernel exports a function with this name, but most probably with
1141 	different parameters; we should not make it public.
1142 */
1143 static thread_id
1144 load_image_etc(int32 argCount, char * const *args, int32 envCount,
1145 	char * const *env, int32 priority, uint32 flags,
1146 	port_id errorPort, uint32 errorToken, bool kernel)
1147 {
1148 	struct team *team, *parent;
1149 	const char *threadName;
1150 	thread_id thread;
1151 	status_t status;
1152 	cpu_status state;
1153 	struct team_arg *teamArgs;
1154 	struct team_loading_info loadingInfo;
1155 
1156 	if (args == NULL || argCount == 0)
1157 		return B_BAD_VALUE;
1158 
1159 	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
1160 		args[0], args, argCount));
1161 
1162 	team = create_team_struct(args[0], false);
1163 	if (team == NULL)
1164 		return B_NO_MEMORY;
1165 
1166 	parent = thread_get_current_thread()->team;
1167 
1168 	if (flags & B_WAIT_TILL_LOADED) {
1169 		loadingInfo.thread = thread_get_current_thread();
1170 		loadingInfo.result = B_ERROR;
1171 		loadingInfo.done = false;
1172 		team->loading_info = &loadingInfo;
1173 	}
1174 
1175 	// Inherit the parent's user/group, but also check the executable's
1176 	// set-user/group-id permission
1177 	inherit_parent_user_and_group(team, parent);
1178 	update_set_id_user_and_group(team, args[0]);
1179 
1180 	state = disable_interrupts();
1181 	GRAB_TEAM_LOCK();
1182 
1183 	hash_insert(sTeamHash, team);
1184 	insert_team_into_parent(parent, team);
1185 	insert_team_into_group(parent->group, team);
1186 	sUsedTeams++;
1187 
1188 	RELEASE_TEAM_LOCK();
1189 	restore_interrupts(state);
1190 
1191 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1192 		errorPort, errorToken, kernel);
1193 	if (status != B_OK)
1194 		goto err1;
1195 
1196 	// create a new io_context for this team
1197 	team->io_context = vfs_new_io_context(parent->io_context);
1198 	if (!team->io_context) {
1199 		status = B_NO_MEMORY;
1200 		goto err2;
1201 	}
1202 
1203 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1204 	vfs_exec_io_context(team->io_context);
1205 
1206 	// create an address space for this team
1207 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1208 		&team->address_space);
1209 	if (status < B_OK)
1210 		goto err3;
1211 
1212 	// cut the path from the main thread name
1213 	threadName = strrchr(args[0], '/');
1214 	if (threadName != NULL)
1215 		threadName++;
1216 	else
1217 		threadName = args[0];
1218 
1219 	// Create a kernel thread, but under the context of the new team
1220 	// The new thread will take over ownership of teamArgs
1221 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1222 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1223 	if (thread < 0) {
1224 		status = thread;
1225 		goto err4;
1226 	}
1227 
1228 	// wait for the loader of the new team to finish its work
1229 	if (flags & B_WAIT_TILL_LOADED) {
1230 		struct thread *mainThread;
1231 
1232 		state = disable_interrupts();
1233 		GRAB_THREAD_LOCK();
1234 
1235 		mainThread = thread_get_thread_struct_locked(thread);
1236 		if (mainThread) {
1237 			// resume the team's main thread
1238 			if (mainThread->state == B_THREAD_SUSPENDED)
1239 				scheduler_enqueue_in_run_queue(mainThread);
1240 
1241 			// Now suspend ourselves until loading is finished.
1242 			// We will be woken either by the thread, when it finished or
1243 			// aborted loading, or when the team is going to die (e.g. is
1244 			// killed). In either case the one setting `loadingInfo.done' is
1245 			// responsible for removing the info from the team structure.
1246 			while (!loadingInfo.done) {
1247 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1248 				scheduler_reschedule();
1249 			}
1250 		} else {
1251 			// Impressive! Someone managed to kill the thread in this short
1252 			// time.
1253 		}
1254 
1255 		RELEASE_THREAD_LOCK();
1256 		restore_interrupts(state);
1257 
1258 		if (loadingInfo.result < B_OK)
1259 			return loadingInfo.result;
1260 	}
1261 
1262 	// notify the debugger
1263 	user_debug_team_created(team->id);
1264 
1265 	return thread;
1266 
1267 err4:
1268 	vm_put_address_space(team->address_space);
1269 err3:
1270 	vfs_free_io_context(team->io_context);
1271 err2:
1272 	free_team_arg(teamArgs);
1273 err1:
1274 	// remove the team from the team hash table and delete the team structure
1275 	state = disable_interrupts();
1276 	GRAB_TEAM_LOCK();
1277 
1278 	remove_team_from_group(team);
1279 	remove_team_from_parent(parent, team);
1280 	hash_remove(sTeamHash, team);
1281 
1282 	RELEASE_TEAM_LOCK();
1283 	restore_interrupts(state);
1284 
1285 	delete_team_struct(team);
1286 
1287 	return status;
1288 }
1289 
1290 
1291 /*!	Almost shuts down the current team and loads a new image into it.
1292 	If successful, this function does not return and will take over ownership of
1293 	the arguments provided.
1294 	This function may only be called from user space.
1295 */
1296 static status_t
1297 exec_team(const char *path, int32 argCount, char * const *args,
1298 	int32 envCount, char * const *env)
1299 {
1300 	struct team *team = thread_get_current_thread()->team;
1301 	struct team_arg *teamArgs;
1302 	const char *threadName;
1303 	status_t status = B_OK;
1304 	cpu_status state;
1305 	struct thread *thread;
1306 	thread_id nubThreadID = -1;
1307 
1308 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1309 		args[0], argCount, envCount, team->id));
1310 
1311 	// switching the kernel at run time is probably not a good idea :)
1312 	if (team == team_get_kernel_team())
1313 		return B_NOT_ALLOWED;
1314 
1315 	// we currently need to be single threaded here
1316 	// ToDo: maybe we should just kill all other threads and
1317 	//	make the current thread the team's main thread?
1318 	if (team->main_thread != thread_get_current_thread())
1319 		return B_NOT_ALLOWED;
1320 
1321 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1322 	// We iterate through the thread list to make sure that there's no other
1323 	// thread.
1324 	state = disable_interrupts();
1325 	GRAB_TEAM_LOCK();
1326 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1327 
1328 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1329 		nubThreadID = team->debug_info.nub_thread;
1330 
1331 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1332 
1333 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1334 		if (thread != team->main_thread && thread->id != nubThreadID) {
1335 			status = B_NOT_ALLOWED;
1336 			break;
1337 		}
1338 	}
1339 
1340 	RELEASE_TEAM_LOCK();
1341 	restore_interrupts(state);
1342 
1343 	if (status != B_OK)
1344 		return status;
1345 
1346 	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
1347 		-1, 0, false);
1348 	if (status != B_OK)
1349 		return status;
1350 
1351 	T(ExecTeam(path, teamArgs->arg_count, teamArgs->args, envCount, env));
1352 		// trace here, so we don't have to deal with the user addresses
1353 
1354 	// replace args[0] with the path argument, just to be on the safe side
1355 	free(teamArgs->args[0]);
1356 	teamArgs->args[0] = strdup(path);
1357 
1358 	// ToDo: remove team resources if there are any left
1359 	// thread_atkernel_exit() might not be called at all
1360 
1361 	thread_reset_for_exec();
1362 
1363 	user_debug_prepare_for_exec();
1364 
1365 	vm_delete_areas(team->address_space);
1366 	delete_owned_ports(team->id);
1367 	sem_delete_owned_sems(team->id);
1368 	remove_images(team);
1369 	vfs_exec_io_context(team->io_context);
1370 	delete_realtime_sem_context(team->realtime_sem_context);
1371 	team->realtime_sem_context = NULL;
1372 
1373 	user_debug_finish_after_exec();
1374 
1375 	// rename the team
1376 
1377 	set_team_name(team, path);
1378 
1379 	// cut the path from the team name and rename the main thread, too
1380 	threadName = strrchr(path, '/');
1381 	if (threadName != NULL)
1382 		threadName++;
1383 	else
1384 		threadName = path;
1385 	rename_thread(thread_get_current_thread_id(), threadName);
1386 
1387 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1388 
1389 	// Update user/group according to the executable's set-user/group-id
1390 	// permission.
1391 	update_set_id_user_and_group(team, path);
1392 
1393 	status = team_create_thread_start(teamArgs);
1394 		// this one usually doesn't return...
1395 
1396 	// sorry, we have to kill us, there is no way out anymore
1397 	// sorry, we have to kill ourselves, there is no way out anymore
1398 	exit_thread(status);
1399 
1400 	// we return a status here since the signal that is sent by the
1401 	// call above is not immediately handled
1402 	return B_ERROR;
1403 }
1404 
1405 
1406 /*! This is the first function to be called from the newly created
1407 	main child thread.
1408 	It fills in everything that's left over from the fork_arg, and then
1409 	returns from the parent's fork() syscall to the child.
1410 */
1411 static int32
1412 fork_team_thread_start(void *_args)
1413 {
1414 	struct thread *thread = thread_get_current_thread();
1415 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1416 
1417 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1418 		// we need a local copy of the arch dependent part
1419 
1420 	thread->user_stack_area = forkArgs->user_stack_area;
1421 	thread->user_stack_base = forkArgs->user_stack_base;
1422 	thread->user_stack_size = forkArgs->user_stack_size;
1423 	thread->user_local_storage = forkArgs->user_local_storage;
1424 	thread->sig_block_mask = forkArgs->sig_block_mask;
1425 
1426 	arch_thread_init_tls(thread);
1427 
1428 	free(forkArgs);
1429 
1430 	// set frame of the parent thread to this one, too
1431 
1432 	arch_restore_fork_frame(&archArgs);
1433 		// This one won't return here
1434 
1435 	return 0;
1436 }
1437 
1438 
1439 static thread_id
1440 fork_team(void)
1441 {
1442 	struct thread *parentThread = thread_get_current_thread();
1443 	struct team *parentTeam = parentThread->team, *team;
1444 	struct fork_arg *forkArgs;
1445 	struct area_info info;
1446 	thread_id threadID;
1447 	cpu_status state;
1448 	status_t status;
1449 	int32 cookie;
1450 
1451 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1452 
1453 	if (parentTeam == team_get_kernel_team())
1454 		return B_NOT_ALLOWED;
1455 
1456 	// create a new team
1457 	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)
1458 
1459 	team = create_team_struct(parentTeam->name, false);
1460 	if (team == NULL)
1461 		return B_NO_MEMORY;
1462 
1463 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1464 
1465 	// Inherit the parent's user/group.
1466 	inherit_parent_user_and_group(team, parentTeam);
1467 
1468 	state = disable_interrupts();
1469 	GRAB_TEAM_LOCK();
1470 
1471 	hash_insert(sTeamHash, team);
1472 	insert_team_into_parent(parentTeam, team);
1473 	insert_team_into_group(parentTeam->group, team);
1474 	sUsedTeams++;
1475 
1476 	RELEASE_TEAM_LOCK();
1477 	restore_interrupts(state);
1478 
1479 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1480 	if (forkArgs == NULL) {
1481 		status = B_NO_MEMORY;
1482 		goto err1;
1483 	}
1484 
1485 	// create a new io_context for this team
1486 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1487 	if (!team->io_context) {
1488 		status = B_NO_MEMORY;
1489 		goto err2;
1490 	}
1491 
1492 	// duplicate the realtime sem context
1493 	if (parentTeam->realtime_sem_context) {
1494 		team->realtime_sem_context = clone_realtime_sem_context(
1495 			parentTeam->realtime_sem_context);
1496 		if (team->realtime_sem_context == NULL) {
1497 			status = B_NO_MEMORY;
1498 			goto err25;
1499 		}
1500 	}
1501 
1502 	// create an address space for this team
1503 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1504 		&team->address_space);
1505 	if (status < B_OK)
1506 		goto err3;
1507 
1508 	// copy all areas of the team
1509 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1510 	// ToDo: all stacks of other threads than the current one could be left out
1511 
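	// Clone every area of the parent into the new address space, and remember
	// the clone of the parent thread's stack so the child thread can adopt it
	// in fork_team_thread_start().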
1512 	cookie = 0;
1513 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1514 		void *address;
1515 		area_id area = vm_copy_area(team->address_space->id, info.name,
1516 			&address, B_CLONE_ADDRESS, info.protection, info.area);
1517 		if (area < B_OK) {
1518 			status = area;
1519 			break;
1520 		}
1521 
1522 		if (info.area == parentThread->user_stack_area)
1523 			forkArgs->user_stack_area = area;
1524 	}
1525 
1526 	if (status < B_OK)
1527 		goto err4;
1528 
1529 	forkArgs->user_stack_base = parentThread->user_stack_base;
1530 	forkArgs->user_stack_size = parentThread->user_stack_size;
1531 	forkArgs->user_local_storage = parentThread->user_local_storage;
1532 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1533 	arch_store_fork_frame(&forkArgs->arch_info);
1534 
1535 	// ToDo: copy image list
1536 
1537 	// create a kernel thread under the context of the new team
1538 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1539 		parentThread->name, parentThread->priority, forkArgs,
1540 		team->id, team->id);
1541 	if (threadID < 0) {
1542 		status = threadID;
1543 		goto err4;
1544 	}
1545 
1546 	// notify the debugger
1547 	user_debug_team_created(team->id);
1548 
1549 	T(TeamForked(threadID));
1550 
1551 	resume_thread(threadID);
1552 	return threadID;
1553 
1554 err4:
1555 	vm_delete_address_space(team->address_space);
1556 err3:
1557 	delete_realtime_sem_context(team->realtime_sem_context);
1558 err25:
1559 	vfs_free_io_context(team->io_context);
1560 err2:
1561 	free(forkArgs);
1562 err1:
1563 	// remove the team from the team hash table and delete the team structure
1564 	state = disable_interrupts();
1565 	GRAB_TEAM_LOCK();
1566 
1567 	remove_team_from_group(team);
1568 	remove_team_from_parent(parentTeam, team);
1569 	hash_remove(sTeamHash, team);
1570 
1571 	RELEASE_TEAM_LOCK();
1572 	restore_interrupts(state);
1573 
1574 	delete_team_struct(team);
1575 
1576 	return status;
1577 }
1578 
1579 
1580 /*!	Returns whether the specified \a team has any children belonging to the
1581 	process group specified by \a groupID.
1582 	Must be called with the team lock held.
1583 */
1584 static bool
1585 has_children_in_group(struct team *parent, pid_t groupID)
1586 {
1587 	struct team *team;
1588 
1589 	struct process_group *group = team_get_process_group_locked(
1590 		parent->group->session, groupID);
1591 	if (group == NULL)
1592 		return false;
1593 
1594 	for (team = group->teams; team; team = team->group_next) {
1595 		if (team->parent == parent)
1596 			return true;
1597 	}
1598 
1599 	return false;
1600 }
1601 
1602 
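/*!	Finds a matching entry using waitpid()-style \a id semantics: a positive
	\a id matches the child with that thread ID, -1 matches any entry, and an
	\a id < -1 matches entries whose process group ID equals -\a id.
*/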
1603 static job_control_entry*
1604 get_job_control_entry(team_job_control_children* children, pid_t id)
1605 {
1606 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1607 		 job_control_entry* entry = it.Next();) {
1608 
1609 		if (id > 0) {
1610 			if (entry->thread == id)
1611 				return entry;
1612 		} else if (id == -1) {
1613 			return entry;
1614 		} else {
1615 			pid_t processGroup
1616 				= (entry->team ? entry->team->group_id : entry->group_id);
1617 			if (processGroup == -id)
1618 				return entry;
1619 		}
1620 	}
1621 
1622 	return NULL;
1623 }
1624 
1625 
1626 static job_control_entry*
1627 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1628 {
1629 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1630 
1631 	if (entry == NULL && (flags & WCONTINUED) != 0)
1632 		entry = get_job_control_entry(team->continued_children, id);
1633 
1634 	if (entry == NULL && (flags & WUNTRACED) != 0)
1635 		entry = get_job_control_entry(team->stopped_children, id);
1636 
1637 	return entry;
1638 }
1639 
1640 
1641 job_control_entry::job_control_entry()
1642 	:
1643 	has_group_ref(false)
1644 {
1645 }
1646 
1647 
1648 job_control_entry::~job_control_entry()
1649 {
1650 	if (has_group_ref) {
1651 		InterruptsSpinLocker locker(team_spinlock);
1652 		release_process_group_ref(group_id);
1653 	}
1654 }
1655 
1656 
1657 /*!	Team and thread lock must be held.
1658 */
1659 void
1660 job_control_entry::InitDeadState()
1661 {
1662 	if (team != NULL) {
1663 		struct thread* thread = team->main_thread;
1664 		group_id = team->group_id;
1665 		this->thread = thread->id;
1666 		status = thread->exit.status;
1667 		reason = thread->exit.reason;
1668 		signal = thread->exit.signal;
1669 		team = NULL;
1670 		acquire_process_group_ref(group_id);
1671 		has_group_ref = true;
1672 	}
1673 }
1674 
1675 
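/*!	Note: the assignment does not transfer the process group reference;
	\c has_group_ref is deliberately reset on the copy, so the reference
	stays with (and is released by) the source entry.
*/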
1676 job_control_entry&
1677 job_control_entry::operator=(const job_control_entry& other)
1678 {
1679 	state = other.state;
1680 	thread = other.thread;
1681 	has_group_ref = false;
1682 	team = other.team;
1683 	group_id = other.group_id;
1684 	status = other.status;
1685 	reason = other.reason;
1686 	signal = other.signal;
1687 
1688 	return *this;
1689 }
1690 
1691 
1692 /*! This is the kernel backend for waitpid(). It reports the reason why a
1693 	child has died in more detail than waitpid() can.
1694 */
1695 static thread_id
1696 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1697 	status_t *_returnCode)
1698 {
1699 	struct thread* thread = thread_get_current_thread();
1700 	struct team* team = thread->team;
1701 	struct job_control_entry foundEntry;
1702 	struct job_control_entry* freeDeathEntry = NULL;
1703 	status_t status = B_OK;
1704 
1705 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1706 
1707 	T(WaitForChild(child, flags));
1708 
1709 	if (child == 0) {
1710 		// wait for all children in the process group of the calling team
1711 		child = -team->group_id;
1712 	}
1713 
1714 	bool ignoreFoundEntries = false;
1715 	bool ignoreFoundEntriesChecked = false;
1716 
1717 	while (true) {
1718 		InterruptsSpinLocker locker(team_spinlock);
1719 
1720 		// check whether any condition holds
1721 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1722 
1723 		// If we don't have an entry yet, check whether there are any children
1724 		// complying with the process group specification at all.
1725 		if (entry == NULL) {
1726 			// No success yet -- check whether there are any children we could
1727 			// wait for.
1728 			bool childrenExist = false;
1729 			if (child == -1) {
1730 				childrenExist = team->children != NULL;
1731 			} else if (child < -1) {
1732 				childrenExist = has_children_in_group(team, -child);
1733 			} else {
1734 				if (struct team* childTeam = team_get_team_struct_locked(child))
1735 					childrenExist = childTeam->parent == team;
1736 			}
1737 
1738 			if (!childrenExist) {
1739 				// there is no child we could wait for
1740 				status = ECHILD;
1741 			} else {
1742 				// the children we're waiting for are still running
1743 				status = B_WOULD_BLOCK;
1744 			}
1745 		} else {
1746 			// got something
1747 			foundEntry = *entry;
1748 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1749 				// The child is dead. Reap its death entry.
1750 				freeDeathEntry = entry;
1751 				team->dead_children->entries.Remove(entry);
1752 				team->dead_children->count--;
1753 			} else {
1754 				// The child is well. Reset its job control state.
1755 				team_set_job_control_state(entry->team,
1756 					JOB_CONTROL_STATE_NONE, 0, false);
1757 			}
1758 		}
1759 
1760 		// If we haven't got anything yet, prepare for waiting for the
1761 		// condition variable.
1762 		ConditionVariableEntry deadWaitEntry;
1763 
1764 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1765 			deadWaitEntry.Add(team->dead_children, B_CAN_INTERRUPT);
1766 
1767 		locker.Unlock();
1768 
1769 		// we got our entry and can return to our caller
1770 		if (status == B_OK) {
1771 			if (ignoreFoundEntries) {
1772 				// ... unless we shall ignore found entries
1773 				delete freeDeathEntry;
1774 				freeDeathEntry = NULL;
1775 				continue;
1776 			}
1777 
1778 			break;
1779 		}
1780 
1781 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1782 			T(WaitForChildDone(status));
1783 			return status;
1784 		}
1785 
1786 		status = deadWaitEntry.Wait();
1787 		if (status == B_INTERRUPTED) {
1788 			T(WaitForChildDone(status));
1789 			return status;
1790 		}
1791 
1792 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1793 		// all our children are dead and fail with ECHILD. We check the
1794 		// condition at this point.
1795 		if (!ignoreFoundEntriesChecked) {
1796 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1797 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1798 				|| handler.sa_handler == SIG_IGN) {
1799 				ignoreFoundEntries = true;
1800 			}
1801 
1802 			ignoreFoundEntriesChecked = true;
1803 		}
1804 	}
1805 
1806 	delete freeDeathEntry;
1807 
1808 	// when we get here, we have a valid death entry, and the child has
1809 	// already been unregistered from its team or group
1810 	int reason = 0;
1811 	switch (foundEntry.state) {
1812 		case JOB_CONTROL_STATE_DEAD:
1813 			reason = foundEntry.reason;
1814 			break;
1815 		case JOB_CONTROL_STATE_STOPPED:
1816 			reason = THREAD_STOPPED;
1817 			break;
1818 		case JOB_CONTROL_STATE_CONTINUED:
1819 			reason = THREAD_CONTINUED;
1820 			break;
1821 		case JOB_CONTROL_STATE_NONE:
1822 			// can't happen
1823 			break;
1824 	}
1825 
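	// encode the result for our caller: the delivered signal goes into the
	// upper 16 bits of the reason, the state-derived reason code (e.g.
	// THREAD_STOPPED, THREAD_CONTINUED) into the lower 16 bits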
1826 	*_returnCode = foundEntry.status;
1827 	*_reason = (foundEntry.signal << 16) | reason;
1828 
1829 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other child
1830 	// status is available.
1831 	if (is_signal_blocked(SIGCHLD)) {
1832 		InterruptsSpinLocker locker(team_spinlock);
1833 
1834 		if (get_job_control_entry(team, child, flags) == NULL)
1835 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1836 	}
1837 
1838 	T(WaitForChildDone(foundEntry));
1839 
1840 	return foundEntry.thread;
1841 }
1842 
1843 
1844 /*! Fills the team_info structure with information from the specified
1845 	team.
1846 	The team lock must be held when called.
1847 */
1848 static status_t
1849 fill_team_info(struct team *team, team_info *info, size_t size)
1850 {
1851 	if (size != sizeof(team_info))
1852 		return B_BAD_VALUE;
1853 
1854 	// ToDo: Set more information for team_info
1855 	memset(info, 0, size);
1856 
1857 	info->team = team->id;
1858 	info->thread_count = team->num_threads;
1859 	info->image_count = count_images(team);
1860 	//info->area_count =
1861 	info->debugger_nub_thread = team->debug_info.nub_thread;
1862 	info->debugger_nub_port = team->debug_info.nub_port;
1863 	//info->uid =
1864 	//info->gid =
1865 
1866 	strlcpy(info->args, team->args, sizeof(info->args));
1867 	info->argc = 1;
1868 
1869 	return B_OK;
1870 }
1871 
1872 
1873 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1874 	Interrupts must be disabled and team lock be held.
1875 */
1876 static bool
1877 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1878 {
1879 	// Orphaned Process Group: "A process group in which the parent of every
1880 	// member is either itself a member of the group or is not a member of the
1881 	// group's session." (Open Group Base Specs Issue 6)
1882 
1883 	// once orphaned, things won't change (exception: cf. setpgid())
1884 	if (group->orphaned)
1885 		return true;
1886 
1887 	struct team* team = group->teams;
1888 	while (team != NULL) {
1889 		struct team* parent = team->parent;
1890 		if (team->id != dyingProcess && parent != NULL
1891 			&& parent->id != dyingProcess
1892 			&& parent->group_id != group->id
1893 			&& parent->session_id == group->session->id) {
1894 			return false;
1895 		}
1896 
1897 		team = team->group_next;
1898 	}
1899 
1900 	group->orphaned = true;
1901 	return true;
1902 }
1903 
1904 
1905 /*!	Returns whether the process group contains stopped processes.
1906 	Interrupts must be disabled and team lock be held.
1907 */
1908 static bool
1909 process_group_has_stopped_processes(process_group* group)
1910 {
1911 	SpinLocker _(thread_spinlock);
1912 
1913 	struct team* team = group->teams;
1914 	while (team != NULL) {
1915 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1916 			return true;
1917 
1918 		team = team->group_next;
1919 	}
1920 
1921 	return false;
1922 }
1923 
1924 
1925 //	#pragma mark - Private kernel API
1926 
1927 
1928 status_t
1929 team_init(kernel_args *args)
1930 {
1931 	struct process_session *session;
1932 	struct process_group *group;
1933 
1934 	// create the team hash table
1935 	sTeamHash = hash_init(16, offsetof(struct team, next),
1936 		&team_struct_compare, &team_struct_hash);
1937 
1938 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
1939 		&process_group_compare, &process_group_hash);
1940 
1941 	// create initial session and process groups
1942 
1943 	session = create_process_session(1);
1944 	if (session == NULL)
1945 		panic("Could not create initial session.\n");
1946 
1947 	group = create_process_group(1);
1948 	if (group == NULL)
1949 		panic("Could not create initial process group.\n");
1950 
1951 	insert_group_into_session(session, group);
1952 
1953 	// create the kernel team
1954 	sKernelTeam = create_team_struct("kernel_team", true);
1955 	if (sKernelTeam == NULL)
1956 		panic("could not create kernel team!\n");
1957 	strcpy(sKernelTeam->args, sKernelTeam->name);
1958 	sKernelTeam->state = TEAM_STATE_NORMAL;
1959 
1960 	sKernelTeam->saved_set_uid = 0;
1961 	sKernelTeam->real_uid = 0;
1962 	sKernelTeam->effective_uid = 0;
1963 	sKernelTeam->saved_set_gid = 0;
1964 	sKernelTeam->real_gid = 0;
1965 	sKernelTeam->effective_gid = 0;
1966 	sKernelTeam->supplementary_groups = NULL;
1967 	sKernelTeam->supplementary_group_count = 0;
1968 
1969 	insert_team_into_group(group, sKernelTeam);
1970 
1971 	sKernelTeam->io_context = vfs_new_io_context(NULL);
1972 	if (sKernelTeam->io_context == NULL)
1973 		panic("could not create io_context for kernel team!\n");
1974 
1975 	// stick it in the team hash
1976 	hash_insert(sTeamHash, sKernelTeam);
1977 
1978 	add_debugger_command_etc("team", &dump_team_info,
1979 		"Dump info about a particular team",
1980 		"[ <id> | <address> | <name> ]\n"
1981 		"Prints information about the specified team. If no argument is given\n"
1982 		"the current team is selected.\n"
1983 		"  <id>       - The ID of the team.\n"
1984 		"  <address>  - The address of the team structure.\n"
1985 		"  <name>     - The team's name.\n", 0);
1986 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
1987 		"\n"
1988 		"Prints a list of all existing teams.\n", 0);
1989 	return B_OK;
1990 }
1991 
1992 
1993 int32
1994 team_max_teams(void)
1995 {
1996 	return sMaxTeams;
1997 }
1998 
1999 
2000 int32
2001 team_used_teams(void)
2002 {
2003 	return sUsedTeams;
2004 }
2005 
2006 
2007 /*!	Returns the team's death entry for the given child, if one exists, and
2008 	sets \a _deleteEntry to whether the caller must delete the entry.
2009 	You need to have the team lock held when calling this function. */
2010 job_control_entry*
2011 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2012 {
2013 	if (child <= 0)
2014 		return NULL;
2015 
2016 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2017 		child);
2018 	if (entry) {
2019 		// only remove the entry if the caller is the parent of the found team
2020 		if (team_get_current_team_id() == entry->thread) {
2021 			team->dead_children->entries.Remove(entry);
2022 			team->dead_children->count--;
2023 			*_deleteEntry = true;
2024 		} else {
2025 			*_deleteEntry = false;
2026 		}
2027 	}
2028 
2029 	return entry;
2030 }
2031 
2032 
2033 /*! Quick check to see if we have a valid team ID. */
2034 bool
2035 team_is_valid(team_id id)
2036 {
2037 	struct team *team;
2038 	cpu_status state;
2039 
2040 	if (id <= 0)
2041 		return false;
2042 
2043 	state = disable_interrupts();
2044 	GRAB_TEAM_LOCK();
2045 
2046 	team = team_get_team_struct_locked(id);
2047 
2048 	RELEASE_TEAM_LOCK();
2049 	restore_interrupts(state);
2050 
2051 	return team != NULL;
2052 }
2053 
2054 
2055 struct team *
2056 team_get_team_struct_locked(team_id id)
2057 {
2058 	struct team_key key;
2059 	key.id = id;
2060 
2061 	return (struct team*)hash_lookup(sTeamHash, &key);
2062 }
2063 
2064 
2065 /*!	Searches the given session for a process group with the specified ID.
2066 	If \a session is NULL, any session matches.
2067 	You must hold the team lock when you call this function. */
2068 struct process_group *
2069 team_get_process_group_locked(struct process_session *session, pid_t id)
2070 {
2071 	struct process_group *group;
2072 	struct team_key key;
2073 	key.id = id;
2074 
2075 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2076 	if (group != NULL && (session == NULL || session == group->session))
2077 		return group;
2078 
2079 	return NULL;
2080 }
2081 
2082 
2083 void
2084 team_delete_process_group(struct process_group *group)
2085 {
2086 	if (group == NULL)
2087 		return;
2088 
2089 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2090 
2091 	// remove_group_from_session() keeps this pointer around
2092 	// only if the session can be freed as well
2093 	if (group->session) {
2094 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2095 		free(group->session);
2096 	}
2097 
2098 	free(group);
2099 }
2100 
2101 
2102 void
2103 team_set_controlling_tty(int32 ttyIndex)
2104 {
2105 	struct team* team = thread_get_current_thread()->team;
2106 
2107 	InterruptsSpinLocker _(team_spinlock);
2108 
2109 	team->group->session->controlling_tty = ttyIndex;
2110 	team->group->session->foreground_group = -1;
2111 }
2112 
2113 
2114 int32
2115 team_get_controlling_tty()
2116 {
2117 	struct team* team = thread_get_current_thread()->team;
2118 
2119 	InterruptsSpinLocker _(team_spinlock);
2120 
2121 	return team->group->session->controlling_tty;
2122 }
2123 
2124 
2125 status_t
2126 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2127 {
2128 	struct thread* thread = thread_get_current_thread();
2129 	struct team* team = thread->team;
2130 
2131 	InterruptsSpinLocker locker(team_spinlock);
2132 
2133 	process_session* session = team->group->session;
2134 
2135 	// must be the controlling tty of the calling process
2136 	if (session->controlling_tty != ttyIndex)
2137 		return ENOTTY;
2138 
2139 	// check process group -- must belong to our session
2140 	process_group* group = team_get_process_group_locked(session,
2141 		processGroupID);
2142 	if (group == NULL)
2143 		return B_BAD_VALUE;
2144 
2145 	// If we are in a background group, we may only do this if we ignore
2146 	// or block SIGTTOU. Otherwise our group is sent a SIGTTOU.
2147 	if (session->foreground_group != -1
2148 		&& session->foreground_group != team->group_id
2149 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2150 		&& !is_signal_blocked(SIGTTOU)) {
2151 		pid_t groupID = team->group->id;
2152 		locker.Unlock();
2153 		send_signal(-groupID, SIGTTOU);
2154 		return B_INTERRUPTED;
2155 	}
2156 
2157 	team->group->session->foreground_group = processGroupID;
2158 
2159 	return B_OK;
2160 }
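
// A hedged sketch of the calling pattern the function above is designed for
// (a TTY layer handling a tcsetpgrp()-style request; the variable names are
// invented for the example):
//
//	status_t error = team_set_foreground_process_group(ttyIndex, newGroup);
//	if (error == B_INTERRUPTED) {
//		// the caller was in a background group and was sent SIGTTOU;
//		// the request is expected to be retried after the signal
//	}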
2161 
2162 
2163 /*!	Removes the specified team from the global team hash, and from its parent.
2164 	It also moves all of its children up to the parent.
2165 	You must hold the team lock when you call this function.
2166 */
2167 void
2168 team_remove_team(struct team *team)
2169 {
2170 	struct team *parent = team->parent;
2171 
2172 	// remember how long this team lasted
2173 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2174 		+ team->dead_children->kernel_time;
2175 	parent->dead_children->user_time += team->dead_threads_user_time
2176 		+ team->dead_children->user_time;
2177 
2178 	// Also grab the thread spinlock while removing the team from the hash.
2179 	// This makes the following sequence safe: grab teams lock, lookup team,
2180 	// grab threads lock, unlock teams lock,
2181 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2182 	// lock another team's IO context.
2183 	GRAB_THREAD_LOCK();
2184 	hash_remove(sTeamHash, team);
2185 	RELEASE_THREAD_LOCK();
2186 	sUsedTeams--;
2187 
2188 	team->state = TEAM_STATE_DEATH;
2189 
2190 	// If we're a controlling process (i.e. a session leader with controlling
2191 	// terminal), there's a bit of signalling we have to do.
2192 	if (team->session_id == team->id
2193 		&& team->group->session->controlling_tty >= 0) {
2194 		process_session* session = team->group->session;
2195 
2196 		session->controlling_tty = -1;
2197 
2198 		// send SIGHUP to the foreground
2199 		if (session->foreground_group >= 0) {
2200 			send_signal_etc(-session->foreground_group, SIGHUP,
2201 				SIGNAL_FLAG_TEAMS_LOCKED);
2202 		}
2203 
2204 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2205 		// stopped processes
2206 		struct team* child = team->children;
2207 		while (child != NULL) {
2208 			process_group* childGroup = child->group;
2209 			if (!childGroup->orphaned
2210 				&& update_orphaned_process_group(childGroup, team->id)
2211 				&& process_group_has_stopped_processes(childGroup)) {
2212 				send_signal_etc(-childGroup->id, SIGHUP,
2213 					SIGNAL_FLAG_TEAMS_LOCKED);
2214 				send_signal_etc(-childGroup->id, SIGCONT,
2215 					SIGNAL_FLAG_TEAMS_LOCKED);
2216 			}
2217 
2218 			child = child->siblings_next;
2219 		}
2220 	} else {
2221 		// update "orphaned" flags of all children's process groups
2222 		struct team* child = team->children;
2223 		while (child != NULL) {
2224 			process_group* childGroup = child->group;
2225 			if (!childGroup->orphaned)
2226 				update_orphaned_process_group(childGroup, team->id);
2227 
2228 			child = child->siblings_next;
2229 		}
2230 
2231 		// update "orphaned" flag of this team's process group
2232 		update_orphaned_process_group(team->group, team->id);
2233 	}
2234 
2235 	// reparent each of the team's children
2236 	reparent_children(team);
2237 
2238 	// remove us from our process group
2239 	remove_team_from_group(team);
2240 
2241 	// remove us from our parent
2242 	remove_team_from_parent(parent, team);
2243 }
2244 
2245 
2246 void
2247 team_delete_team(struct team *team)
2248 {
2249 	team_id teamID = team->id;
2250 	port_id debuggerPort = -1;
2251 	cpu_status state;
2252 
2253 	if (team->num_threads > 0) {
2254 		// there are other threads still in this team,
2255 		// cycle through and signal kill on each of the threads
2256 		// ToDo: this can be optimized. There's got to be a better solution.
2257 		struct thread *temp_thread;
2258 		char death_sem_name[B_OS_NAME_LENGTH];
2259 		sem_id deathSem;
2260 		int32 threadCount;
2261 
2262 		sprintf(death_sem_name, "team %ld death sem", teamID);
2263 		deathSem = create_sem(0, death_sem_name);
2264 		if (deathSem < 0)
2265 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2266 
2267 		state = disable_interrupts();
2268 		GRAB_TEAM_LOCK();
2269 
2270 		team->death_sem = deathSem;
2271 		threadCount = team->num_threads;
2272 
2273 		// If the team was being debugged, that will stop with the termination
2274 		// of the nub thread. The team structure has already been removed from
2275 		// the team hash table at this point, so no one can install a debugger
2276 		// anymore. We fetch the debugger's port to send it a message at the
2277 		// bitter end.
2278 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2279 
2280 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2281 			debuggerPort = team->debug_info.debugger_port;
2282 
2283 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2284 
2285 		// We can safely walk the list because we hold the team lock; no new
2286 		// threads can be created because the team is in TEAM_STATE_DEATH.
2287 		temp_thread = team->thread_list;
2288 		while (temp_thread) {
2289 			struct thread *next = temp_thread->team_next;
2290 
2291 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2292 			temp_thread = next;
2293 		}
2294 
2295 		RELEASE_TEAM_LOCK();
2296 		restore_interrupts(state);
2297 
2298 		// wait until all threads in the team are dead.
2299 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2300 		delete_sem(team->death_sem);
2301 	}
2302 
2303 	// If someone is waiting for this team to be loaded, but it dies
2304 	// unexpectedly before being done, we need to notify the waiting
2305 	// thread now.
2306 
2307 	state = disable_interrupts();
2308 	GRAB_TEAM_LOCK();
2309 
2310 	if (team->loading_info) {
2311 		// there's indeed someone waiting
2312 		struct team_loading_info *loadingInfo = team->loading_info;
2313 		team->loading_info = NULL;
2314 
2315 		loadingInfo->result = B_ERROR;
2316 		loadingInfo->done = true;
2317 
2318 		GRAB_THREAD_LOCK();
2319 
2320 		// wake up the waiting thread
2321 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2322 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2323 
2324 		RELEASE_THREAD_LOCK();
2325 	}
2326 
2327 	RELEASE_TEAM_LOCK();
2328 	restore_interrupts(state);
2329 
2330 	// notify team watchers
2331 
2332 	{
2333 		// we're not reachable from anyone anymore at this point, so we
2334 		// can safely access the list without any locking
2335 		struct team_watcher *watcher;
2336 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2337 				&team->watcher_list)) != NULL) {
2338 			watcher->hook(teamID, watcher->data);
2339 			free(watcher);
2340 		}
2341 	}
2342 
2343 	// free team resources
2344 
2345 	vfs_free_io_context(team->io_context);
2346 	delete_realtime_sem_context(team->realtime_sem_context);
2347 	delete_owned_ports(teamID);
2348 	sem_delete_owned_sems(teamID);
2349 	remove_images(team);
2350 	vm_delete_address_space(team->address_space);
2351 
2352 	delete_team_struct(team);
2353 
2354 	// notify the debugger that the team is gone
2355 	user_debug_team_deleted(teamID, debuggerPort);
2356 }
2357 
2358 
2359 struct team *
2360 team_get_kernel_team(void)
2361 {
2362 	return sKernelTeam;
2363 }
2364 
2365 
2366 team_id
2367 team_get_kernel_team_id(void)
2368 {
2369 	if (!sKernelTeam)
2370 		return 0;
2371 
2372 	return sKernelTeam->id;
2373 }
2374 
2375 
2376 team_id
2377 team_get_current_team_id(void)
2378 {
2379 	return thread_get_current_thread()->team->id;
2380 }
2381 
2382 
2383 status_t
2384 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2385 {
2386 	cpu_status state;
2387 	struct team *team;
2388 	status_t status;
2389 
2390 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2391 	if (id == 1) {
2392 		// we're the kernel team, so we don't have to go through all
2393 		// the hassle (locking and hash lookup)
2394 		*_addressSpace = vm_get_kernel_address_space();
2395 		return B_OK;
2396 	}
2397 
2398 	state = disable_interrupts();
2399 	GRAB_TEAM_LOCK();
2400 
2401 	team = team_get_team_struct_locked(id);
2402 	if (team != NULL) {
2403 		atomic_add(&team->address_space->ref_count, 1);
2404 		*_addressSpace = team->address_space;
2405 		status = B_OK;
2406 	} else
2407 		status = B_BAD_VALUE;
2408 
2409 	RELEASE_TEAM_LOCK();
2410 	restore_interrupts(state);
2411 
2412 	return status;
2413 }
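
// A minimal usage sketch for the function above. The address space is
// returned with its reference count raised, so the caller has to drop that
// reference again; vm_put_address_space() is assumed to be the matching
// release call:
//
//	vm_address_space *addressSpace;
//	if (team_get_address_space(id, &addressSpace) == B_OK) {
//		// ... work with the team's address space ...
//		vm_put_address_space(addressSpace);
//	}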
2414 
2415 
2416 /*!	Sets the team's job control state.
2417 	Interrupts must be disabled and the team lock be held.
2418 	\a threadsLocked indicates whether the thread lock is being held, too.
2419 */
2420 void
2421 team_set_job_control_state(struct team* team, job_control_state newState,
2422 	int signal, bool threadsLocked)
2423 {
2424 	if (team == NULL || team->job_control_entry == NULL)
2425 		return;
2426 
2427 	// don't touch anything, if the state stays the same or the team is already
2428 	// dead
2429 	job_control_entry* entry = team->job_control_entry;
2430 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2431 		return;
2432 
2433 	T(SetJobControlState(team->id, newState, signal));
2434 
2435 	// remove from the old list
2436 	switch (entry->state) {
2437 		case JOB_CONTROL_STATE_NONE:
2438 			// entry is in no list ATM
2439 			break;
2440 		case JOB_CONTROL_STATE_DEAD:
2441 			// can't get here
2442 			break;
2443 		case JOB_CONTROL_STATE_STOPPED:
2444 			team->parent->stopped_children->entries.Remove(entry);
2445 			break;
2446 		case JOB_CONTROL_STATE_CONTINUED:
2447 			team->parent->continued_children->entries.Remove(entry);
2448 			break;
2449 	}
2450 
2451 	entry->state = newState;
2452 	entry->signal = signal;
2453 
2454 	// add to new list
2455 	team_job_control_children* childList = NULL;
2456 	switch (entry->state) {
2457 		case JOB_CONTROL_STATE_NONE:
2458 			// entry doesn't get into any list
2459 			break;
2460 		case JOB_CONTROL_STATE_DEAD:
2461 			childList = team->parent->dead_children;
2462 			team->parent->dead_children->count++;
2463 			break;
2464 		case JOB_CONTROL_STATE_STOPPED:
2465 			childList = team->parent->stopped_children;
2466 			break;
2467 		case JOB_CONTROL_STATE_CONTINUED:
2468 			childList = team->parent->continued_children;
2469 			break;
2470 	}
2471 
2472 	if (childList != NULL) {
2473 		childList->entries.Add(entry);
2474 		team->parent->dead_children->condition_variable.NotifyAll(
2475 			threadsLocked);
2476 	}
2477 }
2478 
2479 
2480 /*! Adds a hook to the team that is called as soon as this
2481 	team goes away.
2482 	This call might be made public in the future.
2483 */
2484 status_t
2485 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2486 {
2487 	struct team_watcher *watcher;
2488 	struct team *team;
2489 	cpu_status state;
2490 
2491 	if (hook == NULL || teamID < B_OK)
2492 		return B_BAD_VALUE;
2493 
2494 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2495 	if (watcher == NULL)
2496 		return B_NO_MEMORY;
2497 
2498 	watcher->hook = hook;
2499 	watcher->data = data;
2500 
2501 	// find team and add watcher
2502 
2503 	state = disable_interrupts();
2504 	GRAB_TEAM_LOCK();
2505 
2506 	team = team_get_team_struct_locked(teamID);
2507 	if (team != NULL)
2508 		list_add_item(&team->watcher_list, watcher);
2509 
2510 	RELEASE_TEAM_LOCK();
2511 	restore_interrupts(state);
2512 
2513 	if (team == NULL) {
2514 		free(watcher);
2515 		return B_BAD_TEAM_ID;
2516 	}
2517 
2518 	return B_OK;
2519 }
2520 
2521 
2522 status_t
2523 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2524 {
2525 	struct team_watcher *watcher = NULL;
2526 	struct team *team;
2527 	cpu_status state;
2528 
2529 	if (hook == NULL || teamID < B_OK)
2530 		return B_BAD_VALUE;
2531 
2532 	// find team and remove watcher (if present)
2533 
2534 	state = disable_interrupts();
2535 	GRAB_TEAM_LOCK();
2536 
2537 	team = team_get_team_struct_locked(teamID);
2538 	if (team != NULL) {
2539 		// search for watcher
2540 		while ((watcher = (struct team_watcher*)list_get_next_item(
2541 				&team->watcher_list, watcher)) != NULL) {
2542 			if (watcher->hook == hook && watcher->data == data) {
2543 				// got it!
2544 				list_remove_item(&team->watcher_list, watcher);
2545 				break;
2546 			}
2547 		}
2548 	}
2549 
2550 	RELEASE_TEAM_LOCK();
2551 	restore_interrupts(state);
2552 
2553 	if (watcher == NULL)
2554 		return B_ENTRY_NOT_FOUND;
2555 
2556 	free(watcher);
2557 	return B_OK;
2558 }
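
// A hedged usage sketch for the watcher API above (the hook name and the
// context pointer are invented for the example):
//
//	static void
//	team_vanished(team_id id, void *data)
//	{
//		// called from team_delete_team(); the team is already unreachable
//		free(data);
//	}
//
//	status_t error = start_watching_team(teamID, &team_vanished, context);
//	...
//	// if the team is still alive and the watcher is no longer needed:
//	stop_watching_team(teamID, &team_vanished, context);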
2559 
2560 
2561 //	#pragma mark - Public kernel API
2562 
2563 
2564 thread_id
2565 load_image(int32 argCount, const char **args, const char **env)
2566 {
2567 	int32 envCount = 0;
2568 
2569 	// count env variables
2570 	while (env && env[envCount] != NULL)
2571 		envCount++;
2572 
2573 	return load_image_etc(argCount, (char * const *)args, envCount,
2574 		(char * const *)env, B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED,
2575 		-1, 0, true);
2576 }
2577 
2578 
2579 status_t
2580 wait_for_team(team_id id, status_t *_returnCode)
2581 {
2582 	struct team *team;
2583 	thread_id thread;
2584 	cpu_status state;
2585 
2586 	// find main thread and wait for that
2587 
2588 	state = disable_interrupts();
2589 	GRAB_TEAM_LOCK();
2590 
2591 	team = team_get_team_struct_locked(id);
2592 	if (team != NULL && team->main_thread != NULL)
2593 		thread = team->main_thread->id;
2594 	else
2595 		thread = B_BAD_THREAD_ID;
2596 
2597 	RELEASE_TEAM_LOCK();
2598 	restore_interrupts(state);
2599 
2600 	if (thread < 0)
2601 		return thread;
2602 
2603 	return wait_for_thread(thread, _returnCode);
2604 }
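
// A hedged sketch tying the two public calls above together (the arguments
// are invented; as with the public BeOS API, the new team's main thread is
// presumably created suspended and has to be resumed first):
//
//	const char *args[] = { "/bin/tool", "--verbose", NULL };
//	thread_id thread = load_image(2, args, NULL);
//	if (thread >= B_OK) {
//		resume_thread(thread);
//		status_t returnCode;
//		wait_for_team(thread, &returnCode);
//			// works because the team ID equals its main thread's ID
//	}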
2605 
2606 
2607 status_t
2608 kill_team(team_id id)
2609 {
2610 	status_t status = B_OK;
2611 	thread_id threadID = -1;
2612 	struct team *team;
2613 	cpu_status state;
2614 
2615 	state = disable_interrupts();
2616 	GRAB_TEAM_LOCK();
2617 
2618 	team = team_get_team_struct_locked(id);
2619 	if (team != NULL) {
2620 		if (team != sKernelTeam) {
2621 			threadID = team->id;
2622 				// the team ID is the same as the ID of its main thread
2623 		} else
2624 			status = B_NOT_ALLOWED;
2625 	} else
2626 		status = B_BAD_THREAD_ID;
2627 
2628 	RELEASE_TEAM_LOCK();
2629 	restore_interrupts(state);
2630 
2631 	if (status < B_OK)
2632 		return status;
2633 
2634 	// just kill the main thread in the team. The cleanup code there will
2635 	// take care of the team
2636 	return kill_thread(threadID);
2637 }
2638 
2639 
2640 status_t
2641 _get_team_info(team_id id, team_info *info, size_t size)
2642 {
2643 	cpu_status state;
2644 	status_t status = B_OK;
2645 	struct team *team;
2646 
2647 	state = disable_interrupts();
2648 	GRAB_TEAM_LOCK();
2649 
2650 	if (id == B_CURRENT_TEAM)
2651 		team = thread_get_current_thread()->team;
2652 	else
2653 		team = team_get_team_struct_locked(id);
2654 
2655 	if (team == NULL) {
2656 		status = B_BAD_TEAM_ID;
2657 		goto err;
2658 	}
2659 
2660 	status = fill_team_info(team, info, size);
2661 
2662 err:
2663 	RELEASE_TEAM_LOCK();
2664 	restore_interrupts(state);
2665 
2666 	return status;
2667 }
2668 
2669 
2670 status_t
2671 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2672 {
2673 	status_t status = B_BAD_TEAM_ID;
2674 	struct team *team = NULL;
2675 	int32 slot = *cookie;
2676 	team_id lastTeamID;
2677 	cpu_status state;
2678 
2679 	if (slot < 1)
2680 		slot = 1;
2681 
2682 	state = disable_interrupts();
2683 	GRAB_TEAM_LOCK();
2684 
2685 	lastTeamID = peek_next_thread_id();
2686 	if (slot >= lastTeamID)
2687 		goto err;
2688 
2689 	// get next valid team
2690 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2691 		slot++;
2692 
2693 	if (team) {
2694 		status = fill_team_info(team, info, size);
2695 		*cookie = ++slot;
2696 	}
2697 
2698 err:
2699 	RELEASE_TEAM_LOCK();
2700 	restore_interrupts(state);
2701 
2702 	return status;
2703 }
2704 
2705 
2706 status_t
2707 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2708 {
2709 	bigtime_t kernelTime = 0, userTime = 0;
2710 	status_t status = B_OK;
2711 	struct team *team;
2712 	cpu_status state;
2713 
2714 	if (size != sizeof(team_usage_info)
2715 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2716 		return B_BAD_VALUE;
2717 
2718 	state = disable_interrupts();
2719 	GRAB_TEAM_LOCK();
2720 
2721 	if (id == B_CURRENT_TEAM)
2722 		team = thread_get_current_thread()->team;
2723 	else
2724 		team = team_get_team_struct_locked(id);
2725 
2726 	if (team == NULL) {
2727 		status = B_BAD_TEAM_ID;
2728 		goto out;
2729 	}
2730 
2731 	switch (who) {
2732 		case B_TEAM_USAGE_SELF:
2733 		{
2734 			struct thread *thread = team->thread_list;
2735 
2736 			for (; thread != NULL; thread = thread->team_next) {
2737 				kernelTime += thread->kernel_time;
2738 				userTime += thread->user_time;
2739 			}
2740 
2741 			kernelTime += team->dead_threads_kernel_time;
2742 			userTime += team->dead_threads_user_time;
2743 			break;
2744 		}
2745 
2746 		case B_TEAM_USAGE_CHILDREN:
2747 		{
2748 			struct team *child = team->children;
2749 			for (; child != NULL; child = child->siblings_next) {
2750 				struct thread *thread = child->thread_list;
2751 
2752 				for (; thread != NULL; thread = thread->team_next) {
2753 					kernelTime += thread->kernel_time;
2754 					userTime += thread->user_time;
2755 				}
2756 
2757 				kernelTime += child->dead_threads_kernel_time;
2758 				userTime += child->dead_threads_user_time;
2759 			}
2760 
2761 			kernelTime += team->dead_children->kernel_time;
2762 			userTime += team->dead_children->user_time;
2763 			break;
2764 		}
2765 	}
2766 
2767 out:
2768 	RELEASE_TEAM_LOCK();
2769 	restore_interrupts(state);
2770 
2771 	if (status == B_OK) {
2772 		info->kernel_time = kernelTime;
2773 		info->user_time = userTime;
2774 	}
2775 
2776 	return status;
2777 }
2778 
2779 
2780 pid_t
2781 getpid(void)
2782 {
2783 	return thread_get_current_thread()->team->id;
2784 }
2785 
2786 
2787 pid_t
2788 getppid(void)
2789 {
2790 	struct team *team = thread_get_current_thread()->team;
2791 	cpu_status state;
2792 	pid_t parent;
2793 
2794 	state = disable_interrupts();
2795 	GRAB_TEAM_LOCK();
2796 
2797 	parent = team->parent->id;
2798 
2799 	RELEASE_TEAM_LOCK();
2800 	restore_interrupts(state);
2801 
2802 	return parent;
2803 }
2804 
2805 
2806 pid_t
2807 getpgid(pid_t process)
2808 {
2809 	struct thread *thread;
2810 	pid_t result = -1;
2811 	cpu_status state;
2812 
2813 	if (process == 0)
2814 		process = thread_get_current_thread()->team->id;
2815 
2816 	state = disable_interrupts();
2817 	GRAB_THREAD_LOCK();
2818 
2819 	thread = thread_get_thread_struct_locked(process);
2820 	if (thread != NULL)
2821 		result = thread->team->group_id;
2822 
2823 	RELEASE_THREAD_LOCK();
2824 	restore_interrupts(state);
2825 
2826 	return thread != NULL ? result : B_BAD_VALUE;
2827 }
2828 
2829 
2830 pid_t
2831 getsid(pid_t process)
2832 {
2833 	struct thread *thread;
2834 	pid_t result = -1;
2835 	cpu_status state;
2836 
2837 	if (process == 0)
2838 		process = thread_get_current_thread()->team->id;
2839 
2840 	state = disable_interrupts();
2841 	GRAB_THREAD_LOCK();
2842 
2843 	thread = thread_get_thread_struct_locked(process);
2844 	if (thread != NULL)
2845 		result = thread->team->session_id;
2846 
2847 	RELEASE_THREAD_LOCK();
2848 	restore_interrupts(state);
2849 
2850 	return thread != NULL ? result : B_BAD_VALUE;
2851 }
2852 
2853 
2854 //	#pragma mark - User syscalls
2855 
2856 
2857 status_t
2858 _user_exec(const char *userPath, int32 argCount, char * const *userArgs,
2859 	int32 envCount, char * const *userEnvironment)
2860 {
2861 	char path[B_PATH_NAME_LENGTH];
2862 
2863 	if (argCount < 1)
2864 		return B_BAD_VALUE;
2865 
2866 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userArgs)
2867 		|| !IS_USER_ADDRESS(userEnvironment)
2868 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2869 		return B_BAD_ADDRESS;
2870 
2871 	return exec_team(path, argCount, userArgs, envCount, userEnvironment);
2872 		// this one only returns in case of error
2873 }
2874 
2875 
2876 thread_id
2877 _user_fork(void)
2878 {
2879 	return fork_team();
2880 }
2881 
2882 
2883 thread_id
2884 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
2885 {
2886 	status_t returnCode;
2887 	int32 reason;
2888 	thread_id deadChild;
2889 
2890 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
2891 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
2892 		return B_BAD_ADDRESS;
2893 
2894 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
2895 
2896 	if (deadChild >= B_OK) {
2897 		// copy result data on successful completion
2898 		if ((_userReason != NULL
2899 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
2900 			|| (_userReturnCode != NULL
2901 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
2902 					< B_OK)) {
2903 			return B_BAD_ADDRESS;
2904 		}
2905 
2906 		return deadChild;
2907 	}
2908 
2909 	return syscall_restart_handle_post(deadChild);
2910 }
2911 
2912 
2913 pid_t
2914 _user_process_info(pid_t process, int32 which)
2915 {
2916 	// we only allow returning the parent of the current process
2917 	if (which == PARENT_ID
2918 		&& process != 0 && process != thread_get_current_thread()->team->id)
2919 		return B_BAD_VALUE;
2920 
2921 	switch (which) {
2922 		case SESSION_ID:
2923 			return getsid(process);
2924 		case GROUP_ID:
2925 			return getpgid(process);
2926 		case PARENT_ID:
2927 			return getppid();
2928 	}
2929 
2930 	return B_BAD_VALUE;
2931 }
2932 
2933 
2934 pid_t
2935 _user_setpgid(pid_t processID, pid_t groupID)
2936 {
2937 	struct thread *thread = thread_get_current_thread();
2938 	struct team *currentTeam = thread->team;
2939 	struct team *team;
2940 
2941 	if (groupID < 0)
2942 		return B_BAD_VALUE;
2943 
2944 	if (processID == 0)
2945 		processID = currentTeam->id;
2946 
2947 	// if the group ID is not specified, use the target process' ID
2948 	if (groupID == 0)
2949 		groupID = processID;
2950 
2951 	if (processID == currentTeam->id) {
2952 		// we set our own group
2953 
2954 		// we must not change our process group ID if we're a session leader
2955 		if (is_session_leader(currentTeam))
2956 			return B_NOT_ALLOWED;
2957 	} else {
2958 		// another team is the target of the call -- check it out
2959 		InterruptsSpinLocker _(team_spinlock);
2960 
2961 		team = team_get_team_struct_locked(processID);
2962 		if (team == NULL)
2963 			return ESRCH;
2964 
2965 		// The team must be a child of the calling team and in the same session.
2966 		// (If that's the case it isn't a session leader either.)
2967 		if (team->parent != currentTeam
2968 			|| team->session_id != currentTeam->session_id) {
2969 			return B_NOT_ALLOWED;
2970 		}
2971 
2972 		if (team->group_id == groupID)
2973 			return groupID;
2974 
2975 		// The call is also supposed to fail on a child that has already
2976 		// executed exec*() [EACCES].
2977 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
2978 			return EACCES;
2979 	}
2980 
2981 	struct process_group *group = NULL;
2982 	if (groupID == processID) {
2983 		// A new process group might be needed.
2984 		group = create_process_group(groupID);
2985 		if (group == NULL)
2986 			return B_NO_MEMORY;
2987 
2988 		// Assume orphaned. We consider the situation of the team's parent
2989 		// below.
2990 		group->orphaned = true;
2991 	}
2992 
2993 	status_t status = B_OK;
2994 	struct process_group *freeGroup = NULL;
2995 
2996 	InterruptsSpinLocker locker(team_spinlock);
2997 
2998 	team = team_get_team_struct_locked(processID);
2999 	if (team != NULL) {
3000 		// check the conditions again -- they might have changed in the meantime
3001 		if (is_session_leader(team)
3002 			|| team->session_id != currentTeam->session_id) {
3003 			status = B_NOT_ALLOWED;
3004 		} else if (team != currentTeam
3005 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3006 			status = EACCES;
3007 		} else if (team->group_id == groupID) {
3008 			// the team is already in the desired process group
3009 			freeGroup = group;
3010 		} else {
3011 			// Check if a process group with the requested ID already exists.
3012 			struct process_group *targetGroup
3013 				= team_get_process_group_locked(team->group->session, groupID);
3014 			if (targetGroup != NULL) {
3015 				// In case of processID == groupID we have to free the
3016 				// allocated group.
3017 				freeGroup = group;
3018 			} else if (processID == groupID) {
3019 				// We created a new process group, let us insert it into the
3020 				// team's session.
3021 				insert_group_into_session(team->group->session, group);
3022 				targetGroup = group;
3023 			}
3024 
3025 			if (targetGroup != NULL) {
3026 				// we got a group, let's move the team there
3027 				process_group* oldGroup = team->group;
3028 
3029 				remove_team_from_group(team);
3030 				insert_team_into_group(targetGroup, team);
3031 
3032 				// Update the "orphaned" flag of all potentially affected
3033 				// groups.
3034 
3035 				// the team's old group
3036 				if (oldGroup->teams != NULL) {
3037 					oldGroup->orphaned = false;
3038 					update_orphaned_process_group(oldGroup, -1);
3039 				}
3040 
3041 				// the team's new group
3042 				struct team* parent = team->parent;
3043 				targetGroup->orphaned &= parent == NULL
3044 					|| parent->group == targetGroup
3045 					|| parent->session_id != team->session_id;
3046 
3047 				// children's groups
3048 				struct team* child = team->children;
3049 				while (child != NULL) {
3050 					child->group->orphaned = false;
3051 					update_orphaned_process_group(child->group, -1);
3052 
3053 					child = child->siblings_next;
3054 				}
3055 			} else
3056 				status = B_NOT_ALLOWED;
3057 		}
3058 	} else
3059 		status = B_NOT_ALLOWED;
3060 
3061 	// Changing the process group might have changed the situation for a parent
3062 	// waiting in wait_for_child(). Hence we notify it.
3063 	if (status == B_OK)
3064 		team->parent->dead_children->condition_variable.NotifyAll(false);
3065 
3066 	locker.Unlock();
3067 
3068 	if (status != B_OK) {
3069 		// in case of error, the group hasn't been added into the hash
3070 		team_delete_process_group(group);
3071 	}
3072 
3073 	team_delete_process_group(freeGroup);
3074 
3075 	return status == B_OK ? groupID : status;
3076 }
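
// For context, the syscall above backs setpgid(). A hedged userland example
// of the classic pattern it exists for -- a shell moving a freshly forked
// child into its own process group (both sides make the call, since either
// may win the race):
//
//	pid_t child = fork();
//	if (child == 0) {
//		setpgid(0, 0);			// child: new group with its own ID
//		// ... exec the job ...
//	} else if (child > 0)
//		setpgid(child, child);	// parent: same call; a failure is benign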
3077 
3078 
3079 pid_t
3080 _user_setsid(void)
3081 {
3082 	struct team *team = thread_get_current_thread()->team;
3083 	struct process_session *session;
3084 	struct process_group *group;
3085 	cpu_status state;
3086 	bool failed = false;
3087 
3088 	// the team must not already be a process group leader
3089 	if (is_process_group_leader(team))
3090 		return B_NOT_ALLOWED;
3091 
3092 	group = create_process_group(team->id);
3093 	if (group == NULL)
3094 		return B_NO_MEMORY;
3095 
3096 	session = create_process_session(group->id);
3097 	if (session == NULL) {
3098 		team_delete_process_group(group);
3099 		return B_NO_MEMORY;
3100 	}
3101 
3102 	state = disable_interrupts();
3103 	GRAB_TEAM_LOCK();
3104 
3105 	// this may have changed since the check above
3106 	if (!is_process_group_leader(team)) {
3107 		remove_team_from_group(team);
3108 
3109 		insert_group_into_session(session, group);
3110 		insert_team_into_group(group, team);
3111 	} else
3112 		failed = true;
3113 
3114 	RELEASE_TEAM_LOCK();
3115 	restore_interrupts(state);
3116 
3117 	if (failed) {
3118 		team_delete_process_group(group);
3119 		free(session);
3120 		return B_NOT_ALLOWED;
3121 	}
3122 
3123 	return team->group_id;
3124 }
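
// The matching example for the syscall above: a process that wants its own
// session (e.g. to detach from its controlling terminal) forks first, since
// a process group leader may not call setsid(), and creates the session in
// the child:
//
//	if (fork() == 0) {
//		setsid();	// the child is no group leader, so this succeeds
//		// ... continue as a daemon ...
//	}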
3125 
3126 
3127 status_t
3128 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3129 {
3130 	status_t returnCode;
3131 	status_t status;
3132 
3133 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3134 		return B_BAD_ADDRESS;
3135 
3136 	status = wait_for_team(id, &returnCode);
3137 	if (status >= B_OK && _userReturnCode != NULL) {
3138 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3139 			return B_BAD_ADDRESS;
3140 		return B_OK;
3141 	}
3142 
3143 	return syscall_restart_handle_post(status);
3144 }
3145 
3146 
3147 team_id
3148 _user_load_image(int32 argCount, const char **userArgs, int32 envCount,
3149 	const char **userEnv, int32 priority, uint32 flags, port_id errorPort,
3150 	uint32 errorToken)
3151 {
3152 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3153 
3154 	if (argCount < 1 || userArgs == NULL || userEnv == NULL)
3155 		return B_BAD_VALUE;
3156 
3157 	if (!IS_USER_ADDRESS(userArgs) || !IS_USER_ADDRESS(userEnv))
3158 		return B_BAD_ADDRESS;
3159 
3160 	return load_image_etc(argCount, (char * const *)userArgs,
3161 		envCount, (char * const *)userEnv, priority, flags, errorPort,
3162 		errorToken, false);
3163 }
3164 
3165 
3166 void
3167 _user_exit_team(status_t returnValue)
3168 {
3169 	struct thread *thread = thread_get_current_thread();
3170 
3171 	thread->exit.status = returnValue;
3172 	thread->exit.reason = THREAD_RETURN_EXIT;
3173 
3174 	send_signal(thread->id, SIGKILL);
3175 }
3176 
3177 
3178 status_t
3179 _user_kill_team(team_id team)
3180 {
3181 	return kill_team(team);
3182 }
3183 
3184 
3185 status_t
3186 _user_get_team_info(team_id id, team_info *userInfo)
3187 {
3188 	status_t status;
3189 	team_info info;
3190 
3191 	if (!IS_USER_ADDRESS(userInfo))
3192 		return B_BAD_ADDRESS;
3193 
3194 	status = _get_team_info(id, &info, sizeof(team_info));
3195 	if (status == B_OK) {
3196 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3197 			return B_BAD_ADDRESS;
3198 	}
3199 
3200 	return status;
3201 }
3202 
3203 
3204 status_t
3205 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3206 {
3207 	status_t status;
3208 	team_info info;
3209 	int32 cookie;
3210 
3211 	if (!IS_USER_ADDRESS(userCookie)
3212 		|| !IS_USER_ADDRESS(userInfo)
3213 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3214 		return B_BAD_ADDRESS;
3215 
3216 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3217 	if (status != B_OK)
3218 		return status;
3219 
3220 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3221 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3222 		return B_BAD_ADDRESS;
3223 
3224 	return status;
3225 }
3226 
3227 
3228 team_id
3229 _user_get_current_team(void)
3230 {
3231 	return team_get_current_team_id();
3232 }
3233 
3234 
3235 status_t
3236 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3237 {
3238 	team_usage_info info;
3239 	status_t status;
3240 
3241 	if (!IS_USER_ADDRESS(userInfo))
3242 		return B_BAD_ADDRESS;
3243 
3244 	status = _get_team_usage_info(team, who, &info, size);
3245 	if (status != B_OK)
3246 		return status;
3247 
3248 	if (user_memcpy(userInfo, &info, size) < B_OK)
3249 		return B_BAD_ADDRESS;
3250 
3251 	return status;
3252 }
3253 
3254