xref: /haiku/src/system/kernel/team.cpp (revision a381c8a06378de22ff08adf4282b4e3f7e50d250)
/*
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/*!	Team functions */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>

#include <elf.h>
#include <file_cache.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <port.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <team.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <vfs.h>
#include <vm.h>
#include <vm_address_space.h>
#include <util/AutoLock.h>
#include <util/khash.h>

//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	uint32	arg_count;
	char	**args;
	uint32	env_count;
	char	**env;
	port_id	error_port;
	uint32	error_token;
};

struct fork_arg {
	area_id		user_stack_area;
	addr_t		user_stack_base;
	size_t		user_stack_size;
	addr_t		user_local_storage;
	sigset_t	sig_block_mask;

	struct arch_fork_arg arch_info;
};


static hash_table *sTeamHash = NULL;
static hash_table *sGroupHash = NULL;
static struct team *sKernelTeam = NULL;

// some arbitrarily chosen limits - should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

spinlock team_spinlock = 0;


// #pragma mark - Tracing


#ifdef TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
	public:
		TeamForked(thread_id forkedThread)
			:
			fForkedThread(forkedThread)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("team forked, new thread %ld", fForkedThread);
		}

	private:
		thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
	public:
		ExecTeam(const char* path, int32 argCount, const char* const* args,
				int32 envCount, const char* const* env)
			:
			fArgCount(argCount),
			fArgs(NULL)
		{
			fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
				false);

			// determine the buffer size we need for the args
			size_t argBufferSize = 0;
			for (int32 i = 0; i < argCount; i++)
				argBufferSize += strlen(args[i]) + 1;

			// allocate a buffer
			fArgs = (char*)alloc_tracing_buffer(argBufferSize);
			if (fArgs) {
				char* buffer = fArgs;
				for (int32 i = 0; i < argCount; i++) {
					size_t argSize = strlen(args[i]) + 1;
					memcpy(buffer, args[i], argSize);
					buffer += argSize;
				}
			}

			// ignore env for the time being
			(void)envCount;
			(void)env;

			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("team exec, \"%s\", args:", fPath);

			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		}

	private:
		char*	fPath;
		int32	fArgCount;
		char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
	public:
		SetJobControlState(team_id team, job_control_state newState, int signal)
			:
			fTeam(team),
			fNewState(newState),
			fSignal(signal)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("team set job control state, team %ld, "
				"new state: %s, signal: %d",
				fTeam, job_control_state_name(fNewState), fSignal);
		}

	private:
		team_id				fTeam;
		job_control_state	fNewState;
		int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
	public:
		WaitForChild(pid_t child, uint32 flags)
			:
			fChild(child),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("team wait for child, child: %ld, "
				"flags: 0x%lx", fChild, fFlags);
		}

	private:
		pid_t	fChild;
		uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
	public:
		WaitForChildDone(const job_control_entry& entry)
			:
			fState(entry.state),
			fTeam(entry.thread),
			fStatus(entry.status),
			fReason(entry.reason),
			fSignal(entry.signal)
		{
			Initialized();
		}

		WaitForChildDone(status_t error)
			:
			fTeam(error)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			if (fTeam >= 0) {
				out.Print("team wait for child done, team: %ld, "
					"state: %s, status: 0x%lx, reason: 0x%x, signal: %d",
					fTeam, job_control_state_name(fState), fStatus, fReason,
					fSignal);
			} else {
				out.Print("team wait for child failed, error: 0x%lx",
					fTeam);
			}
		}

	private:
		job_control_state	fState;
		team_id				fTeam;
		status_t			fStatus;
		uint16				fReason;
		uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif



//	#pragma mark - Private functions


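/*!	Dumps the interesting fields of the given team structure to the kernel
	debugger output. Used by the "team" KDL command.
*/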
static void
_dump_team_info(struct team *team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
	kprintf("name:        '%s'\n", team->name);
	kprintf("args:        '%s'\n", team->args);
	kprintf("next:        %p\n", team->next);
	kprintf("parent:      %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %ld)\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:    %p\n", team->children);
	kprintf("num_threads: %d\n", team->num_threads);
	kprintf("state:       %d\n", team->state);
	kprintf("flags:       0x%lx\n", team->flags);
	kprintf("io_context:  %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space: %p\n", team->address_space);
	kprintf("main_thread: %p\n", team->main_thread);
	kprintf("thread_list: %p\n", team->thread_list);
	kprintf("group_id:    %ld\n", team->group_id);
	kprintf("session_id:  %ld\n", team->session_id);
}


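/*!	KDL command: prints information about the team given by ID, structure
	address, or name; without an argument, the current team is dumped.
*/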
static int
dump_team_info(int argc, char **argv)
{
	struct hash_iterator iterator;
	struct team *team;
	team_id id = -1;
	bool found = false;

	if (argc < 2) {
		struct thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	id = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(id)) {
		// semi-hack
		_dump_team_info((struct team *)id);
		return 0;
	}

	// walk through the team list, trying to match name or id
	hash_open(sTeamHash, &iterator);
	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}
	hash_close(sTeamHash, &iterator, false);

	if (!found)
		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
	return 0;
}


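/*!	KDL command: prints a one-line summary (address, ID, parent, name) of
	every team in the team hash.
*/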
static int
dump_teams(int argc, char **argv)
{
	struct hash_iterator iterator;
	struct team *team;

	kprintf("team           id  parent      name\n");
	hash_open(sTeamHash, &iterator);

	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
	}

	hash_close(sTeamHash, &iterator, false);
	return 0;
}


/*!	Frees an array of strings in kernel space.

	\param strings strings array
	\param count number of strings in array
*/
static void
free_strings_array(char **strings, int32 count)
{
	int32 i;

	if (strings == NULL)
		return;

	for (i = 0; i < count; i++)
		free(strings[i]);

	free(strings);
}


/*!	Copies an array of strings within kernel space.

	\param in the strings array to be copied
	\param count number of strings in the array
	\param _strings set to the kernel copy of the array
	\return \c B_OK on success, or an appropriate error code on
		failure.
*/
static status_t
kernel_copy_strings_array(char * const *in, int32 count, char ***_strings)
{
	status_t status;
	char **strings;
	int32 i = 0;

	strings = (char **)malloc((count + 1) * sizeof(char *));
	if (strings == NULL)
		return B_NO_MEMORY;

	for (; i < count; i++) {
		strings[i] = strdup(in[i]);
		if (strings[i] == NULL) {
			status = B_NO_MEMORY;
			goto error;
		}
	}

	strings[count] = NULL;
	*_strings = strings;

	return B_OK;

error:
	free_strings_array(strings, i);
	return status;
}


/*!	Copies an array of strings from user space into kernel space.

	\param userStrings userspace strings array
	\param count number of strings in the array
	\param _strings set to the kernel copy of the array
	\return \c B_OK on success, or an appropriate error code on
		failure.
*/
static status_t
user_copy_strings_array(char * const *userStrings, int32 count, char ***_strings)
{
	char *buffer;
	char **strings;
	status_t err;
	int32 i = 0;

	if (!IS_USER_ADDRESS(userStrings))
		return B_BAD_ADDRESS;

	// buffer for safely accessing the user string
	// TODO: maybe have a user_strdup() instead?
	buffer = (char *)malloc(4 * B_PAGE_SIZE);
	if (buffer == NULL)
		return B_NO_MEMORY;

	strings = (char **)malloc((count + 1) * sizeof(char *));
	if (strings == NULL) {
		err = B_NO_MEMORY;
		goto error;
	}

	if ((err = user_memcpy(strings, userStrings, count * sizeof(char *))) < B_OK)
		goto error;

	// scan all strings and copy to kernel space

	for (; i < count; i++) {
		err = user_strlcpy(buffer, strings[i], 4 * B_PAGE_SIZE);
		if (err < B_OK)
			goto error;

		strings[i] = strdup(buffer);
		if (strings[i] == NULL) {
			err = B_NO_MEMORY;
			goto error;
		}
	}

	strings[count] = NULL;
	*_strings = strings;
	free(buffer);

	return B_OK;

error:
	free_strings_array(strings, i);
	free(buffer);

	TRACE(("user_copy_strings_array failed %ld\n", err));
	return err;
}


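/*!	Copies an array of strings into kernel space, using the kernel or the
	user space variant depending on \a kernel.
*/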
static status_t
copy_strings_array(char * const *strings, int32 count, char ***_strings,
	bool kernel)
{
	if (kernel)
		return kernel_copy_strings_array(strings, count, _strings);

	return user_copy_strings_array(strings, count, _strings);
}


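/*!	Compare and hash functions used by the team and process group hash
	tables.
*/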
static int
team_struct_compare(void *_p, const void *_key)
{
	struct team *p = (struct team*)_p;
	const struct team_key *key = (const struct team_key*)_key;

	if (p->id == key->id)
		return 0;

	return 1;
}


static uint32
team_struct_hash(void *_p, const void *_key, uint32 range)
{
	struct team *p = (struct team*)_p;
	const struct team_key *key = (const struct team_key*)_key;

	if (p != NULL)
		return p->id % range;

	return (uint32)key->id % range;
}


static int
process_group_compare(void *_group, const void *_key)
{
	struct process_group *group = (struct process_group*)_group;
	const struct team_key *key = (const struct team_key*)_key;

	if (group->id == key->id)
		return 0;

	return 1;
}


static uint32
process_group_hash(void *_group, const void *_key, uint32 range)
{
	struct process_group *group = (struct process_group*)_group;
	const struct team_key *key = (const struct team_key*)_key;

	if (group != NULL)
		return group->id % range;

	return (uint32)key->id % range;
}


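/*!	Inserts \a team as a child of \a parent. You must hold the team lock
	when calling this function.
*/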
static void
insert_team_into_parent(struct team *parent, struct team *team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Note: must have team lock held */
static void
remove_team_from_parent(struct team *parent, struct team *team)
{
	struct team *child, *last = NULL;

	for (child = parent->children; child != NULL; child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Reparents each of our children to the kernel team.
	Note: must have team lock held.
*/
static void
reparent_children(struct team *team)
{
	struct team *child;

	while ((child = team->children) != NULL) {
		// remove the child from the current proc and add to the parent
		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);
	}

	// move job control entries too
	sKernelTeam->stopped_children->entries.MoveFrom(
		&team->stopped_children->entries);
	sKernelTeam->continued_children->entries.MoveFrom(
		&team->continued_children->entries);

	// Note, we don't move the dead children entries. Those will be deleted
	// when the team structure is deleted.
}


static bool
is_session_leader(struct team *team)
{
	return team->session_id == team->id;
}


static bool
is_process_group_leader(struct team *team)
{
	return team->group_id == team->id;
}


/*!	You must hold the team lock when calling this function. */
static void
insert_group_into_session(struct process_session *session, struct process_group *group)
{
	if (group == NULL)
		return;

	group->session = session;
	hash_insert(sGroupHash, group);
	session->group_count++;
}


/*!	You must hold the team lock when calling this function. */
static void
insert_team_into_group(struct process_group *group, struct team *team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->session->id;

	team->group_next = group->teams;
	group->teams = team;
}


/*!	Removes a group from a session, and puts the session object
	back into the session cache, if it's not used anymore.
	You must hold the team lock when calling this function.
*/
static void
remove_group_from_session(struct process_group *group)
{
	struct process_session *session = group->session;

	// the group must be in a session for this function to have any effect
	if (session == NULL)
		return;

	hash_remove(sGroupHash, group);

	// we cannot free the resource here, so we're keeping the group link
	// around - this way it'll be freed by team_delete_process_group()
	if (--session->group_count > 0)
		group->session = NULL;
}


/*!	Removes the team from its group. If the group becomes unused as a
	result, \a _freeGroup is set to point to the group - otherwise it is
	set to \c NULL.
	The group cannot be freed here because this function has to be called
	with the team lock held.

	\param team the team that will be removed from its group
	\param _freeGroup set to the group to be freed, or \c NULL
*/
static void
remove_team_from_group(struct team *team, struct process_group **_freeGroup)
{
	struct process_group *group = team->group;
	struct team *current, *last = NULL;

	*_freeGroup = NULL;

	// the team must be in a group for this function to have any effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL; current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			team->group = NULL;
			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;

	if (group->teams != NULL)
		return;

	// we can remove this group as it is no longer used

	remove_group_from_session(group);
	*_freeGroup = group;
}


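/*!	Allocates and initializes a new process_group structure with the given
	ID. Returns \c NULL when out of memory.
*/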
static struct process_group *
create_process_group(pid_t id)
{
	struct process_group *group
		= (struct process_group *)malloc(sizeof(struct process_group));
	if (group == NULL)
		return NULL;

	group->id = id;
	group->session = NULL;
	group->teams = NULL;
	group->orphaned = true;
	return group;
}


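/*!	Allocates and initializes a new process_session structure with the
	given ID. Returns \c NULL when out of memory.
*/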
static struct process_session *
create_process_session(pid_t id)
{
	struct process_session *session
		= (struct process_session *)malloc(sizeof(struct process_session));
	if (session == NULL)
		return NULL;

	session->id = id;
	session->group_count = 0;
	session->controlling_tty = -1;
	session->foreground_group = -1;

	return session;
}


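/*!	Sets the team name to the leaf name of the given path. */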
static void
set_team_name(struct team* team, const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(team->name, name, B_OS_NAME_LENGTH);
}


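/*!	Allocates and fully initializes a team structure, including its job
	control and dead children bookkeeping. Returns \c NULL when out of
	memory.
*/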
759 
760 static struct team *
761 create_team_struct(const char *name, bool kernel)
762 {
763 	struct team *team = (struct team *)malloc(sizeof(struct team));
764 	if (team == NULL)
765 		return NULL;
766 	MemoryDeleter teamDeleter(team);
767 
768 	team->next = team->siblings_next = team->children = team->parent = NULL;
769 	team->id = allocate_thread_id();
770 	set_team_name(team, name);
771 	team->args[0] = '\0';
772 	team->num_threads = 0;
773 	team->io_context = NULL;
774 	team->address_space = NULL;
775 	team->thread_list = NULL;
776 	team->main_thread = NULL;
777 	team->loading_info = NULL;
778 	team->state = TEAM_STATE_BIRTH;
779 	team->flags = 0;
780 	team->death_sem = -1;
781 
782 	team->dead_threads_kernel_time = 0;
783 	team->dead_threads_user_time = 0;
784 
785 	// dead threads
786 	list_init(&team->dead_threads);
787 	team->dead_threads_count = 0;
788 
789 	// dead children
790 	team->dead_children = new(nothrow) team_dead_children;
791 	if (team->dead_children == NULL)
792 		return NULL;
793 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
794 
795 	team->dead_children->count = 0;
796 	team->dead_children->kernel_time = 0;
797 	team->dead_children->user_time = 0;
798 
799 	// stopped children
800 	team->stopped_children = new(nothrow) team_job_control_children;
801 	if (team->stopped_children == NULL)
802 		return NULL;
803 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
804 		team->stopped_children);
805 
806 	// continued children
807 	team->continued_children = new(nothrow) team_job_control_children;
808 	if (team->continued_children == NULL)
809 		return NULL;
810 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
811 		team->continued_children);
812 
813 	// job control entry
814 	team->job_control_entry = new(nothrow) job_control_entry;
815 	if (team->job_control_entry == NULL)
816 		return NULL;
817 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
818 		team->job_control_entry);
819 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
820 	team->job_control_entry->thread = team->id;
821 	team->job_control_entry->team = team;
822 
823 	list_init(&team->image_list);
824 	list_init(&team->watcher_list);
825 
826 	clear_team_debug_info(&team->debug_info, true);
827 
828 	if (arch_team_init_team_struct(team, kernel) < 0)
829 		return NULL;
830 
831 	// publish dead/stopped/continued children condition vars
832 	team->dead_children->condition_variable.Publish(team->dead_children,
833 		"dead children");
834 	team->stopped_children->condition_variable.Publish(team->stopped_children,
835 		"stopped children");
836 	team->continued_children->condition_variable.Publish(
837 		team->continued_children, "continued children");
838 
839 	// keep all allocated structures
840 	jobControlEntryDeleter.Detach();
841 	continuedChildrenDeleter.Detach();
842 	stoppedChildrenDeleter.Detach();
843 	deadChildrenDeleter.Detach();
844 	teamDeleter.Detach();
845 
846 	return team;
847 }
848 
849 
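/*!	Frees a team structure and everything it owns: death entries, dead
	children entries, and the job control bookkeeping.
*/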
static void
delete_team_struct(struct team *team)
{
	team->stopped_children->condition_variable.Unpublish();
	team->continued_children->condition_variable.Unpublish();

	team->dead_children->condition_variable.Unpublish();

	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
			&team->dead_threads)) {
		free(threadDeathEntry);
	}

	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
		delete entry;

	delete team->job_control_entry;
		// usually already NULL and transferred to the parent
	delete team->continued_children;
	delete team->stopped_children;
	delete team->dead_children;
	free(team);
}


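/*!	Computes the space needed on the user stack for the argument strings,
	the argument vector, and the user_space_program_args structure.
*/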
static uint32
get_arguments_data_size(char **args, int32 argc)
{
	uint32 size = 0;
	int32 count;

	for (count = 0; count < argc; count++)
		size += strlen(args[count]) + 1;

	return size + (argc + 1) * sizeof(char *)
		+ sizeof(struct user_space_program_args);
}


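/*!	Frees a team_arg structure and the argument and environment arrays it
	owns.
*/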
static void
free_team_arg(struct team_arg *teamArg)
{
	free_strings_array(teamArg->args, teamArg->arg_count);
	free_strings_array(teamArg->env, teamArg->env_count);

	free(teamArg);
}


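/*!	Allocates a team_arg structure and copies the argument and environment
	strings into kernel space; the strings are taken from user space when
	\a kernel is \c false.
*/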
static status_t
create_team_arg(struct team_arg **_teamArg, int32 argCount, char * const *args,
	int32 envCount, char * const *env, port_id port, uint32 token, bool kernel)
{
	status_t status;
	char **argsCopy;
	char **envCopy;

	struct team_arg *teamArg
		= (struct team_arg *)malloc(sizeof(struct team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	// copy the args over

	status = copy_strings_array(args, argCount, &argsCopy, kernel);
	if (status != B_OK) {
		free(teamArg);
		return status;
	}

	status = copy_strings_array(env, envCount, &envCopy, kernel);
	if (status != B_OK) {
		free_strings_array(argsCopy, argCount);
		free(teamArg);
		return status;
	}

	teamArg->arg_count = argCount;
	teamArg->args = argsCopy;
	teamArg->env_count = envCount;
	teamArg->env = envCopy;
	teamArg->error_port = port;
	teamArg->error_token = token;

	*_teamArg = teamArg;
	return B_OK;
}


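/*!	Main function of a newly created team's main thread: it sets up the
	user stack, copies the arguments and the environment onto it, loads
	the runtime loader, and finally enters userland.
	Takes over ownership of the passed team_arg structure.
*/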
static int32
team_create_thread_start(void *args)
{
	status_t err;
	struct thread *t;
	struct team *team;
	struct team_arg *teamArgs = (struct team_arg*)args;
	const char *path;
	addr_t entry;
	char ustack_name[128];
	uint32 sizeLeft;
	char **userArgs;
	char **userEnv;
	char *userDest;
	struct user_space_program_args *programArgs;
	uint32 argCount, envCount, i;

	t = thread_get_current_thread();
	team = t->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->args);

	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));

	// create an initial primary stack area

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size							| usage
	// -----------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE	| actual stack
	// TLS_SIZE						| TLS data
	// ENV_SIZE						| environment variables
	// arguments size				| arguments passed to the team

	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot has
	//	copied it to the heap
	// ToDo: we could reserve the whole USER_STACK_REGION upfront...

	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE + TLS_SIZE + ENV_SIZE +
		get_arguments_data_size(teamArgs->args, teamArgs->arg_count));
	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE;
		// the exact location at the end of the user stack area

	sprintf(ustack_name, "%s_main_stack", team->name);
	t->user_stack_area = create_area_etc(team, ustack_name, (void **)&t->user_stack_base,
		B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA);
	if (t->user_stack_area < 0) {
		dprintf("team_create_thread_start: could not create default user stack region\n");

		free_team_arg(teamArgs);
		return t->user_stack_area;
	}

	// now that the TLS area is allocated, initialize TLS
	arch_thread_init_tls(t);

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args *)(t->user_stack_base
		+ t->user_stack_size + TLS_SIZE + ENV_SIZE);
	userArgs = (char **)(programArgs + 1);
	userDest = (char *)(userArgs + argCount + 1);

	TRACE(("addr: stack base = 0x%lx, userArgs = %p, userDest = %p, sizeLeft = %lu\n",
		t->user_stack_base, userArgs, userDest, sizeLeft));

	sizeLeft = t->user_stack_base + sizeLeft - (addr_t)userDest;

	for (i = 0; i < argCount; i++) {
		ssize_t length = user_strlcpy(userDest, teamArgs->args[i], sizeLeft);
		if (length < B_OK) {
			argCount = 0;
			break;
		}

		userArgs[i] = userDest;
		userDest += ++length;
		sizeLeft -= length;
	}
	userArgs[argCount] = NULL;

	userEnv = (char **)(t->user_stack_base + t->user_stack_size + TLS_SIZE);
	sizeLeft = ENV_SIZE;
	userDest = (char *)userEnv + ENV_SIZE - 1;
		// the environment variables are copied from back to front

	TRACE(("team_create_thread_start: envc: %ld, env: %p\n",
		teamArgs->env_count, (void *)teamArgs->env));

	for (i = 0; i < envCount; i++) {
		ssize_t length = strlen(teamArgs->env[i]) + 1;
		userDest -= length;
		if (userDest < (char *)&userEnv[envCount]) {
			envCount = i;
			break;
		}

		userEnv[i] = userDest;

		if (user_memcpy(userDest, teamArgs->env[i], length) < B_OK) {
			envCount = 0;
			break;
		}

		sizeLeft -= length;
	}
	userEnv[envCount] = NULL;

	path = teamArgs->args[0];
	if (user_strlcpy(programArgs->program_path, path,
				sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
				sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
				sizeof(uint32)) < B_OK) {
		// the team deletion process will clean this mess
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// add args to info member
	team->args[0] = 0;
	strlcpy(team->args, path, sizeof(team->args));
	for (i = 1; i < argCount; i++) {
		strlcat(team->args, " ", sizeof(team->args));
		strlcat(team->args, teamArgs->args[i], sizeof(team->args));
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need them in this form

	// ToDo: don't use fixed paths!
	err = elf_load_user_image("/boot/beos/system/runtime_loader", team, 0, &entry);
	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: error when elf_load_user_image() %s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	team->state = TEAM_STATE_NORMAL;

	// jump to the entry point in user space
	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
		// only returns in case of error
}


/*!	The BeOS kernel exports a function with this name, but most probably with
	different parameters; we should not make it public.
*/
static thread_id
load_image_etc(int32 argCount, char * const *args, int32 envCount,
	char * const *env, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken, bool kernel)
{
	struct process_group *group;
	struct team *team, *parent;
	const char *threadName;
	thread_id thread;
	status_t status;
	cpu_status state;
	struct team_arg *teamArgs;
	struct team_loading_info loadingInfo;

	if (args == NULL || argCount == 0)
		return B_BAD_VALUE;

	TRACE(("load_image_etc: name '%s', args = %p, argCount = %ld\n",
		args[0], args, argCount));

	team = create_team_struct(args[0], false);
	if (team == NULL)
		return B_NO_MEMORY;

	parent = thread_get_current_thread()->team;

	if (flags & B_WAIT_TILL_LOADED) {
		loadingInfo.thread = thread_get_current_thread();
		loadingInfo.result = B_ERROR;
		loadingInfo.done = false;
		team->loading_info = &loadingInfo;
	}

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	hash_insert(sTeamHash, team);
	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);
	sUsedTeams++;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
		errorPort, errorToken, kernel);
	if (status != B_OK)
		goto err1;

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parent->io_context);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// cut the path from the main thread name
	threadName = strrchr(args[0], '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = args[0];

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs
	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
	if (thread < 0) {
		status = thread;
		goto err4;
	}

	// wait for the loader of the new team to finish its work
	if (flags & B_WAIT_TILL_LOADED) {
		struct thread *mainThread;

		state = disable_interrupts();
		GRAB_THREAD_LOCK();

		mainThread = thread_get_thread_struct_locked(thread);
		if (mainThread) {
			// resume the team's main thread
			if (mainThread->state == B_THREAD_SUSPENDED)
				scheduler_enqueue_in_run_queue(mainThread);

			// Now suspend ourselves until loading is finished.
			// We will be woken either by the thread, when it finished or
			// aborted loading, or when the team is going to die (e.g. is
			// killed). In either case the one setting `loadingInfo.done' is
			// responsible for removing the info from the team structure.
			while (!loadingInfo.done) {
				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
				scheduler_reschedule();
			}
		} else {
			// Impressive! Someone managed to kill the thread in this short
			// time.
		}

		RELEASE_THREAD_LOCK();
		restore_interrupts(state);

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	return thread;

err4:
	vm_put_address_space(team->address_space);
err3:
	vfs_free_io_context(team->io_context);
err2:
	free_team_arg(teamArgs);
err1:
	// remove the team structure from the team hash table and delete the team structure
	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	remove_team_from_group(team, &group);
	remove_team_from_parent(parent, team);
	hash_remove(sTeamHash, team);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	team_delete_process_group(group);
	delete_team_struct(team);

	return status;
}


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and takes over ownership
	of the arguments provided.
	This function may only be called from user space.
*/
static status_t
exec_team(const char *path, int32 argCount, char * const *args,
	int32 envCount, char * const *env)
{
	struct team *team = thread_get_current_thread()->team;
	struct team_arg *teamArgs;
	const char *threadName;
	status_t status = B_OK;
	cpu_status state;
	struct thread *thread;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
		args[0], argCount, envCount, team->id));

	T(ExecTeam(path, argCount, args, envCount, env));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// ToDo: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	if (team->main_thread != thread_get_current_thread())
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	state = disable_interrupts();
	GRAB_TEAM_LOCK();
	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

	for (thread = team->thread_list; thread; thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID) {
			status = B_NOT_ALLOWED;
			break;
		}
	}

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	if (status != B_OK)
		return status;

	status = create_team_arg(&teamArgs, argCount, args, envCount, env,
		-1, 0, false);
	if (status != B_OK)
		return status;

	// replace args[0] with the path argument, just to be on the safe side
	free(teamArgs->args[0]);
	teamArgs->args[0] = strdup(path);

	// ToDo: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	vm_delete_areas(team->address_space);
	delete_owned_ports(team->id);
	sem_delete_owned_sems(team->id);
	remove_images(team);
	vfs_exec_io_context(team->io_context);

	user_debug_finish_after_exec();

	// rename the team

	set_team_name(team, path);

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	status = team_create_thread_start(teamArgs);
		// this one usually doesn't return...

	// sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that)
	exit_thread(status);

	// we return a status here since the signal that is sent by the
	// call above is not immediately handled
	return B_ERROR;
}


/*!	This is the first function to be called from the newly created
	main child thread.
	It will fill in everything that's left to do from fork_arg, and
	return from the parent's fork() syscall to the child.
*/
static int32
fork_team_thread_start(void *_args)
{
	struct thread *thread = thread_get_current_thread();
	struct fork_arg *forkArgs = (struct fork_arg *)_args;

	struct arch_fork_arg archArgs = forkArgs->arch_info;
		// we need a local copy of the arch dependent part

	thread->user_stack_area = forkArgs->user_stack_area;
	thread->user_stack_base = forkArgs->user_stack_base;
	thread->user_stack_size = forkArgs->user_stack_size;
	thread->user_local_storage = forkArgs->user_local_storage;
	thread->sig_block_mask = forkArgs->sig_block_mask;

	arch_thread_init_tls(thread);

	free(forkArgs);

	// set frame of the parent thread to this one, too

	arch_restore_fork_frame(&archArgs);
		// This one won't return here

	return 0;
}


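/*!	Creates a copy of the current team (address space, I/O context, signal
	state) and spawns its main thread, which will continue in userland
	right after the fork() syscall.
	Returns the ID of the child's main thread, or an error code.
*/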
static thread_id
fork_team(void)
{
	struct thread *parentThread = thread_get_current_thread();
	struct team *parentTeam = parentThread->team, *team;
	struct process_group *group = NULL;
	struct fork_arg *forkArgs;
	struct area_info info;
	thread_id threadID;
	cpu_status state;
	status_t status;
	int32 cookie;

	TRACE(("fork_team(): team %ld\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// ToDo: this is very similar to team_create_team() - maybe we can do something about it :)

	team = create_team_struct(parentTeam->name, false);
	if (team == NULL)
		return B_NO_MEMORY;

	strlcpy(team->args, parentTeam->args, sizeof(team->args));

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	hash_insert(sTeamHash, team);
	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);
	sUsedTeams++;

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// create an address space for this team
	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
	// ToDo: all stacks of other threads than the current one could be left out

	cookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
		void *address;
		area_id area = vm_copy_area(team->address_space->id, info.name,
			&address, B_CLONE_ADDRESS, info.protection, info.area);
		if (area < B_OK) {
			status = area;
			break;
		}

		if (info.area == parentThread->user_stack_area)
			forkArgs->user_stack_area = area;
	}

	if (status < B_OK)
		goto err4;

	forkArgs->user_stack_base = parentThread->user_stack_base;
	forkArgs->user_stack_size = parentThread->user_stack_size;
	forkArgs->user_local_storage = parentThread->user_local_storage;
	forkArgs->sig_block_mask = parentThread->sig_block_mask;
	arch_store_fork_frame(&forkArgs->arch_info);

	// ToDo: copy image list

	// create a kernel thread under the context of the new team
	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
		parentThread->name, parentThread->priority, forkArgs,
		team->id, team->id);
	if (threadID < 0) {
		status = threadID;
		goto err4;
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err4:
	vm_delete_address_space(team->address_space);
err3:
	vfs_free_io_context(team->io_context);
err2:
	free(forkArgs);
err1:
	// remove the team structure from the team hash table and delete the team structure
	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	remove_team_from_group(team, &group);
	remove_team_from_parent(parentTeam, team);
	hash_remove(sTeamHash, team);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	team_delete_process_group(group);
	delete_team_struct(team);

	return status;
}


/*!	Returns whether the specified \a parent team has any children belonging
	to the process group specified by \a groupID.
	Must be called with the team lock held.
*/
static bool
has_children_in_group(struct team *parent, pid_t groupID)
{
	struct team *team;

	struct process_group *group = team_get_process_group_locked(
		parent->group->session, groupID);
	if (group == NULL)
		return false;

	for (team = group->teams; team; team = team->group_next) {
		if (team->parent == parent)
			return true;
	}

	return false;
}


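/*!	Returns the first job control entry in \a children that matches \a id.
	\a id can be a thread ID (> 0), -1 to match any entry, or the negated
	ID of a process group.
*/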
static job_control_entry*
get_job_control_entry(team_job_control_children* children, pid_t id)
{
	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
		 job_control_entry* entry = it.Next();) {

		if (id > 0) {
			if (entry->thread == id)
				return entry;
		} else if (id == -1) {
			return entry;
		} else {
			pid_t processGroup
				= (entry->team ? entry->team->group_id : entry->group_id);
			if (processGroup == -id)
				return entry;
		}
	}

	return NULL;
}


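/*!	Returns a job control entry of \a team matching \a id, considering the
	continued and stopped children only if \a flags contains WCONTINUED
	or WUNTRACED respectively.
*/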
static job_control_entry*
get_job_control_entry(struct team* team, pid_t id, uint32 flags)
{
	job_control_entry* entry = get_job_control_entry(team->dead_children, id);

	if (entry == NULL && (flags & WCONTINUED) != 0)
		entry = get_job_control_entry(team->continued_children, id);

	if (entry == NULL && (flags & WUNTRACED) != 0)
		entry = get_job_control_entry(team->stopped_children, id);

	return entry;
}


/*!	This is the kernel backend for waitpid(). It is a bit more powerful
	than waitpid() when it comes to the reason why a thread has died.
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, int32 *_reason,
	status_t *_returnCode)
{
	struct thread* thread = thread_get_current_thread();
	struct team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));

	T(WaitForChild(child, flags));

	if (child == 0) {
		// wait for all children in the process group of the calling team
		child = -team->group_id;
	}

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		InterruptsSpinLocker locker(team_spinlock);

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying with the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children we could
			// wait for.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else {
				if (struct team* childTeam = team_get_team_struct_locked(child))
					childrenExist = childTeam->parent == team;
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;
			if (entry->state == JOB_CONTROL_STATE_DEAD) {
				// The child is dead. Reap its death entry.
				freeDeathEntry = entry;
				team->dead_children->entries.Remove(entry);
				team->dead_children->count--;
			} else {
				// The child is well. Reset its job control state.
				team_set_job_control_state(entry->team,
					JOB_CONTROL_STATE_NONE, 0, false);
			}
		}

		// If we haven't got anything yet, prepare for waiting on the
		// condition variables.
		ConditionVariableEntry<team_dead_children> deadWaitEntry;
		ConditionVariableEntry<team_job_control_children> continuedWaitEntry;
		ConditionVariableEntry<team_job_control_children> stoppedWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0) {
			deadWaitEntry.Add(team->dead_children);

			if ((flags & WCONTINUED) != 0) {
				continuedWaitEntry.Add(team->continued_children,
					&deadWaitEntry);
			}

			if ((flags & WUNTRACED) != 0)
				stoppedWaitEntry.Add(team->stopped_children, &deadWaitEntry);
		}

		locker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// when we got here, we have a valid death entry, and
	// already got unregistered from the team or group
	int reason = 0;
	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			reason = foundEntry.reason;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			reason = THREAD_STOPPED;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			reason = THREAD_CONTINUED;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	*_returnCode = foundEntry.status;
	*_reason = (foundEntry.signal << 16) | reason;

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	if (is_signal_blocked(SIGCHLD)) {
		InterruptsSpinLocker locker(team_spinlock);

		if (get_job_control_entry(team, child, flags) == NULL)
			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
	}

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}


/*!	Fills the team_info structure with information from the specified
	team.
	The team lock must be held when called.
*/
static status_t
fill_team_info(struct team *team, team_info *info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// ToDo: set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
	info->thread_count = team->num_threads;
	info->image_count = count_images(team);
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	//info->uid =
	//info->gid =

	strlcpy(info->args, team->args, sizeof(info->args));
	info->argc = 1;

	return B_OK;
}


/*!	Updates the \c orphaned field of a process_group and returns its new
	value.
	Interrupts must be disabled and the team lock must be held.
*/
static bool
update_orphaned_process_group(process_group* group, pid_t dyingProcess)
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 6)

	// once orphaned, things won't change (exception: cf. setpgid())
	if (group->orphaned)
		return true;

	struct team* team = group->teams;
	while (team != NULL) {
		struct team* parent = team->parent;
		if (team->id != dyingProcess && parent != NULL
			&& parent->id != dyingProcess
			&& parent->group_id != group->id
			&& parent->session_id == group->session->id) {
			return false;
		}

		team = team->group_next;
	}

	group->orphaned = true;
	return true;
}


/*!	Returns whether the process group contains stopped processes.
	Interrupts must be disabled and the team lock must be held.
*/
static bool
process_group_has_stopped_processes(process_group* group)
{
	SpinLocker _(thread_spinlock);

	struct team* team = group->teams;
	while (team != NULL) {
		if (team->main_thread->state == B_THREAD_SUSPENDED)
			return true;

		team = team->group_next;
	}

	return false;
}


//	#pragma mark - Private kernel API


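/*!	Initializes the team subsystem: creates the team and process group
	hashes, the initial session and process group, and the kernel team,
	and registers the "team" and "teams" debugger commands.
*/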
status_t
team_init(kernel_args *args)
{
	struct process_session *session;
	struct process_group *group;

	// create the team hash table
	sTeamHash = hash_init(16, offsetof(struct team, next),
		&team_struct_compare, &team_struct_hash);

	sGroupHash = hash_init(16, offsetof(struct process_group, next),
		&process_group_compare, &process_group_hash);

	// create initial session and process groups

	session = create_process_session(1);
	if (session == NULL)
		panic("Could not create initial session.\n");

	group = create_process_group(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");

	insert_group_into_session(session, group);

	// create the kernel team
	sKernelTeam = create_team_struct("kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	strcpy(sKernelTeam->args, sKernelTeam->name);
	sKernelTeam->state = TEAM_STATE_NORMAL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	// stick it in the team hash
	hash_insert(sTeamHash, sKernelTeam);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);
	return 0;
}


1870 
1871 int32
1872 team_max_teams(void)
1873 {
1874 	return sMaxTeams;
1875 }
1876 
1877 
1878 int32
1879 team_used_teams(void)
1880 {
1881 	return sUsedTeams;
1882 }
1883 
1884 
/*!	Returns the death entry of \a team's dead child with thread ID \a child,
	if one exists. If the caller is the parent of the found team, the entry
	is removed and \a _deleteEntry is set to \c true, meaning the caller has
	to delete it; otherwise \a _deleteEntry is set to \c false.
	You need to have the team lock held when calling this function.
*/
job_control_entry*
team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
{
	if (child <= 0)
		return NULL;

	job_control_entry* entry = get_job_control_entry(team->dead_children,
		child);
	if (entry) {
		// only remove the entry if the caller is the parent of the found team
		if (team_get_current_team_id() == entry->thread) {
			team->dead_children->entries.Remove(entry);
			team->dead_children->count--;
			*_deleteEntry = true;
		} else {
			*_deleteEntry = false;
		}
	}

	return entry;
}


/*!	Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	struct team *team;
	cpu_status state;

	if (id <= 0)
		return false;

	state = disable_interrupts();
	GRAB_TEAM_LOCK();

	team = team_get_team_struct_locked(id);

	RELEASE_TEAM_LOCK();
	restore_interrupts(state);

	return team != NULL;
}


struct team *
team_get_team_struct_locked(team_id id)
{
	struct team_key key;
	key.id = id;

	return (struct team*)hash_lookup(sTeamHash, &key);
}


/*!	Looks up the process group with the specified ID. If \a session is not
	\c NULL, the group must additionally belong to that session.
	You must hold the team lock when you call this function.
*/
1946 struct process_group *
1947 team_get_process_group_locked(struct process_session *session, pid_t id)
1948 {
1949 	struct process_group *group;
1950 	struct team_key key;
1951 	key.id = id;
1952 
1953 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
1954 	if (group != NULL && (session == NULL || session == group->session))
1955 		return group;
1956 
1957 	return NULL;
1958 }
1959 
1960 
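/*!	Frees the given process group. If the group's session pointer is still
	set (cf. remove_group_from_session()), the session is freed as well.
*/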
1961 void
1962 team_delete_process_group(struct process_group *group)
1963 {
1964 	if (group == NULL)
1965 		return;
1966 
1967 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
1968 
1969 	// remove_group_from_session() keeps this pointer around
1970 	// only if the session can be freed as well
1971 	if (group->session) {
1972 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
1973 		free(group->session);
1974 	}
1975 
1976 	free(group);
1977 }
1978 
1979 
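/*!	Sets the controlling TTY of the calling team's session to the given
	index, and resets the session's foreground process group.
*/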
1980 void
1981 team_set_controlling_tty(int32 ttyIndex)
1982 {
1983 	struct team* team = thread_get_current_thread()->team;
1984 
1985 	InterruptsSpinLocker _(team_spinlock);
1986 
1987 	team->group->session->controlling_tty = ttyIndex;
1988 	team->group->session->foreground_group = -1;
1989 }
1990 
1991 
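/*!	Returns the index of the controlling TTY of the calling team's
	session.
*/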
1992 int32
1993 team_get_controlling_tty()
1994 {
1995 	struct team* team = thread_get_current_thread()->team;
1996 
1997 	InterruptsSpinLocker _(team_spinlock);
1998 
1999 	return team->group->session->controlling_tty;
2000 }
2001 
2002 
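/*!	Sets the foreground process group of the session of the calling team.
	\a ttyIndex must denote the session's controlling TTY, and
	\a processGroupID a process group belonging to the same session.
	If the caller is in a background group and neither ignores nor blocks
	SIGTTOU, its group is sent that signal and B_INTERRUPTED is returned.
*/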
2003 status_t
2004 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2005 {
2006 	struct thread* thread = thread_get_current_thread();
2007 	struct team* team = thread->team;
2008 
2009 	InterruptsSpinLocker locker(team_spinlock);
2010 
2011 	process_session* session = team->group->session;
2012 
2013 	// must be the controlling tty of the calling process
2014 	if (session->controlling_tty != ttyIndex)
2015 		return ENOTTY;
2016 
2017 	// check process group -- must belong to our session
2018 	process_group* group = team_get_process_group_locked(session,
2019 		processGroupID);
2020 	if (group == NULL)
2021 		return B_BAD_VALUE;
2022 
	// If we're in a background group, this is only allowed if we ignore or
	// block SIGTTOU; otherwise our group gets a SIGTTOU instead.
2025 	if (session->foreground_group != -1
2026 		&& session->foreground_group != team->group_id
2027 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2028 		&& !is_signal_blocked(SIGTTOU)) {
2029 		pid_t groupID = team->group->id;
2030 		locker.Unlock();
2031 		send_signal(-groupID, SIGTTOU);
2032 		return B_INTERRUPTED;
2033 	}
2034 
	session->foreground_group = processGroupID;
2036 
2037 	return B_OK;
2038 }
2039 
2040 
2041 /*!	Removes the specified team from the global team hash, and from its parent.
2042 	It also moves all of its children up to the parent.
2043 	You must hold the team lock when you call this function.
	If \a _freeGroup is set to a value other than \c NULL, it must be freed
	by the calling function.
2046 */
2047 void
2048 team_remove_team(struct team *team, struct process_group **_freeGroup)
2049 {
2050 	struct team *parent = team->parent;
2051 
2052 	// remember how long this team lasted
2053 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2054 		+ team->dead_children->kernel_time;
2055 	parent->dead_children->user_time += team->dead_threads_user_time
2056 		+ team->dead_children->user_time;
2057 
2058 	hash_remove(sTeamHash, team);
2059 	sUsedTeams--;
2060 
2061 	team->state = TEAM_STATE_DEATH;
2062 
2063 	// If we're a controlling process (i.e. a session leader with controlling
2064 	// terminal), there's a bit of signalling we have to do.
2065 	if (team->session_id == team->id
2066 		&& team->group->session->controlling_tty >= 0) {
2067 		process_session* session = team->group->session;
2068 
2069 		session->controlling_tty = -1;
2070 
2071 		// send SIGHUP to the foreground
2072 		if (session->foreground_group >= 0) {
2073 			send_signal_etc(-session->foreground_group, SIGHUP,
2074 				SIGNAL_FLAG_TEAMS_LOCKED);
2075 		}
2076 
2077 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2078 		// stopped processes
2079 		struct team* child = team->children;
2080 		while (child != NULL) {
2081 			process_group* childGroup = child->group;
2082 			if (!childGroup->orphaned
2083 				&& update_orphaned_process_group(childGroup, team->id)
2084 				&& process_group_has_stopped_processes(childGroup)) {
2085 				send_signal_etc(-childGroup->id, SIGHUP,
2086 					SIGNAL_FLAG_TEAMS_LOCKED);
2087 				send_signal_etc(-childGroup->id, SIGCONT,
2088 					SIGNAL_FLAG_TEAMS_LOCKED);
2089 			}
2090 
2091 			child = child->siblings_next;
2092 		}
2093 	} else {
2094 		// update "orphaned" flags of all children's process groups
2095 		struct team* child = team->children;
2096 		while (child != NULL) {
2097 			process_group* childGroup = child->group;
2098 			if (!childGroup->orphaned)
2099 				update_orphaned_process_group(childGroup, team->id);
2100 
2101 			child = child->siblings_next;
2102 		}
2103 
2104 		// update "orphaned" flag of this team's process group
2105 		update_orphaned_process_group(team->group, team->id);
2106 	}
2107 
2108 	// reparent each of the team's children
2109 	reparent_children(team);
2110 
2111 	// remove us from our process group
2112 	remove_team_from_group(team, _freeGroup);
2113 
2114 	// remove us from our parent
2115 	remove_team_from_parent(parent, team);
2116 }
2117 
2118 
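/*!	Kills all remaining threads of the given team and waits for them to
	die, wakes up a thread possibly waiting for the team to finish
	loading, calls the registered team watchers, and finally frees all of
	the team's resources. The team must already have been removed via
	team_remove_team().
*/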
2119 void
2120 team_delete_team(struct team *team)
2121 {
2122 	team_id teamID = team->id;
2123 	port_id debuggerPort = -1;
2124 	cpu_status state;
2125 
2126 	if (team->num_threads > 0) {
		// There are still other threads in this team; cycle through and
		// send each of them a kill signal.
2129 		// ToDo: this can be optimized. There's got to be a better solution.
2130 		struct thread *temp_thread;
2131 		char death_sem_name[B_OS_NAME_LENGTH];
2132 		sem_id deathSem;
2133 		int32 threadCount;
2134 
2135 		sprintf(death_sem_name, "team %ld death sem", teamID);
2136 		deathSem = create_sem(0, death_sem_name);
2137 		if (deathSem < 0)
2138 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2139 
2140 		state = disable_interrupts();
2141 		GRAB_TEAM_LOCK();
2142 
2143 		team->death_sem = deathSem;
2144 		threadCount = team->num_threads;
2145 
2146 		// If the team was being debugged, that will stop with the termination
2147 		// of the nub thread. The team structure has already been removed from
		// the team hash table at this point, so no one can install a debugger
2149 		// anymore. We fetch the debugger's port to send it a message at the
2150 		// bitter end.
2151 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2152 
2153 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2154 			debuggerPort = team->debug_info.debugger_port;
2155 
2156 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2157 
		// We can safely walk the list because of the lock; no new threads
		// can be created, since the team is in the TEAM_STATE_DEATH state.
2160 		temp_thread = team->thread_list;
2161 		while (temp_thread) {
2162 			struct thread *next = temp_thread->team_next;
2163 
2164 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2165 			temp_thread = next;
2166 		}
2167 
2168 		RELEASE_TEAM_LOCK();
2169 		restore_interrupts(state);
2170 
		// wait until all threads in the team are dead
2172 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2173 		delete_sem(team->death_sem);
2174 	}
2175 
2176 	// If someone is waiting for this team to be loaded, but it dies
2177 	// unexpectedly before being done, we need to notify the waiting
2178 	// thread now.
2179 
2180 	state = disable_interrupts();
2181 	GRAB_TEAM_LOCK();
2182 
2183 	if (team->loading_info) {
2184 		// there's indeed someone waiting
2185 		struct team_loading_info *loadingInfo = team->loading_info;
2186 		team->loading_info = NULL;
2187 
2188 		loadingInfo->result = B_ERROR;
2189 		loadingInfo->done = true;
2190 
2191 		GRAB_THREAD_LOCK();
2192 
2193 		// wake up the waiting thread
2194 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2195 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2196 
2197 		RELEASE_THREAD_LOCK();
2198 	}
2199 
2200 	RELEASE_TEAM_LOCK();
2201 	restore_interrupts(state);
2202 
2203 	// notify team watchers
2204 
2205 	{
2206 		// we're not reachable from anyone anymore at this point, so we
2207 		// can safely access the list without any locking
2208 		struct team_watcher *watcher;
2209 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2210 				&team->watcher_list)) != NULL) {
2211 			watcher->hook(teamID, watcher->data);
2212 			free(watcher);
2213 		}
2214 	}
2215 
2216 	// free team resources
2217 
2218 	vfs_free_io_context(team->io_context);
2219 	delete_owned_ports(teamID);
2220 	sem_delete_owned_sems(teamID);
2221 	remove_images(team);
2222 	vm_delete_address_space(team->address_space);
2223 
2224 	delete_team_struct(team);
2225 
	// notify the debugger that the team is gone
2227 	user_debug_team_deleted(teamID, debuggerPort);
2228 }
2229 
2230 
2231 struct team *
2232 team_get_kernel_team(void)
2233 {
2234 	return sKernelTeam;
2235 }
2236 
2237 
2238 team_id
2239 team_get_kernel_team_id(void)
2240 {
2241 	if (!sKernelTeam)
2242 		return 0;
2243 
2244 	return sKernelTeam->id;
2245 }
2246 
2247 
2248 team_id
2249 team_get_current_team_id(void)
2250 {
2251 	return thread_get_current_thread()->team->id;
2252 }
2253 
2254 
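/*!	Returns the address space of the given team in \a _addressSpace. On
	success a reference to the address space is acquired on behalf of the
	caller, who is responsible for releasing it.
*/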
2255 status_t
2256 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2257 {
2258 	cpu_status state;
2259 	struct team *team;
2260 	status_t status;
2261 
2262 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2263 	if (id == 1) {
2264 		// we're the kernel team, so we don't have to go through all
2265 		// the hassle (locking and hash lookup)
2266 		*_addressSpace = vm_get_kernel_address_space();
2267 		return B_OK;
2268 	}
2269 
2270 	state = disable_interrupts();
2271 	GRAB_TEAM_LOCK();
2272 
2273 	team = team_get_team_struct_locked(id);
2274 	if (team != NULL) {
2275 		atomic_add(&team->address_space->ref_count, 1);
2276 		*_addressSpace = team->address_space;
2277 		status = B_OK;
2278 	} else
2279 		status = B_BAD_VALUE;
2280 
2281 	RELEASE_TEAM_LOCK();
2282 	restore_interrupts(state);
2283 
2284 	return status;
2285 }
2286 
2287 
2288 /*!	Sets the team's job control state.
2289 	Interrupts must be disabled and the team lock be held.
2290 	\a threadsLocked indicates whether the thread lock is being held, too.
2291 */
2292 void
2293 team_set_job_control_state(struct team* team, job_control_state newState,
2294 	int signal, bool threadsLocked)
2295 {
2296 	if (team == NULL || team->job_control_entry == NULL)
2297 		return;
2298 
	// Don't touch anything if the state stays the same or the team is
	// already dead.
2301 	job_control_entry* entry = team->job_control_entry;
2302 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2303 		return;
2304 
2305 	T(SetJobControlState(team->id, newState, signal));
2306 
2307 	// remove from the old list
2308 	switch (entry->state) {
2309 		case JOB_CONTROL_STATE_NONE:
2310 			// entry is in no list ATM
2311 			break;
2312 		case JOB_CONTROL_STATE_DEAD:
2313 			// can't get here
2314 			break;
2315 		case JOB_CONTROL_STATE_STOPPED:
2316 			team->parent->stopped_children->entries.Remove(entry);
2317 			break;
2318 		case JOB_CONTROL_STATE_CONTINUED:
2319 			team->parent->continued_children->entries.Remove(entry);
2320 			break;
2321 	}
2322 
2323 	entry->state = newState;
2324 	entry->signal = signal;
2325 
2326 	// add to new list
2327 	team_job_control_children* childList = NULL;
2328 	switch (entry->state) {
2329 		case JOB_CONTROL_STATE_NONE:
2330 			// entry doesn't get into any list
2331 			break;
2332 		case JOB_CONTROL_STATE_DEAD:
2333 			childList = team->parent->dead_children;
2334 			team->parent->dead_children->count++;
2335 			// When a child dies, we need to notify all lists, since that might
2336 			// have been the last of the parent's children, and a waiting
2337 			// parent thread wouldn't wake up otherwise.
2338 			team->parent->stopped_children->condition_variable.NotifyAll(
2339 				threadsLocked);
2340 			team->parent->continued_children->condition_variable.NotifyAll(
2341 				threadsLocked);
2342 			break;
2343 		case JOB_CONTROL_STATE_STOPPED:
2344 			childList = team->parent->stopped_children;
2345 			break;
2346 		case JOB_CONTROL_STATE_CONTINUED:
2347 			childList = team->parent->continued_children;
2348 			break;
2349 	}
2350 
2351 	if (childList != NULL) {
2352 		childList->entries.Add(entry);
2353 		childList->condition_variable.NotifyAll(threadsLocked);
2354 	}
2355 }
2356 
2357 
/*!	Adds a hook to the team that is called as soon as the team goes away.
	This call might be made public in the future.
*/
2362 status_t
2363 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2364 {
2365 	struct team_watcher *watcher;
2366 	struct team *team;
2367 	cpu_status state;
2368 
2369 	if (hook == NULL || teamID < B_OK)
2370 		return B_BAD_VALUE;
2371 
2372 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2373 	if (watcher == NULL)
2374 		return B_NO_MEMORY;
2375 
2376 	watcher->hook = hook;
2377 	watcher->data = data;
2378 
2379 	// find team and add watcher
2380 
2381 	state = disable_interrupts();
2382 	GRAB_TEAM_LOCK();
2383 
2384 	team = team_get_team_struct_locked(teamID);
2385 	if (team != NULL)
2386 		list_add_item(&team->watcher_list, watcher);
2387 
2388 	RELEASE_TEAM_LOCK();
2389 	restore_interrupts(state);
2390 
2391 	if (team == NULL) {
2392 		free(watcher);
2393 		return B_BAD_TEAM_ID;
2394 	}
2395 
2396 	return B_OK;
2397 }
2398 
2399 
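/*!	Removes a team watcher previously installed via start_watching_team().
	Both \a hook and \a data must match those of the registered watcher.
*/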
2400 status_t
2401 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2402 {
2403 	struct team_watcher *watcher = NULL;
2404 	struct team *team;
2405 	cpu_status state;
2406 
2407 	if (hook == NULL || teamID < B_OK)
2408 		return B_BAD_VALUE;
2409 
2410 	// find team and remove watcher (if present)
2411 
2412 	state = disable_interrupts();
2413 	GRAB_TEAM_LOCK();
2414 
2415 	team = team_get_team_struct_locked(teamID);
2416 	if (team != NULL) {
2417 		// search for watcher
2418 		while ((watcher = (struct team_watcher*)list_get_next_item(
2419 				&team->watcher_list, watcher)) != NULL) {
2420 			if (watcher->hook == hook && watcher->data == data) {
2421 				// got it!
2422 				list_remove_item(&team->watcher_list, watcher);
2423 				break;
2424 			}
2425 		}
2426 	}
2427 
2428 	RELEASE_TEAM_LOCK();
2429 	restore_interrupts(state);
2430 
2431 	if (watcher == NULL)
2432 		return B_ENTRY_NOT_FOUND;
2433 
2434 	free(watcher);
2435 	return B_OK;
2436 }
2437 
2438 
2439 //	#pragma mark - Public kernel API
2440 
2441 
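/*!	Creates a new team running the image specified by \a args[0] with the
	given arguments and environment, at normal priority, and waits until
	the image has been loaded. Returns the ID of the new team's main
	thread, or an error code.
*/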
2442 thread_id
2443 load_image(int32 argCount, const char **args, const char **env)
2444 {
2445 	int32 envCount = 0;
2446 
2447 	// count env variables
2448 	while (env && env[envCount] != NULL)
2449 		envCount++;
2450 
2451 	return load_image_etc(argCount, (char * const *)args, envCount,
2452 		(char * const *)env, B_NORMAL_PRIORITY, B_WAIT_TILL_LOADED,
2453 		-1, 0, true);
2454 }
2455 
2456 
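/*!	Waits for the main thread of the given team, and thereby for the team
	itself, to terminate. The thread's return code is stored in
	\a _returnCode.
*/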
2457 status_t
2458 wait_for_team(team_id id, status_t *_returnCode)
2459 {
2460 	struct team *team;
2461 	thread_id thread;
2462 	cpu_status state;
2463 
2464 	// find main thread and wait for that
2465 
2466 	state = disable_interrupts();
2467 	GRAB_TEAM_LOCK();
2468 
2469 	team = team_get_team_struct_locked(id);
2470 	if (team != NULL && team->main_thread != NULL)
2471 		thread = team->main_thread->id;
2472 	else
2473 		thread = B_BAD_THREAD_ID;
2474 
2475 	RELEASE_TEAM_LOCK();
2476 	restore_interrupts(state);
2477 
2478 	if (thread < 0)
2479 		return thread;
2480 
2481 	return wait_for_thread(thread, _returnCode);
2482 }
2483 
2484 
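/*!	Kills the given team by killing its main thread; the cleanup code
	there takes down the rest of the team. Killing the kernel team is not
	allowed.
*/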
2485 status_t
2486 kill_team(team_id id)
2487 {
2488 	status_t status = B_OK;
2489 	thread_id threadID = -1;
2490 	struct team *team;
2491 	cpu_status state;
2492 
2493 	state = disable_interrupts();
2494 	GRAB_TEAM_LOCK();
2495 
2496 	team = team_get_team_struct_locked(id);
2497 	if (team != NULL) {
2498 		if (team != sKernelTeam) {
2499 			threadID = team->id;
2500 				// the team ID is the same as the ID of its main thread
2501 		} else
2502 			status = B_NOT_ALLOWED;
2503 	} else
2504 		status = B_BAD_THREAD_ID;
2505 
2506 	RELEASE_TEAM_LOCK();
2507 	restore_interrupts(state);
2508 
2509 	if (status < B_OK)
2510 		return status;
2511 
2512 	// just kill the main thread in the team. The cleanup code there will
2513 	// take care of the team
2514 	return kill_thread(threadID);
2515 }
2516 
2517 
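/*!	Fills in \a info for the team with the given ID, or for the calling
	team, if \a id is \c B_CURRENT_TEAM.
*/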
2518 status_t
2519 _get_team_info(team_id id, team_info *info, size_t size)
2520 {
2521 	cpu_status state;
2522 	status_t status = B_OK;
2523 	struct team *team;
2524 
2525 	state = disable_interrupts();
2526 	GRAB_TEAM_LOCK();
2527 
2528 	if (id == B_CURRENT_TEAM)
2529 		team = thread_get_current_thread()->team;
2530 	else
2531 		team = team_get_team_struct_locked(id);
2532 
2533 	if (team == NULL) {
2534 		status = B_BAD_TEAM_ID;
2535 		goto err;
2536 	}
2537 
2538 	status = fill_team_info(team, info, size);
2539 
2540 err:
2541 	RELEASE_TEAM_LOCK();
2542 	restore_interrupts(state);
2543 
2544 	return status;
2545 }
2546 
2547 
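/*!	Iterates through all existing teams, using and updating \a cookie to
	track the position, and fills in \a info for the next team found.
*/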
2548 status_t
2549 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2550 {
2551 	status_t status = B_BAD_TEAM_ID;
2552 	struct team *team = NULL;
2553 	int32 slot = *cookie;
2554 	team_id lastTeamID;
2555 	cpu_status state;
2556 
2557 	if (slot < 1)
2558 		slot = 1;
2559 
2560 	state = disable_interrupts();
2561 	GRAB_TEAM_LOCK();
2562 
2563 	lastTeamID = peek_next_thread_id();
2564 	if (slot >= lastTeamID)
2565 		goto err;
2566 
2567 	// get next valid team
2568 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2569 		slot++;
2570 
2571 	if (team) {
2572 		status = fill_team_info(team, info, size);
2573 		*cookie = ++slot;
2574 	}
2575 
2576 err:
2577 	RELEASE_TEAM_LOCK();
2578 	restore_interrupts(state);
2579 
2580 	return status;
2581 }
2582 
2583 
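/*!	Returns the accumulated kernel and user CPU times of either the team
	itself (\c B_TEAM_USAGE_SELF) or its children
	(\c B_TEAM_USAGE_CHILDREN), including the times of dead threads and
	dead children, respectively.
*/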
2584 status_t
2585 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2586 {
2587 	bigtime_t kernelTime = 0, userTime = 0;
2588 	status_t status = B_OK;
2589 	struct team *team;
2590 	cpu_status state;
2591 
2592 	if (size != sizeof(team_usage_info)
2593 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2594 		return B_BAD_VALUE;
2595 
2596 	state = disable_interrupts();
2597 	GRAB_TEAM_LOCK();
2598 
2599 	if (id == B_CURRENT_TEAM)
2600 		team = thread_get_current_thread()->team;
2601 	else
2602 		team = team_get_team_struct_locked(id);
2603 
2604 	if (team == NULL) {
2605 		status = B_BAD_TEAM_ID;
2606 		goto out;
2607 	}
2608 
2609 	switch (who) {
2610 		case B_TEAM_USAGE_SELF:
2611 		{
2612 			struct thread *thread = team->thread_list;
2613 
2614 			for (; thread != NULL; thread = thread->team_next) {
2615 				kernelTime += thread->kernel_time;
2616 				userTime += thread->user_time;
2617 			}
2618 
2619 			kernelTime += team->dead_threads_kernel_time;
2620 			userTime += team->dead_threads_user_time;
2621 			break;
2622 		}
2623 
2624 		case B_TEAM_USAGE_CHILDREN:
2625 		{
2626 			struct team *child = team->children;
2627 			for (; child != NULL; child = child->siblings_next) {
				struct thread *thread = child->thread_list;
2629 
2630 				for (; thread != NULL; thread = thread->team_next) {
2631 					kernelTime += thread->kernel_time;
2632 					userTime += thread->user_time;
2633 				}
2634 
2635 				kernelTime += child->dead_threads_kernel_time;
2636 				userTime += child->dead_threads_user_time;
2637 			}
2638 
2639 			kernelTime += team->dead_children->kernel_time;
2640 			userTime += team->dead_children->user_time;
2641 			break;
2642 		}
2643 	}
2644 
2645 out:
2646 	RELEASE_TEAM_LOCK();
2647 	restore_interrupts(state);
2648 
2649 	if (status == B_OK) {
2650 		info->kernel_time = kernelTime;
2651 		info->user_time = userTime;
2652 	}
2653 
2654 	return status;
2655 }
2656 
2657 
2658 pid_t
2659 getpid(void)
2660 {
2661 	return thread_get_current_thread()->team->id;
2662 }
2663 
2664 
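/*!	Returns the ID of the parent of the calling team. The team lock is
	held while reading the parent pointer, as the team may be re-parented
	concurrently.
*/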
2665 pid_t
2666 getppid(void)
2667 {
2668 	struct team *team = thread_get_current_thread()->team;
2669 	cpu_status state;
2670 	pid_t parent;
2671 
2672 	state = disable_interrupts();
2673 	GRAB_TEAM_LOCK();
2674 
2675 	parent = team->parent->id;
2676 
2677 	RELEASE_TEAM_LOCK();
2678 	restore_interrupts(state);
2679 
2680 	return parent;
2681 }
2682 
2683 
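/*!	Returns the ID of the process group the given process belongs to, or
	B_BAD_VALUE if no such process exists. A \a process of 0 denotes the
	calling process.
*/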
2684 pid_t
2685 getpgid(pid_t process)
2686 {
2687 	struct thread *thread;
2688 	pid_t result = -1;
2689 	cpu_status state;
2690 
2691 	if (process == 0)
2692 		process = thread_get_current_thread()->team->id;
2693 
2694 	state = disable_interrupts();
2695 	GRAB_THREAD_LOCK();
2696 
2697 	thread = thread_get_thread_struct_locked(process);
2698 	if (thread != NULL)
2699 		result = thread->team->group_id;
2700 
2701 	RELEASE_THREAD_LOCK();
2702 	restore_interrupts(state);
2703 
2704 	return thread != NULL ? result : B_BAD_VALUE;
2705 }
2706 
2707 
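/*!	Returns the ID of the session the given process belongs to, or
	B_BAD_VALUE if no such process exists. A \a process of 0 denotes the
	calling process.
*/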
2708 pid_t
2709 getsid(pid_t process)
2710 {
2711 	struct thread *thread;
2712 	pid_t result = -1;
2713 	cpu_status state;
2714 
2715 	if (process == 0)
2716 		process = thread_get_current_thread()->team->id;
2717 
2718 	state = disable_interrupts();
2719 	GRAB_THREAD_LOCK();
2720 
2721 	thread = thread_get_thread_struct_locked(process);
2722 	if (thread != NULL)
2723 		result = thread->team->session_id;
2724 
2725 	RELEASE_THREAD_LOCK();
2726 	restore_interrupts(state);
2727 
2728 	return thread != NULL ? result : B_BAD_VALUE;
2729 }
2730 
2731 
2732 //	#pragma mark - User syscalls
2733 
2734 
2735 status_t
2736 _user_exec(const char *userPath, int32 argCount, char * const *userArgs,
2737 	int32 envCount, char * const *userEnvironment)
2738 {
2739 	char path[B_PATH_NAME_LENGTH];
2740 
2741 	if (argCount < 1)
2742 		return B_BAD_VALUE;
2743 
2744 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userArgs)
2745 		|| !IS_USER_ADDRESS(userEnvironment)
2746 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
2747 		return B_BAD_ADDRESS;
2748 
2749 	return exec_team(path, argCount, userArgs, envCount, userEnvironment);
2750 		// this one only returns in case of error
2751 }
2752 
2753 
2754 thread_id
2755 _user_fork(void)
2756 {
2757 	return fork_team();
2758 }
2759 
2760 
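/*!	Syscall backend of wait_for_child(): waits for a state change of a
	child matching \a child and \a flags, then copies the reason and
	return code to the given userland addresses.
*/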
2761 thread_id
2762 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
2763 {
2764 	status_t returnCode;
2765 	int32 reason;
2766 	thread_id deadChild;
2767 
2768 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
2769 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
2770 		return B_BAD_ADDRESS;
2771 
2772 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
2773 
2774 	if (deadChild >= B_OK) {
2775 		// copy result data on successful completion
2776 		if ((_userReason != NULL
2777 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
2778 			|| (_userReturnCode != NULL
2779 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
2780 					< B_OK)) {
2781 			return B_BAD_ADDRESS;
2782 		}
2783 
2784 		return deadChild;
2785 	}
2786 
2787 	return syscall_restart_handle_post(deadChild);
2788 }
2789 
2790 
2791 pid_t
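/*!	Returns the session, group, or parent ID of the given process,
	depending on \a which. Querying the parent is only allowed for the
	calling process itself.
*/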
2792 _user_process_info(pid_t process, int32 which)
2793 {
	// we only allow returning the parent of the current process
2795 	if (which == PARENT_ID
2796 		&& process != 0 && process != thread_get_current_thread()->team->id)
2797 		return B_BAD_VALUE;
2798 
2799 	switch (which) {
2800 		case SESSION_ID:
2801 			return getsid(process);
2802 		case GROUP_ID:
2803 			return getpgid(process);
2804 		case PARENT_ID:
2805 			return getppid();
2806 	}
2807 
2808 	return B_BAD_VALUE;
2809 }
2810 
2811 
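/*!	Implements the setpgid() syscall: moves the team identified by
	\a processID (0 denotes the calling team) into the process group with
	ID \a groupID, creating that group first if \a groupID equals
	\a processID and no such group exists yet. Fails for session leaders,
	for teams that are neither the calling team nor one of its children
	in the same session, and for children that have already executed
	exec*(). Returns the group ID on success, an error code otherwise.
*/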
2812 pid_t
2813 _user_setpgid(pid_t processID, pid_t groupID)
2814 {
2815 	struct thread *thread = thread_get_current_thread();
2816 	struct team *currentTeam = thread->team;
2817 	struct team *team;
2818 
2819 	if (groupID < 0)
2820 		return B_BAD_VALUE;
2821 
2822 	if (processID == 0)
2823 		processID = currentTeam->id;
2824 
2825 	// if the group ID is not specified, use the target process' ID
2826 	if (groupID == 0)
2827 		groupID = processID;
2828 
2829 	if (processID == currentTeam->id) {
2830 		// we set our own group
2831 
2832 		// we must not change our process group ID if we're a session leader
2833 		if (is_session_leader(currentTeam))
2834 			return B_NOT_ALLOWED;
2835 	} else {
2836 		// another team is the target of the call -- check it out
2837 		InterruptsSpinLocker _(team_spinlock);
2838 
2839 		team = team_get_team_struct_locked(processID);
2840 		if (team == NULL)
2841 			return ESRCH;
2842 
2843 		// The team must be a child of the calling team and in the same session.
2844 		// (If that's the case it isn't a session leader either.)
2845 		if (team->parent != currentTeam
2846 			|| team->session_id != currentTeam->session_id) {
2847 			return B_NOT_ALLOWED;
2848 		}
2849 
2850 		if (team->group_id == groupID)
2851 			return groupID;
2852 
		// The call is also supposed to fail on a child that has already
		// executed exec*() [EACCES].
2855 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
2856 			return EACCES;
2857 	}
2858 
2859 	struct process_group *group = NULL;
2860 	if (groupID == processID) {
2861 		// A new process group might be needed.
2862 		group = create_process_group(groupID);
2863 		if (group == NULL)
2864 			return B_NO_MEMORY;
2865 
2866 		// Assume orphaned. We consider the situation of the team's parent
2867 		// below.
2868 		group->orphaned = true;
2869 	}
2870 
2871 	status_t status = B_OK;
2872 	struct process_group *freeGroup = NULL;
2873 	struct process_group *freeGroup2 = NULL;
2874 
2875 	InterruptsSpinLocker locker(team_spinlock);
2876 
2877 	team = team_get_team_struct_locked(processID);
2878 	if (team != NULL) {
2879 		// check the conditions again -- they might have changed in the meantime
2880 		if (is_session_leader(team)
2881 			|| team->session_id != currentTeam->session_id) {
2882 			status = B_NOT_ALLOWED;
2883 		} else if (team != currentTeam
2884 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
2885 			status = EACCES;
2886 		} else if (team->group_id == groupID) {
2887 			// the team is already in the desired process group
2888 			freeGroup = group;
2889 		} else {
2890 			// Check if a process group with the requested ID already exists.
2891 			struct process_group *targetGroup
2892 				= team_get_process_group_locked(team->group->session, groupID);
2893 			if (targetGroup != NULL) {
2894 				// In case of processID == groupID we have to free the
2895 				// allocated group.
2896 				freeGroup2 = group;
2897 			} else if (processID == groupID) {
				// We created a new process group, so let's insert it into
				// the team's session.
2900 				insert_group_into_session(team->group->session, group);
2901 				targetGroup = group;
2902 			}
2903 
2904 			if (targetGroup != NULL) {
2905 				// we got a group, let's move the team there
2906 				process_group* oldGroup = team->group;
2907 
2908 				remove_team_from_group(team, &freeGroup);
2909 				insert_team_into_group(targetGroup, team);
2910 
2911 				// Update the "orphaned" flag of all potentially affected
2912 				// groups.
2913 
2914 				// the team's old group
2915 				if (oldGroup->teams != NULL) {
2916 					oldGroup->orphaned = false;
2917 					update_orphaned_process_group(oldGroup, -1);
2918 				}
2919 
2920 				// the team's new group
2921 				struct team* parent = team->parent;
2922 				targetGroup->orphaned &= parent == NULL
2923 					|| parent->group == targetGroup
2924 					|| team->parent->session_id != team->session_id;
2925 
2926 				// children's groups
2927 				struct team* child = team->children;
2928 				while (child != NULL) {
2929 					child->group->orphaned = false;
2930 					update_orphaned_process_group(child->group, -1);
2931 
2932 					child = child->siblings_next;
2933 				}
2934 			} else
2935 				status = B_NOT_ALLOWED;
2936 		}
2937 	} else
2938 		status = B_NOT_ALLOWED;
2939 
2940 	// Changing the process group might have changed the situation for a parent
2941 	// waiting in wait_for_child(). Hence we notify it.
2942 	if (status == B_OK) {
2943 		team->parent->dead_children->condition_variable.NotifyAll(false);
2944 		team->parent->stopped_children->condition_variable.NotifyAll(false);
2945 		team->parent->continued_children->condition_variable.NotifyAll(false);
2946 	}
2947 
2948 	locker.Unlock();
2949 
2950 	if (status != B_OK) {
2951 		// in case of error, the group hasn't been added into the hash
2952 		team_delete_process_group(group);
2953 	}
2954 
2955 	team_delete_process_group(freeGroup);
2956 	team_delete_process_group(freeGroup2);
2957 
2958 	return status == B_OK ? groupID : status;
2959 }
2960 
2961 
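/*!	Implements the setsid() syscall: makes the calling team the leader of
	both a new session and a new process group within it. Fails with
	B_NOT_ALLOWED if the team already is a process group leader.
*/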
2962 pid_t
2963 _user_setsid(void)
2964 {
2965 	struct team *team = thread_get_current_thread()->team;
2966 	struct process_session *session;
2967 	struct process_group *group, *freeGroup = NULL;
2968 	cpu_status state;
2969 	bool failed = false;
2970 
2971 	// the team must not already be a process group leader
2972 	if (is_process_group_leader(team))
2973 		return B_NOT_ALLOWED;
2974 
2975 	group = create_process_group(team->id);
2976 	if (group == NULL)
2977 		return B_NO_MEMORY;
2978 
2979 	session = create_process_session(group->id);
2980 	if (session == NULL) {
2981 		team_delete_process_group(group);
2982 		return B_NO_MEMORY;
2983 	}
2984 
2985 	state = disable_interrupts();
2986 	GRAB_TEAM_LOCK();
2987 
2988 	// this may have changed since the check above
2989 	if (!is_process_group_leader(team)) {
2990 		remove_team_from_group(team, &freeGroup);
2991 
2992 		insert_group_into_session(session, group);
2993 		insert_team_into_group(group, team);
2994 	} else
2995 		failed = true;
2996 
2997 	RELEASE_TEAM_LOCK();
2998 	restore_interrupts(state);
2999 
3000 	if (failed) {
3001 		team_delete_process_group(group);
3002 		free(session);
3003 		return B_NOT_ALLOWED;
3004 	} else
3005 		team_delete_process_group(freeGroup);
3006 
3007 	return team->group_id;
3008 }
3009 
3010 
3011 status_t
3012 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3013 {
3014 	status_t returnCode;
3015 	status_t status;
3016 
3017 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3018 		return B_BAD_ADDRESS;
3019 
3020 	status = wait_for_team(id, &returnCode);
3021 	if (status >= B_OK && _userReturnCode != NULL) {
3022 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3023 			return B_BAD_ADDRESS;
3024 		return B_OK;
3025 	}
3026 
3027 	return syscall_restart_handle_post(status);
3028 }
3029 
3030 
3031 team_id
3032 _user_load_image(int32 argCount, const char **userArgs, int32 envCount,
3033 	const char **userEnv, int32 priority, uint32 flags, port_id errorPort,
3034 	uint32 errorToken)
3035 {
	TRACE(("_user_load_image: argc = %ld\n", argCount));
3037 
3038 	if (argCount < 1 || userArgs == NULL || userEnv == NULL)
3039 		return B_BAD_VALUE;
3040 
3041 	if (!IS_USER_ADDRESS(userArgs) || !IS_USER_ADDRESS(userEnv))
3042 		return B_BAD_ADDRESS;
3043 
3044 	return load_image_etc(argCount, (char * const *)userArgs,
3045 		envCount, (char * const *)userEnv, priority, flags, errorPort,
3046 		errorToken, false);
3047 }
3048 
3049 
3050 void
3051 _user_exit_team(status_t returnValue)
3052 {
3053 	struct thread *thread = thread_get_current_thread();
3054 
3055 	thread->exit.status = returnValue;
3056 	thread->exit.reason = THREAD_RETURN_EXIT;
3057 
3058 	send_signal(thread->id, SIGKILL);
3059 }
3060 
3061 
3062 status_t
3063 _user_kill_team(team_id team)
3064 {
3065 	return kill_team(team);
3066 }
3067 
3068 
3069 status_t
3070 _user_get_team_info(team_id id, team_info *userInfo)
3071 {
3072 	status_t status;
3073 	team_info info;
3074 
3075 	if (!IS_USER_ADDRESS(userInfo))
3076 		return B_BAD_ADDRESS;
3077 
3078 	status = _get_team_info(id, &info, sizeof(team_info));
3079 	if (status == B_OK) {
3080 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3081 			return B_BAD_ADDRESS;
3082 	}
3083 
3084 	return status;
3085 }
3086 
3087 
3088 status_t
3089 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3090 {
3091 	status_t status;
3092 	team_info info;
3093 	int32 cookie;
3094 
3095 	if (!IS_USER_ADDRESS(userCookie)
3096 		|| !IS_USER_ADDRESS(userInfo)
3097 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3098 		return B_BAD_ADDRESS;
3099 
3100 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3101 	if (status != B_OK)
3102 		return status;
3103 
3104 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3105 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3106 		return B_BAD_ADDRESS;
3107 
3108 	return status;
3109 }
3110 
3111 
3112 team_id
3113 _user_get_current_team(void)
3114 {
3115 	return team_get_current_team_id();
3116 }
3117 
3118 
3119 status_t
3120 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3121 {
3122 	team_usage_info info;
3123 	status_t status;
3124 
3125 	if (!IS_USER_ADDRESS(userInfo))
3126 		return B_BAD_ADDRESS;
3127 
3128 	status = _get_team_usage_info(team, who, &info, size);
3129 	if (status != B_OK)
3130 		return status;
3131 
3132 	if (user_memcpy(userInfo, &info, size) < B_OK)
3133 		return B_BAD_ADDRESS;
3134 
3135 	return status;
3136 }
3137 
3138