xref: /haiku/src/system/kernel/team.cpp (revision 58481f0f6ef1a61ba07283f012cafbc2ed874ead)
1 /*
2  * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
3  * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8  */
9 
10 /*!	Team functions */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/wait.h>
16 
17 #include <OS.h>
18 
19 #include <AutoDeleter.h>
20 #include <FindDirectory.h>
21 
22 #include <boot_device.h>
23 #include <elf.h>
24 #include <file_cache.h>
25 #include <fs/KPath.h>
26 #include <heap.h>
27 #include <int.h>
28 #include <kernel.h>
29 #include <kimage.h>
30 #include <kscheduler.h>
31 #include <ksignal.h>
32 #include <Notifications.h>
33 #include <port.h>
34 #include <posix/realtime_sem.h>
35 #include <posix/xsi_semaphore.h>
36 #include <sem.h>
37 #include <syscall_process_info.h>
38 #include <syscall_restart.h>
39 #include <syscalls.h>
40 #include <team.h>
41 #include <tls.h>
42 #include <tracing.h>
43 #include <user_runtime.h>
44 #include <user_thread.h>
45 #include <usergroup.h>
46 #include <vfs.h>
47 #include <vm.h>
48 #include <vm_address_space.h>
49 #include <util/AutoLock.h>
50 #include <util/khash.h>
51 
52 //#define TRACE_TEAM
53 #ifdef TRACE_TEAM
54 #	define TRACE(x) dprintf x
55 #else
56 #	define TRACE(x) ;
57 #endif
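// TRACE() takes a doubly parenthesized argument list so that the whole
// dprintf() argument list survives the macro expansion, e.g.
//	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));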
58 
59 
60 struct team_key {
61 	team_id id;
62 };
63 
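// Arguments passed to the main thread of a newly loaded team. The path and
// the flat argument buffer are owned by this structure and are released by
// free_team_arg().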
64 struct team_arg {
65 	char	*path;
66 	char	**flat_args;
67 	size_t	flat_args_size;
68 	uint32	arg_count;
69 	uint32	env_count;
70 	port_id	error_port;
71 	uint32	error_token;
72 };
73 
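// State handed from fork_team() to fork_team_thread_start(): the child's user
// stack and TLS info, the signal block mask, and the parent's CPU frame saved
// by arch_store_fork_frame().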
74 struct fork_arg {
75 	area_id		user_stack_area;
76 	addr_t		user_stack_base;
77 	size_t		user_stack_size;
78 	addr_t		user_local_storage;
79 	sigset_t	sig_block_mask;
80 	struct user_thread* user_thread;
81 
82 	struct arch_fork_arg arch_info;
83 };
84 
85 class TeamNotificationService : public DefaultNotificationService {
86 public:
87 							TeamNotificationService();
88 
89 			void			Notify(uint32 eventCode, struct team* team);
90 };
91 
92 
93 static hash_table *sTeamHash = NULL;
94 static hash_table *sGroupHash = NULL;
95 static struct team *sKernelTeam = NULL;
96 
97 // some arbitrarily chosen limits - should probably depend on the available
98 // memory (the limit is not yet enforced)
99 static int32 sMaxTeams = 2048;
100 static int32 sUsedTeams = 1;
101 
102 static TeamNotificationService sNotificationService;
103 
104 spinlock gTeamSpinlock = B_SPINLOCK_INITIALIZER;
105 
106 
107 // #pragma mark - Tracing
108 
109 
110 #if TEAM_TRACING
111 namespace TeamTracing {
112 
113 class TeamForked : public AbstractTraceEntry {
114 public:
115 	TeamForked(thread_id forkedThread)
116 		:
117 		fForkedThread(forkedThread)
118 	{
119 		Initialized();
120 	}
121 
122 	virtual void AddDump(TraceOutput& out)
123 	{
124 		out.Print("team forked, new thread %ld", fForkedThread);
125 	}
126 
127 private:
128 	thread_id			fForkedThread;
129 };
130 
131 
132 class ExecTeam : public AbstractTraceEntry {
133 public:
134 	ExecTeam(const char* path, int32 argCount, const char* const* args,
135 			int32 envCount, const char* const* env)
136 		:
137 		fArgCount(argCount),
138 		fArgs(NULL)
139 	{
140 		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
141 			false);
142 
143 		// determine the buffer size we need for the args
144 		size_t argBufferSize = 0;
145 		for (int32 i = 0; i < argCount; i++)
146 			argBufferSize += strlen(args[i]) + 1;
147 
148 		// allocate a buffer
149 		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
150 		if (fArgs) {
151 			char* buffer = fArgs;
152 			for (int32 i = 0; i < argCount; i++) {
153 				size_t argSize = strlen(args[i]) + 1;
154 				memcpy(buffer, args[i], argSize);
155 				buffer += argSize;
156 			}
157 		}
158 
159 		// ignore env for the time being
160 		(void)envCount;
161 		(void)env;
162 
163 		Initialized();
164 	}
165 
166 	virtual void AddDump(TraceOutput& out)
167 	{
168 		out.Print("team exec, \"%s\", args:", fPath);
169 
170 		if (fArgs != NULL) {
171 			char* args = fArgs;
172 			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
173 				out.Print(" \"%s\"", args);
174 				args += strlen(args) + 1;
175 			}
176 		} else
177 			out.Print(" <too long>");
178 	}
179 
180 private:
181 	char*	fPath;
182 	int32	fArgCount;
183 	char*	fArgs;
184 };
185 
186 
187 static const char*
188 job_control_state_name(job_control_state state)
189 {
190 	switch (state) {
191 		case JOB_CONTROL_STATE_NONE:
192 			return "none";
193 		case JOB_CONTROL_STATE_STOPPED:
194 			return "stopped";
195 		case JOB_CONTROL_STATE_CONTINUED:
196 			return "continued";
197 		case JOB_CONTROL_STATE_DEAD:
198 			return "dead";
199 		default:
200 			return "invalid";
201 	}
202 }
203 
204 
205 class SetJobControlState : public AbstractTraceEntry {
206 public:
207 	SetJobControlState(team_id team, job_control_state newState, int signal)
208 		:
209 		fTeam(team),
210 		fNewState(newState),
211 		fSignal(signal)
212 	{
213 		Initialized();
214 	}
215 
216 	virtual void AddDump(TraceOutput& out)
217 	{
218 		out.Print("team set job control state, team %ld, "
219 			"new state: %s, signal: %d",
220 			fTeam, job_control_state_name(fNewState), fSignal);
221 	}
222 
223 private:
224 	team_id				fTeam;
225 	job_control_state	fNewState;
226 	int					fSignal;
227 };
228 
229 
230 class WaitForChild : public AbstractTraceEntry {
231 public:
232 	WaitForChild(pid_t child, uint32 flags)
233 		:
234 		fChild(child),
235 		fFlags(flags)
236 	{
237 		Initialized();
238 	}
239 
240 	virtual void AddDump(TraceOutput& out)
241 	{
242 		out.Print("team wait for child, child: %ld, "
243 			"flags: 0x%lx", fChild, fFlags);
244 	}
245 
246 private:
247 	pid_t	fChild;
248 	uint32	fFlags;
249 };
250 
251 
252 class WaitForChildDone : public AbstractTraceEntry {
253 public:
254 	WaitForChildDone(const job_control_entry& entry)
255 		:
256 		fState(entry.state),
257 		fTeam(entry.thread),
258 		fStatus(entry.status),
259 		fReason(entry.reason),
260 		fSignal(entry.signal)
261 	{
262 		Initialized();
263 	}
264 
265 	WaitForChildDone(status_t error)
266 		:
267 		fTeam(error)
268 	{
269 		Initialized();
270 	}
271 
272 	virtual void AddDump(TraceOutput& out)
273 	{
274 		if (fTeam >= 0) {
275 			out.Print("team wait for child done, team: %ld, "
276 				"state: %s, status: 0x%lx, reason: 0x%x, signal: %d\n",
277 				fTeam, job_control_state_name(fState), fStatus, fReason,
278 				fSignal);
279 		} else {
280 			out.Print("team wait for child failed, error: "
281 				"0x%lx, ", fTeam);
282 		}
283 	}
284 
285 private:
286 	job_control_state	fState;
287 	team_id				fTeam;
288 	status_t			fStatus;
289 	uint16				fReason;
290 	uint16				fSignal;
291 };
292 
293 }	// namespace TeamTracing
294 
295 #	define T(x) new(std::nothrow) TeamTracing::x;
296 #else
297 #	define T(x) ;
298 #endif
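// T() records a tracing entry when TEAM_TRACING is enabled, e.g.
//	T(TeamForked(threadID));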
299 
300 
301 //	#pragma mark - TeamNotificationService
302 
303 
304 TeamNotificationService::TeamNotificationService()
305 	: DefaultNotificationService("teams")
306 {
307 }
308 
309 
310 void
311 TeamNotificationService::Notify(uint32 eventCode, struct team* team)
312 {
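	// Build the notification message in a small on-stack buffer; KMessage
	// writes into the buffer handed to SetTo(), so no heap allocation is
	// needed here.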
313 	char eventBuffer[128];
314 	KMessage event;
315 	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
316 	event.AddInt32("event", eventCode);
317 	event.AddInt32("team", team->id);
318 	event.AddPointer("teamStruct", team);
319 
320 	DefaultNotificationService::Notify(event, eventCode);
321 }
322 
323 
324 //	#pragma mark - Private functions
325 
326 
327 static void
328 _dump_team_info(struct team *team)
329 {
330 	kprintf("TEAM: %p\n", team);
331 	kprintf("id:          %ld (%#lx)\n", team->id, team->id);
332 	kprintf("name:        '%s'\n", team->name);
333 	kprintf("args:        '%s'\n", team->args);
334 	kprintf("next:        %p\n", team->next);
335 	kprintf("parent:      %p", team->parent);
336 	if (team->parent != NULL) {
337 		kprintf(" (id = %ld)\n", team->parent->id);
338 	} else
339 		kprintf("\n");
340 
341 	kprintf("children:    %p\n", team->children);
342 	kprintf("num_threads: %d\n", team->num_threads);
343 	kprintf("state:       %d\n", team->state);
344 	kprintf("flags:       0x%lx\n", team->flags);
345 	kprintf("io_context:  %p\n", team->io_context);
346 	if (team->address_space)
347 		kprintf("address_space: %p\n", team->address_space);
348 	kprintf("main_thread: %p\n", team->main_thread);
349 	kprintf("thread_list: %p\n", team->thread_list);
350 	kprintf("group_id:    %ld\n", team->group_id);
351 	kprintf("session_id:  %ld\n", team->session_id);
352 }
353 
354 
355 static int
356 dump_team_info(int argc, char **argv)
357 {
358 	struct hash_iterator iterator;
359 	struct team *team;
360 	team_id id = -1;
361 	bool found = false;
362 
363 	if (argc < 2) {
364 		struct thread* thread = thread_get_current_thread();
365 		if (thread != NULL && thread->team != NULL)
366 			_dump_team_info(thread->team);
367 		else
368 			kprintf("No current team!\n");
369 		return 0;
370 	}
371 
372 	id = strtoul(argv[1], NULL, 0);
373 	if (IS_KERNEL_ADDRESS(id)) {
374 		// semi-hack
375 		_dump_team_info((struct team *)id);
376 		return 0;
377 	}
378 
379 	// walk through the team hash table, trying to match name or id
380 	hash_open(sTeamHash, &iterator);
381 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
382 		if ((team->name && strcmp(argv[1], team->name) == 0) || team->id == id) {
383 			_dump_team_info(team);
384 			found = true;
385 			break;
386 		}
387 	}
388 	hash_close(sTeamHash, &iterator, false);
389 
390 	if (!found)
391 		kprintf("team \"%s\" (%ld) doesn't exist!\n", argv[1], id);
392 	return 0;
393 }
394 
395 
396 static int
397 dump_teams(int argc, char **argv)
398 {
399 	struct hash_iterator iterator;
400 	struct team *team;
401 
402 	kprintf("team           id  parent      name\n");
403 	hash_open(sTeamHash, &iterator);
404 
405 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
406 		kprintf("%p%7ld  %p  %s\n", team, team->id, team->parent, team->name);
407 	}
408 
409 	hash_close(sTeamHash, &iterator, false);
410 	return 0;
411 }
412 
413 
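// Hash table callbacks for sTeamHash: the hash table passes either an element
// (non-NULL first argument) or just a key, so both callbacks have to cope with
// either form. The same pattern is used for sGroupHash below.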
414 static int
415 team_struct_compare(void *_p, const void *_key)
416 {
417 	struct team *p = (struct team*)_p;
418 	const struct team_key *key = (const struct team_key*)_key;
419 
420 	if (p->id == key->id)
421 		return 0;
422 
423 	return 1;
424 }
425 
426 
427 static uint32
428 team_struct_hash(void *_p, const void *_key, uint32 range)
429 {
430 	struct team *p = (struct team*)_p;
431 	const struct team_key *key = (const struct team_key*)_key;
432 
433 	if (p != NULL)
434 		return p->id % range;
435 
436 	return (uint32)key->id % range;
437 }
438 
439 
440 static int
441 process_group_compare(void *_group, const void *_key)
442 {
443 	struct process_group *group = (struct process_group*)_group;
444 	const struct team_key *key = (const struct team_key*)_key;
445 
446 	if (group->id == key->id)
447 		return 0;
448 
449 	return 1;
450 }
451 
452 
453 static uint32
454 process_group_hash(void *_group, const void *_key, uint32 range)
455 {
456 	struct process_group *group = (struct process_group*)_group;
457 	const struct team_key *key = (const struct team_key*)_key;
458 
459 	if (group != NULL)
460 		return group->id % range;
461 
462 	return (uint32)key->id % range;
463 }
464 
465 
466 static void
467 insert_team_into_parent(struct team *parent, struct team *team)
468 {
469 	ASSERT(parent != NULL);
470 
471 	team->siblings_next = parent->children;
472 	parent->children = team;
473 	team->parent = parent;
474 }
475 
476 
477 /*!	Note: must have team lock held */
478 static void
479 remove_team_from_parent(struct team *parent, struct team *team)
480 {
481 	struct team *child, *last = NULL;
482 
483 	for (child = parent->children; child != NULL; child = child->siblings_next) {
484 		if (child == team) {
485 			if (last == NULL)
486 				parent->children = child->siblings_next;
487 			else
488 				last->siblings_next = child->siblings_next;
489 
490 			team->parent = NULL;
491 			break;
492 		}
493 		last = child;
494 	}
495 }
496 
497 
498 /*!	Reparents each of the team's children to the kernel team.
499 	Note: must have team lock held
500 */
501 static void
502 reparent_children(struct team *team)
503 {
504 	struct team *child;
505 
506 	while ((child = team->children) != NULL) {
507 		// remove the child from the current team and add it to the kernel team
508 		remove_team_from_parent(team, child);
509 		insert_team_into_parent(sKernelTeam, child);
510 	}
511 
512 	// move job control entries too
513 	sKernelTeam->stopped_children->entries.MoveFrom(
514 		&team->stopped_children->entries);
515 	sKernelTeam->continued_children->entries.MoveFrom(
516 		&team->continued_children->entries);
517 
518 	// Note, we don't move the dead children entries. Those will be deleted
519 	// when the team structure is deleted.
520 }
521 
522 
523 static bool
524 is_session_leader(struct team *team)
525 {
526 	return team->session_id == team->id;
527 }
528 
529 
530 static bool
531 is_process_group_leader(struct team *team)
532 {
533 	return team->group_id == team->id;
534 }
535 
536 
537 static void
538 deferred_delete_process_group(struct process_group *group)
539 {
540 	if (group == NULL)
541 		return;
542 
543 	// remove_group_from_session() keeps this pointer around
544 	// only if the session can be freed as well
545 	if (group->session) {
546 		TRACE(("deferred_delete_process_group(): frees session %ld\n",
547 			group->session->id));
548 		deferred_free(group->session);
549 	}
550 
551 	deferred_free(group);
552 }
553 
554 
555 /*!	Removes a group from its session. If this was the session's last group,
556 	the session will be freed together with the group later on.
557 	You must hold the team lock when calling this function.
558 */
559 static void
560 remove_group_from_session(struct process_group *group)
561 {
562 	struct process_session *session = group->session;
563 
564 	// the group must be in a session for this function to have any effect
565 	if (session == NULL)
566 		return;
567 
568 	hash_remove(sGroupHash, group);
569 
570 	// We cannot free the session here; if this was its last group we keep the
571 	// link around, so that deferred_delete_process_group() will free it, too.
572 	if (--session->group_count > 0)
573 		group->session = NULL;
574 }
575 
576 
577 /*!	Team lock must be held.
578 */
579 static void
580 acquire_process_group_ref(pid_t groupID)
581 {
582 	process_group* group = team_get_process_group_locked(NULL, groupID);
583 	if (group == NULL) {
584 		panic("acquire_process_group_ref(): unknown group ID: %ld", groupID);
585 		return;
586 	}
587 
588 	group->refs++;
589 }
590 
591 
592 /*!	Team lock must be held.
593 */
594 static void
595 release_process_group_ref(pid_t groupID)
596 {
597 	process_group* group = team_get_process_group_locked(NULL, groupID);
598 	if (group == NULL) {
599 		panic("release_process_group_ref(): unknown group ID: %ld", groupID);
600 		return;
601 	}
602 
603 	if (group->refs <= 0) {
604 		panic("release_process_group_ref(%ld): ref count already 0", groupID);
605 		return;
606 	}
607 
608 	if (--group->refs > 0)
609 		return;
610 
611 	// group is no longer used
612 
613 	remove_group_from_session(group);
614 	deferred_delete_process_group(group);
615 }
616 
617 
618 /*!	You must hold the team lock when calling this function. */
619 static void
620 insert_group_into_session(struct process_session *session, struct process_group *group)
621 {
622 	if (group == NULL)
623 		return;
624 
625 	group->session = session;
626 	hash_insert(sGroupHash, group);
627 	session->group_count++;
628 }
629 
630 
631 /*!	You must hold the team lock when calling this function. */
632 static void
633 insert_team_into_group(struct process_group *group, struct team *team)
634 {
635 	team->group = group;
636 	team->group_id = group->id;
637 	team->session_id = group->session->id;
638 
639 	team->group_next = group->teams;
640 	group->teams = team;
641 	acquire_process_group_ref(group->id);
642 }
643 
644 
645 /*!	Removes the team from the group.
646 
647 	\param team the team that'll be removed from its group
648 */
649 static void
650 remove_team_from_group(struct team *team)
651 {
652 	struct process_group *group = team->group;
653 	struct team *current, *last = NULL;
654 
655 	// the team must be in a group for this function to have any effect
656 	if (group == NULL)
657 		return;
658 
659 	for (current = group->teams; current != NULL; current = current->group_next) {
660 		if (current == team) {
661 			if (last == NULL)
662 				group->teams = current->group_next;
663 			else
664 				last->group_next = current->group_next;
665 
666 			team->group = NULL;
667 			break;
668 		}
669 		last = current;
670 	}
671 
672 	team->group = NULL;
673 	team->group_next = NULL;
674 
675 	release_process_group_ref(group->id);
676 }
677 
678 
679 static struct process_group *
680 create_process_group(pid_t id)
681 {
682 	struct process_group *group = (struct process_group *)malloc(sizeof(struct process_group));
683 	if (group == NULL)
684 		return NULL;
685 
686 	group->id = id;
687 	group->refs = 0;
688 	group->session = NULL;
689 	group->teams = NULL;
690 	group->orphaned = true;
691 	return group;
692 }
693 
694 
695 static struct process_session *
696 create_process_session(pid_t id)
697 {
698 	struct process_session *session
699 		= (struct process_session *)malloc(sizeof(struct process_session));
700 	if (session == NULL)
701 		return NULL;
702 
703 	session->id = id;
704 	session->group_count = 0;
705 	session->controlling_tty = -1;
706 	session->foreground_group = -1;
707 
708 	return session;
709 }
710 
711 
712 static void
713 set_team_name(struct team* team, const char* name)
714 {
715 	if (const char* lastSlash = strrchr(name, '/'))
716 		name = lastSlash + 1;
717 
718 	strlcpy(team->name, name, B_OS_NAME_LENGTH);
719 }
720 
721 
722 static struct team *
723 create_team_struct(const char *name, bool kernel)
724 {
725 	struct team *team = (struct team *)malloc(sizeof(struct team));
726 	if (team == NULL)
727 		return NULL;
728 	MemoryDeleter teamDeleter(team);
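	// teamDeleter and the deleters below free everything allocated so far
	// should we have to bail out early; they are Detach()ed at the end of the
	// function once all allocations have succeeded.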
729 
730 	team->next = team->siblings_next = team->children = team->parent = NULL;
731 	team->id = allocate_thread_id();
732 	set_team_name(team, name);
733 	team->args[0] = '\0';
734 	team->num_threads = 0;
735 	team->io_context = NULL;
736 	team->address_space = NULL;
737 	team->realtime_sem_context = NULL;
738 	team->xsi_sem_context = NULL;
739 	team->thread_list = NULL;
740 	team->main_thread = NULL;
741 	team->loading_info = NULL;
742 	team->state = TEAM_STATE_BIRTH;
743 	team->flags = 0;
744 	team->death_sem = -1;
745 	team->user_data_area = -1;
746 	team->user_data = 0;
747 	team->used_user_data = 0;
748 	team->user_data_size = 0;
749 	team->free_user_threads = NULL;
750 
751 	team->supplementary_groups = NULL;
752 	team->supplementary_group_count = 0;
753 
754 	team->dead_threads_kernel_time = 0;
755 	team->dead_threads_user_time = 0;
756 
757 	// dead threads
758 	list_init(&team->dead_threads);
759 	team->dead_threads_count = 0;
760 
761 	// dead children
762 	team->dead_children = new(nothrow) team_dead_children;
763 	if (team->dead_children == NULL)
764 		return NULL;
765 	ObjectDeleter<team_dead_children> deadChildrenDeleter(team->dead_children);
766 
767 	team->dead_children->count = 0;
768 	team->dead_children->kernel_time = 0;
769 	team->dead_children->user_time = 0;
770 
771 	// stopped children
772 	team->stopped_children = new(nothrow) team_job_control_children;
773 	if (team->stopped_children == NULL)
774 		return NULL;
775 	ObjectDeleter<team_job_control_children> stoppedChildrenDeleter(
776 		team->stopped_children);
777 
778 	// continued children
779 	team->continued_children = new(nothrow) team_job_control_children;
780 	if (team->continued_children == NULL)
781 		return NULL;
782 	ObjectDeleter<team_job_control_children> continuedChildrenDeleter(
783 		team->continued_children);
784 
785 	// job control entry
786 	team->job_control_entry = new(nothrow) job_control_entry;
787 	if (team->job_control_entry == NULL)
788 		return NULL;
789 	ObjectDeleter<job_control_entry> jobControlEntryDeleter(
790 		team->job_control_entry);
791 	team->job_control_entry->state = JOB_CONTROL_STATE_NONE;
792 	team->job_control_entry->thread = team->id;
793 	team->job_control_entry->team = team;
794 
795 	list_init(&team->image_list);
796 	list_init(&team->watcher_list);
797 
798 	clear_team_debug_info(&team->debug_info, true);
799 
800 	if (arch_team_init_team_struct(team, kernel) < 0)
801 		return NULL;
802 
803 	// publish dead/stopped/continued children condition vars
804 	team->dead_children->condition_variable.Init(team->dead_children,
805 		"team children");
806 
807 	// keep all allocated structures
808 	jobControlEntryDeleter.Detach();
809 	continuedChildrenDeleter.Detach();
810 	stoppedChildrenDeleter.Detach();
811 	deadChildrenDeleter.Detach();
812 	teamDeleter.Detach();
813 
814 	return team;
815 }
816 
817 
818 static void
819 delete_team_struct(struct team *team)
820 {
821 	while (death_entry* threadDeathEntry = (death_entry*)list_remove_head_item(
822 			&team->dead_threads)) {
823 		free(threadDeathEntry);
824 	}
825 
826 	while (job_control_entry* entry = team->dead_children->entries.RemoveHead())
827 		delete entry;
828 
829 	while (free_user_thread* entry = team->free_user_threads) {
830 		team->free_user_threads = entry->next;
831 		free(entry);
832 	}
833 
834 	malloc_referenced_release(team->supplementary_groups);
835 
836 	delete team->job_control_entry;
837 		// usually already NULL and transferred to the parent
838 	delete team->continued_children;
839 	delete team->stopped_children;
840 	delete team->dead_children;
841 	free(team);
842 }
843 
844 
845 static status_t
846 create_team_user_data(struct team* team)
847 {
848 	void* address = (void*)KERNEL_USER_DATA_BASE;
849 	size_t size = 4 * B_PAGE_SIZE;
850 	team->user_data_area = create_area_etc(team->id, "user area", &address,
851 		B_BASE_ADDRESS, size, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0);
852 	if (team->user_data_area < 0)
853 		return team->user_data_area;
854 
855 	team->user_data = (addr_t)address;
856 	team->used_user_data = 0;
857 	team->user_data_size = size;
858 	team->free_user_threads = NULL;
859 
860 	return B_OK;
861 }
862 
863 
864 static void
865 delete_team_user_data(struct team* team)
866 {
867 	if (team->user_data_area >= 0) {
868 		vm_delete_area(team->id, team->user_data_area, true);
869 		team->user_data = 0;
870 		team->used_user_data = 0;
871 		team->user_data_size = 0;
872 		team->user_data_area = -1;
873 		while (free_user_thread* entry = team->free_user_threads) {
874 			team->free_user_threads = entry->next;
875 			free(entry);
876 		}
877 	}
878 }
879 
880 
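/*!	Copies the flattened argument/environment block from userland into kernel
	memory and relocates the pointers it contains.
	The flat buffer is expected to hold \a argCount + 1 argument pointers
	(NULL terminated), followed by \a envCount + 1 environment pointers
	(NULL terminated), followed by the string data the pointers refer to.
*/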
881 static status_t
882 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
883 	int32 argCount, int32 envCount, char**& _flatArgs)
884 {
885 	if (argCount < 0 || envCount < 0)
886 		return B_BAD_VALUE;
887 
888 	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
889 		return B_TOO_MANY_ARGS;
890 	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
891 		return B_BAD_VALUE;
892 
893 	if (!IS_USER_ADDRESS(userFlatArgs))
894 		return B_BAD_ADDRESS;
895 
896 	// allocate kernel memory
897 	char** flatArgs = (char**)malloc(flatArgsSize);
898 	if (flatArgs == NULL)
899 		return B_NO_MEMORY;
900 
901 	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
902 		free(flatArgs);
903 		return B_BAD_ADDRESS;
904 	}
905 
906 	// check and relocate the array
907 	status_t error = B_OK;
908 	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
909 	const char* stringEnd = (char*)flatArgs + flatArgsSize;
910 	for (int32 i = 0; i < argCount + envCount + 2; i++) {
911 		if (i == argCount || i == argCount + envCount + 1) {
912 			// check array null termination
913 			if (flatArgs[i] != NULL) {
914 				error = B_BAD_VALUE;
915 				break;
916 			}
917 		} else {
918 			// check string
919 			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
920 			size_t maxLen = stringEnd - arg;
921 			if (arg < stringBase || arg >= stringEnd
922 					|| strnlen(arg, maxLen) == maxLen) {
923 				error = B_BAD_VALUE;
924 				break;
925 			}
926 
927 			flatArgs[i] = arg;
928 		}
929 	}
930 
931 	if (error == B_OK)
932 		_flatArgs = flatArgs;
933 	else
934 		free(flatArgs);
935 
936 	return error;
937 }
938 
939 
940 static void
941 free_team_arg(struct team_arg *teamArg)
942 {
943 	if (teamArg != NULL) {
944 		free(teamArg->flat_args);
945 		free(teamArg->path);
946 		free(teamArg);
947 	}
948 }
949 
950 
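/*!	Allocates a team_arg structure and copies \a path into it. On success the
	\a flatArgs buffer is owned by the returned structure and will be released
	by free_team_arg().
*/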
951 static status_t
952 create_team_arg(struct team_arg **_teamArg, const char *path, char** flatArgs,
953 	size_t flatArgsSize, int32 argCount, int32 envCount, port_id port,
954 	uint32 token)
955 {
956 	struct team_arg *teamArg = (struct team_arg*)malloc(sizeof(team_arg));
957 	if (teamArg == NULL)
958 		return B_NO_MEMORY;
959 
960 	teamArg->path = strdup(path);
961 	if (teamArg->path == NULL) {
962 		free(teamArg);
963 		return B_NO_MEMORY;
964 	}
965 
966 	// copy the args over
967 
968 	teamArg->flat_args = flatArgs;
969 	teamArg->flat_args_size = flatArgsSize;
970 	teamArg->arg_count = argCount;
971 	teamArg->env_count = envCount;
972 	teamArg->error_port = port;
973 	teamArg->error_token = token;
974 
975 	*_teamArg = teamArg;
976 	return B_OK;
977 }
978 
979 
980 static int32
981 team_create_thread_start(void *args)
982 {
983 	status_t err;
984 	struct thread *t;
985 	struct team *team;
986 	struct team_arg *teamArgs = (struct team_arg*)args;
987 	const char *path;
988 	addr_t entry;
989 	char ustack_name[128];
990 	uint32 sizeLeft;
991 	char **userArgs;
992 	char **userEnv;
993 	struct user_space_program_args *programArgs;
994 	uint32 argCount, envCount, i;
995 
996 	t = thread_get_current_thread();
997 	team = t->team;
998 	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);
999 
1000 	TRACE(("team_create_thread_start: entry thread %ld\n", t->id));
1001 
1002 	// get a user thread for the main thread
1003 	t->user_thread = team_allocate_user_thread(team);
1004 
1005 	// create an initial primary stack area
1006 
1007 	// Main stack area layout is currently as follows (starting from 0):
1008 	//
1009 	// size								| usage
1010 	// ---------------------------------+--------------------------------
1011 	// USER_MAIN_THREAD_STACK_SIZE		| actual stack
1012 	// TLS_SIZE							| TLS data
1013 	// sizeof(user_space_program_args)	| argument structure for the runtime
1014 	//									| loader
1015 	// flat arguments size				| flat process arguments and environment
1016 
1017 	// ToDo: ENV_SIZE is a) limited, and b) not used after libroot copied it to the heap
1018 	// ToDo: we could reserve the whole USER_STACK_REGION upfront...
1019 
1020 	sizeLeft = PAGE_ALIGN(USER_MAIN_THREAD_STACK_SIZE
1021 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE + TLS_SIZE
1022 		+ sizeof(struct user_space_program_args) + teamArgs->flat_args_size);
1023 	t->user_stack_base = USER_STACK_REGION + USER_STACK_REGION_SIZE - sizeLeft;
1024 	t->user_stack_size = USER_MAIN_THREAD_STACK_SIZE
1025 		+ USER_STACK_GUARD_PAGES * B_PAGE_SIZE;
1026 		// the exact location at the end of the user stack area
1027 
1028 	sprintf(ustack_name, "%s_main_stack", team->name);
1029 	t->user_stack_area = create_area_etc(team->id, ustack_name,
1030 		(void **)&t->user_stack_base, B_EXACT_ADDRESS, sizeLeft, B_NO_LOCK,
1031 		B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 0);
1032 	if (t->user_stack_area < 0) {
1033 		dprintf("team_create_thread_start: could not create default user stack "
1034 			"region: %s\n", strerror(t->user_stack_area));
1035 
1036 		free_team_arg(teamArgs);
1037 		return t->user_stack_area;
1038 	}
1039 
1040 	// now that the TLS area is allocated, initialize TLS
1041 	arch_thread_init_tls(t);
1042 
1043 	argCount = teamArgs->arg_count;
1044 	envCount = teamArgs->env_count;
1045 
1046 	programArgs = (struct user_space_program_args *)(t->user_stack_base
1047 		+ t->user_stack_size + TLS_SIZE);
1048 
1049 	userArgs = (char**)(programArgs + 1);
1050 	userEnv = userArgs + argCount + 1;
1051 	path = teamArgs->path;
1052 
1053 	if (user_strlcpy(programArgs->program_path, path,
1054 				sizeof(programArgs->program_path)) < B_OK
1055 		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
1056 		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char **)) < B_OK
1057 		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
1058 		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char **)) < B_OK
1059 		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
1060 				sizeof(port_id)) < B_OK
1061 		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
1062 				sizeof(uint32)) < B_OK
1063 		|| user_memcpy(userArgs, teamArgs->flat_args,
1064 				teamArgs->flat_args_size) < B_OK) {
1065 		// the team deletion process will clean this mess
1066 		return B_BAD_ADDRESS;
1067 	}
1068 
1069 	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));
1070 
1071 	// add args to info member
1072 	team->args[0] = 0;
1073 	strlcpy(team->args, path, sizeof(team->args));
1074 	for (i = 1; i < argCount; i++) {
1075 		strlcat(team->args, " ", sizeof(team->args));
1076 		strlcat(team->args, teamArgs->flat_args[i], sizeof(team->args));
1077 	}
1078 
1079 	free_team_arg(teamArgs);
1080 		// the arguments are already on the user stack, we no longer need
1081 		// them in this form
1082 
1083 	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
1084 	// automatic variables with function scope will never be destroyed.
1085 	{
1086 		// find runtime_loader path
1087 		KPath runtimeLoaderPath;
1088 		err = find_directory(B_BEOS_SYSTEM_DIRECTORY, gBootDevice, false,
1089 			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
1090 		if (err < B_OK) {
1091 			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1092 				strerror(err)));
1093 			return err;
1094 		}
1095 		runtimeLoaderPath.UnlockBuffer();
1096 		err = runtimeLoaderPath.Append("runtime_loader");
1097 
1098 		if (err == B_OK)
1099 			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0, &entry);
1100 	}
1101 
1102 	if (err < B_OK) {
1103 		// Luckily, we don't have to clean up the mess we created - that's
1104 		// done for us by the normal team deletion process
1105 		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1106 			"%s\n", strerror(err)));
1107 		return err;
1108 	}
1109 
1110 	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));
1111 
1112 	team->state = TEAM_STATE_NORMAL;
1113 
1114 	// jump to the entry point in user space
1115 	return arch_thread_enter_userspace(t, entry, programArgs, NULL);
1116 		// only returns in case of error
1117 }
1118 
1119 
1120 static thread_id
1121 load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
1122 	int32 envCount, int32 priority, team_id parentID, uint32 flags,
1123 	port_id errorPort, uint32 errorToken)
1124 {
1125 	char** flatArgs = _flatArgs;
1126 	struct team *team;
1127 	const char *threadName;
1128 	thread_id thread;
1129 	status_t status;
1130 	cpu_status state;
1131 	struct team_arg *teamArgs;
1132 	struct team_loading_info loadingInfo;
1133 	io_context* parentIOContext = NULL;
1134 
1135 	if (flatArgs == NULL || argCount == 0)
1136 		return B_BAD_VALUE;
1137 
1138 	const char* path = flatArgs[0];
1139 
1140 	TRACE(("load_image_internal: name '%s', args = %p, argCount = %ld\n",
1141 		path, flatArgs, argCount));
1142 
1143 	team = create_team_struct(path, false);
1144 	if (team == NULL)
1145 		return B_NO_MEMORY;
1146 
1147 	if (flags & B_WAIT_TILL_LOADED) {
1148 		loadingInfo.thread = thread_get_current_thread();
1149 		loadingInfo.result = B_ERROR;
1150 		loadingInfo.done = false;
1151 		team->loading_info = &loadingInfo;
1152 	}
1153 
1154 	InterruptsSpinLocker teamLocker(gTeamSpinlock);
1155 
1156 	// get the parent team
1157 	struct team* parent;
1158 
1159 	if (parentID == B_CURRENT_TEAM)
1160 		parent = thread_get_current_thread()->team;
1161 	else
1162 		parent = team_get_team_struct_locked(parentID);
1163 
1164 	if (parent == NULL) {
1165 		teamLocker.Unlock();
1166 		status = B_BAD_TEAM_ID;
1167 		goto err0;
1168 	}
1169 
1170 	// inherit the parent's user/group
1171 	inherit_parent_user_and_group_locked(team, parent);
1172 
1173 	hash_insert(sTeamHash, team);
1174 	insert_team_into_parent(parent, team);
1175 	insert_team_into_group(parent->group, team);
1176 	sUsedTeams++;
1177 
1178 	// get a reference to the parent's I/O context -- we need it to create ours
1179 	parentIOContext = parent->io_context;
1180 	vfs_get_io_context(parentIOContext);
1181 
1182 	teamLocker.Unlock();
1183 
1184 	// check the executable's set-user/group-id permission
1185 	update_set_id_user_and_group(team, path);
1186 
1187 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1188 		envCount, errorPort, errorToken);
1189 
1190 	if (status != B_OK)
1191 		goto err1;
1192 
1193 	_flatArgs = NULL;
1194 		// args are owned by the team_arg structure now
1195 
1196 	// create a new io_context for this team
1197 	team->io_context = vfs_new_io_context(parentIOContext);
1198 	if (!team->io_context) {
1199 		status = B_NO_MEMORY;
1200 		goto err2;
1201 	}
1202 
1203 	// We don't need the parent's I/O context any longer.
1204 	vfs_put_io_context(parentIOContext);
1205 	parentIOContext = NULL;
1206 
1207 	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1208 	vfs_exec_io_context(team->io_context);
1209 
1210 	// create an address space for this team
1211 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1212 		&team->address_space);
1213 	if (status < B_OK)
1214 		goto err3;
1215 
1216 	// cut the path from the main thread name
1217 	threadName = strrchr(path, '/');
1218 	if (threadName != NULL)
1219 		threadName++;
1220 	else
1221 		threadName = path;
1222 
1223 	// create the user data area
1224 	status = create_team_user_data(team);
1225 	if (status != B_OK)
1226 		goto err4;
1227 
1228 	// notify team listeners
1229 	sNotificationService.Notify(TEAM_ADDED, team);
1230 
1231 	// Create a kernel thread, but under the context of the new team
1232 	// The new thread will take over ownership of teamArgs
1233 	thread = spawn_kernel_thread_etc(team_create_thread_start, threadName,
1234 		B_NORMAL_PRIORITY, teamArgs, team->id, team->id);
1235 	if (thread < 0) {
1236 		status = thread;
1237 		goto err5;
1238 	}
1239 
1240 	// wait for the loader of the new team to finish its work
1241 	if (flags & B_WAIT_TILL_LOADED) {
1242 		struct thread *mainThread;
1243 
1244 		state = disable_interrupts();
1245 		GRAB_THREAD_LOCK();
1246 
1247 		mainThread = thread_get_thread_struct_locked(thread);
1248 		if (mainThread) {
1249 			// resume the team's main thread
1250 			if (mainThread->state == B_THREAD_SUSPENDED)
1251 				scheduler_enqueue_in_run_queue(mainThread);
1252 
1253 			// Now suspend ourselves until loading is finished.
1254 			// We will be woken either by the thread, when it finished or
1255 			// aborted loading, or when the team is going to die (e.g. is
1256 			// killed). In either case the one setting `loadingInfo.done' is
1257 			// responsible for removing the info from the team structure.
1258 			while (!loadingInfo.done) {
1259 				thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
1260 				scheduler_reschedule();
1261 			}
1262 		} else {
1263 			// Impressive! Someone managed to kill the thread in this short
1264 			// time.
1265 		}
1266 
1267 		RELEASE_THREAD_LOCK();
1268 		restore_interrupts(state);
1269 
1270 		if (loadingInfo.result < B_OK)
1271 			return loadingInfo.result;
1272 	}
1273 
1274 	// notify the debugger
1275 	user_debug_team_created(team->id);
1276 
1277 	return thread;
1278 
1279 err5:
1280 	delete_team_user_data(team);
1281 err4:
1282 	vm_put_address_space(team->address_space);
1283 err3:
1284 	vfs_put_io_context(team->io_context);
1285 err2:
1286 	free_team_arg(teamArgs);
1287 err1:
1288 	if (parentIOContext != NULL)
1289 		vfs_put_io_context(parentIOContext);
1290 
1291 	// remove the team structure from the team hash table and delete the team structure
1292 	state = disable_interrupts();
1293 	GRAB_TEAM_LOCK();
1294 
1295 	remove_team_from_group(team);
1296 	remove_team_from_parent(team->parent, team);
1297 	hash_remove(sTeamHash, team);
1298 
1299 	RELEASE_TEAM_LOCK();
1300 	restore_interrupts(state);
1301 
1302 err0:
1303 	delete_team_struct(team);
1304 
1305 	return status;
1306 }
1307 
1308 
1309 /*!	Almost shuts down the current team and loads a new image into it.
1310 	If successful, this function does not return and will take over ownership of
1311 	the arguments provided.
1312 	This function may only be called from user space.
1313 */
1314 static status_t
1315 exec_team(const char *path, char**& _flatArgs, size_t flatArgsSize,
1316 	int32 argCount, int32 envCount)
1317 {
1318 	// NOTE: Since this function normally doesn't return, don't use automatic
1319 	// variables that need destruction in the function scope.
1320 	char** flatArgs = _flatArgs;
1321 	struct team *team = thread_get_current_thread()->team;
1322 	struct team_arg *teamArgs;
1323 	const char *threadName;
1324 	status_t status = B_OK;
1325 	cpu_status state;
1326 	struct thread *thread;
1327 	thread_id nubThreadID = -1;
1328 
1329 	TRACE(("exec_team(path = \"%s\", argc = %ld, envCount = %ld): team %ld\n",
1330 		path, argCount, envCount, team->id));
1331 
1332 	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));
1333 
1334 	// switching the kernel at run time is probably not a good idea :)
1335 	if (team == team_get_kernel_team())
1336 		return B_NOT_ALLOWED;
1337 
1338 	// we currently need to be single threaded here
1339 	// ToDo: maybe we should just kill all other threads and
1340 	//	make the current thread the team's main thread?
1341 	if (team->main_thread != thread_get_current_thread())
1342 		return B_NOT_ALLOWED;
1343 
1344 	// The debug nub thread, a pure kernel thread, is allowed to survive.
1345 	// We iterate through the thread list to make sure that there's no other
1346 	// thread.
1347 	state = disable_interrupts();
1348 	GRAB_TEAM_LOCK();
1349 	GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1350 
1351 	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
1352 		nubThreadID = team->debug_info.nub_thread;
1353 
1354 	RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
1355 
1356 	for (thread = team->thread_list; thread; thread = thread->team_next) {
1357 		if (thread != team->main_thread && thread->id != nubThreadID) {
1358 			status = B_NOT_ALLOWED;
1359 			break;
1360 		}
1361 	}
1362 
1363 	RELEASE_TEAM_LOCK();
1364 	restore_interrupts(state);
1365 
1366 	if (status != B_OK)
1367 		return status;
1368 
1369 	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
1370 		envCount, -1, 0);
1371 
1372 	if (status != B_OK)
1373 		return status;
1374 
1375 	_flatArgs = NULL;
1376 		// args are owned by the team_arg structure now
1377 
1378 	// ToDo: remove team resources if there are any left
1379 	// thread_atkernel_exit() might not be called at all
1380 
1381 	thread_reset_for_exec();
1382 
1383 	user_debug_prepare_for_exec();
1384 
1385 	delete_team_user_data(team);
1386 	vm_delete_areas(team->address_space);
1387 	xsi_sem_undo(team);
1388 	delete_owned_ports(team->id);
1389 	sem_delete_owned_sems(team->id);
1390 	remove_images(team);
1391 	vfs_exec_io_context(team->io_context);
1392 	delete_realtime_sem_context(team->realtime_sem_context);
1393 	team->realtime_sem_context = NULL;
1394 
1395 	status = create_team_user_data(team);
1396 	if (status != B_OK) {
1397 		// creating the user data failed -- we're toast
1398 		// TODO: We should better keep the old user area in the first place.
1399 		exit_thread(status);
1400 		return status;
1401 	}
1402 
1403 	user_debug_finish_after_exec();
1404 
1405 	// rename the team
1406 
1407 	set_team_name(team, path);
1408 
1409 	// cut the path from the team name and rename the main thread, too
1410 	threadName = strrchr(path, '/');
1411 	if (threadName != NULL)
1412 		threadName++;
1413 	else
1414 		threadName = path;
1415 	rename_thread(thread_get_current_thread_id(), threadName);
1416 
1417 	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);
1418 
1419 	// Update user/group according to the executable's set-user/group-id
1420 	// permission.
1421 	update_set_id_user_and_group(team, path);
1422 
1423 	user_debug_team_exec();
1424 
1425 	// notify team listeners
1426 	sNotificationService.Notify(TEAM_EXEC, team);
1427 
1428 	status = team_create_thread_start(teamArgs);
1429 		// this one usually doesn't return...
1430 
1431 	// sorry, we have to kill us, there is no way out anymore
1432 	// (without any areas left and all that)
1433 	exit_thread(status);
1434 
1435 	// we return a status here since the signal that is sent by the
1436 	// call above is not immediately handled
1437 	return B_ERROR;
1438 }
1439 
1440 
1441 /*! This is the first function to be called from the newly created
1442 	main child thread.
1443 	It will fill in everything that's left to do from fork_arg, and
1444 	return from the parent's fork() syscall to the child.
1445 */
1446 static int32
1447 fork_team_thread_start(void *_args)
1448 {
1449 	struct thread *thread = thread_get_current_thread();
1450 	struct fork_arg *forkArgs = (struct fork_arg *)_args;
1451 
1452 	struct arch_fork_arg archArgs = forkArgs->arch_info;
1453 		// we need a local copy of the arch dependent part
1454 
1455 	thread->user_stack_area = forkArgs->user_stack_area;
1456 	thread->user_stack_base = forkArgs->user_stack_base;
1457 	thread->user_stack_size = forkArgs->user_stack_size;
1458 	thread->user_local_storage = forkArgs->user_local_storage;
1459 	thread->sig_block_mask = forkArgs->sig_block_mask;
1460 	thread->user_thread = forkArgs->user_thread;
1461 
1462 	arch_thread_init_tls(thread);
1463 
1464 	free(forkArgs);
1465 
1466 	// continue from the CPU frame the parent thread stored at fork() time
1467 
1468 	arch_restore_fork_frame(&archArgs);
1469 		// This one won't return here
1470 
1471 	return 0;
1472 }
1473 
1474 
1475 static thread_id
1476 fork_team(void)
1477 {
1478 	struct thread *parentThread = thread_get_current_thread();
1479 	struct team *parentTeam = parentThread->team, *team;
1480 	struct fork_arg *forkArgs;
1481 	struct area_info info;
1482 	thread_id threadID;
1483 	cpu_status state;
1484 	status_t status;
1485 	int32 cookie;
1486 
1487 	TRACE(("fork_team(): team %ld\n", parentTeam->id));
1488 
1489 	if (parentTeam == team_get_kernel_team())
1490 		return B_NOT_ALLOWED;
1491 
1492 	// create a new team
1493 	// TODO: this is very similar to load_image_internal() - maybe we can do
1494 	// something about it :)
1495 
1496 	team = create_team_struct(parentTeam->name, false);
1497 	if (team == NULL)
1498 		return B_NO_MEMORY;
1499 
1500 	strlcpy(team->args, parentTeam->args, sizeof(team->args));
1501 
1502 	// Inherit the parent's user/group.
1503 	inherit_parent_user_and_group(team, parentTeam);
1504 
1505 	state = disable_interrupts();
1506 	GRAB_TEAM_LOCK();
1507 
1508 	hash_insert(sTeamHash, team);
1509 	insert_team_into_parent(parentTeam, team);
1510 	insert_team_into_group(parentTeam->group, team);
1511 	sUsedTeams++;
1512 
1513 	RELEASE_TEAM_LOCK();
1514 	restore_interrupts(state);
1515 
1516 	forkArgs = (struct fork_arg *)malloc(sizeof(struct fork_arg));
1517 	if (forkArgs == NULL) {
1518 		status = B_NO_MEMORY;
1519 		goto err1;
1520 	}
1521 
1522 	// create a new io_context for this team
1523 	team->io_context = vfs_new_io_context(parentTeam->io_context);
1524 	if (!team->io_context) {
1525 		status = B_NO_MEMORY;
1526 		goto err2;
1527 	}
1528 
1529 	// duplicate the realtime sem context
1530 	if (parentTeam->realtime_sem_context) {
1531 		team->realtime_sem_context = clone_realtime_sem_context(
1532 			parentTeam->realtime_sem_context);
1533 		if (team->realtime_sem_context == NULL) {
1534 			status = B_NO_MEMORY;
1535 			goto err25;
1536 		}
1537 	}
1538 
1539 	// create an address space for this team
1540 	status = vm_create_address_space(team->id, USER_BASE, USER_SIZE, false,
1541 		&team->address_space);
1542 	if (status < B_OK)
1543 		goto err3;
1544 
1545 	// copy all areas of the team
1546 	// ToDo: should be able to handle stack areas differently (ie. don't have them copy-on-write)
1547 	// ToDo: all stacks of other threads than the current one could be left out
1548 
1549 	forkArgs->user_thread = NULL;
1550 
1551 	cookie = 0;
1552 	while (get_next_area_info(B_CURRENT_TEAM, &cookie, &info) == B_OK) {
1553 		if (info.area == parentTeam->user_data_area) {
1554 			// don't clone the user area; just create a new one
1555 			status = create_team_user_data(team);
1556 			if (status != B_OK)
1557 				break;
1558 
1559 			forkArgs->user_thread = team_allocate_user_thread(team);
1560 		} else {
1561 			void *address;
1562 			area_id area = vm_copy_area(team->address_space->id, info.name,
1563 				&address, B_CLONE_ADDRESS, info.protection, info.area);
1564 			if (area < B_OK) {
1565 				status = area;
1566 				break;
1567 			}
1568 
1569 			if (info.area == parentThread->user_stack_area)
1570 				forkArgs->user_stack_area = area;
1571 		}
1572 	}
1573 
1574 	if (status < B_OK)
1575 		goto err4;
1576 
1577 	if (forkArgs->user_thread == NULL) {
1578 #if KDEBUG
1579 		panic("user data area not found, parent area is %ld",
1580 			parentTeam->user_data_area);
1581 #endif
1582 		status = B_ERROR;
1583 		goto err4;
1584 	}
1585 
1586 	forkArgs->user_stack_base = parentThread->user_stack_base;
1587 	forkArgs->user_stack_size = parentThread->user_stack_size;
1588 	forkArgs->user_local_storage = parentThread->user_local_storage;
1589 	forkArgs->sig_block_mask = parentThread->sig_block_mask;
1590 	arch_store_fork_frame(&forkArgs->arch_info);
1591 
1592 	// copy image list
1593 	image_info imageInfo;
1594 	cookie = 0;
1595 	while (get_next_image_info(parentTeam->id, &cookie, &imageInfo) == B_OK) {
1596 		image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
1597 		if (image < 0)
1598 			goto err5;
1599 	}
1600 
1601 	// notify team listeners
1602 	sNotificationService.Notify(TEAM_ADDED, team);
1603 
1604 	// create a kernel thread under the context of the new team
1605 	threadID = spawn_kernel_thread_etc(fork_team_thread_start,
1606 		parentThread->name, parentThread->priority, forkArgs,
1607 		team->id, team->id);
1608 	if (threadID < 0) {
1609 		status = threadID;
1610 		goto err5;
1611 	}
1612 
1613 	// notify the debugger
1614 	user_debug_team_created(team->id);
1615 
1616 	T(TeamForked(threadID));
1617 
1618 	resume_thread(threadID);
1619 	return threadID;
1620 
1621 err5:
1622 	remove_images(team);
1623 err4:
1624 	vm_delete_address_space(team->address_space);
1625 err3:
1626 	delete_realtime_sem_context(team->realtime_sem_context);
1627 err25:
1628 	vfs_put_io_context(team->io_context);
1629 err2:
1630 	free(forkArgs);
1631 err1:
1632 	// remove the team structure from the team hash table and delete the team structure
1633 	state = disable_interrupts();
1634 	GRAB_TEAM_LOCK();
1635 
1636 	remove_team_from_group(team);
1637 	remove_team_from_parent(parentTeam, team);
1638 	hash_remove(sTeamHash, team);
1639 
1640 	RELEASE_TEAM_LOCK();
1641 	restore_interrupts(state);
1642 
1643 	delete_team_struct(team);
1644 
1645 	return status;
1646 }
1647 
1648 
1649 /*!	Returns whether the specified \a team has any children belonging to the
1650 	specified \a group.
1651 	Must be called with the team lock held.
1652 */
1653 static bool
1654 has_children_in_group(struct team *parent, pid_t groupID)
1655 {
1656 	struct team *team;
1657 
1658 	struct process_group *group = team_get_process_group_locked(
1659 		parent->group->session, groupID);
1660 	if (group == NULL)
1661 		return false;
1662 
1663 	for (team = group->teams; team; team = team->group_next) {
1664 		if (team->parent == parent)
1665 			return true;
1666 	}
1667 
1668 	return false;
1669 }
1670 
1671 
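/*!	Returns the first entry in \a children matching \a id, using the waitpid()
	convention: \a id > 0 matches the child with that (main) thread ID, -1
	matches any child, and \a id < -1 matches any child whose process group ID
	equals -\a id.
*/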
1672 static job_control_entry*
1673 get_job_control_entry(team_job_control_children* children, pid_t id)
1674 {
1675 	for (JobControlEntryList::Iterator it = children->entries.GetIterator();
1676 		 job_control_entry* entry = it.Next();) {
1677 
1678 		if (id > 0) {
1679 			if (entry->thread == id)
1680 				return entry;
1681 		} else if (id == -1) {
1682 			return entry;
1683 		} else {
1684 			pid_t processGroup
1685 				= (entry->team ? entry->team->group_id : entry->group_id);
1686 			if (processGroup == -id)
1687 				return entry;
1688 		}
1689 	}
1690 
1691 	return NULL;
1692 }
1693 
1694 
1695 static job_control_entry*
1696 get_job_control_entry(struct team* team, pid_t id, uint32 flags)
1697 {
1698 	job_control_entry* entry = get_job_control_entry(team->dead_children, id);
1699 
1700 	if (entry == NULL && (flags & WCONTINUED) != 0)
1701 		entry = get_job_control_entry(team->continued_children, id);
1702 
1703 	if (entry == NULL && (flags & WUNTRACED) != 0)
1704 		entry = get_job_control_entry(team->stopped_children, id);
1705 
1706 	return entry;
1707 }
1708 
1709 
1710 job_control_entry::job_control_entry()
1711 	:
1712 	has_group_ref(false)
1713 {
1714 }
1715 
1716 
1717 job_control_entry::~job_control_entry()
1718 {
1719 	if (has_group_ref) {
1720 		InterruptsSpinLocker locker(gTeamSpinlock);
1721 		release_process_group_ref(group_id);
1722 	}
1723 }
1724 
1725 
1726 /*!	Team and thread lock must be held.
1727 */
1728 void
1729 job_control_entry::InitDeadState()
1730 {
1731 	if (team != NULL) {
1732 		struct thread* thread = team->main_thread;
1733 		group_id = team->group_id;
1734 		this->thread = thread->id;
1735 		status = thread->exit.status;
1736 		reason = thread->exit.reason;
1737 		signal = thread->exit.signal;
1738 		team = NULL;
1739 		acquire_process_group_ref(group_id);
1740 		has_group_ref = true;
1741 	}
1742 }
1743 
1744 
1745 job_control_entry&
1746 job_control_entry::operator=(const job_control_entry& other)
1747 {
1748 	state = other.state;
1749 	thread = other.thread;
1750 	has_group_ref = false;
1751 	team = other.team;
1752 	group_id = other.group_id;
1753 	status = other.status;
1754 	reason = other.reason;
1755 	signal = other.signal;
1756 
1757 	return *this;
1758 }
1759 
1760 
1761 /*! This is the kernel backend for waitpid(). It is a bit more powerful than
1762 	waitpid() when it comes to the reason why a thread has died.
1763 */
1764 static thread_id
1765 wait_for_child(pid_t child, uint32 flags, int32 *_reason,
1766 	status_t *_returnCode)
1767 {
1768 	struct thread* thread = thread_get_current_thread();
1769 	struct team* team = thread->team;
1770 	struct job_control_entry foundEntry;
1771 	struct job_control_entry* freeDeathEntry = NULL;
1772 	status_t status = B_OK;
1773 
1774 	TRACE(("wait_for_child(child = %ld, flags = %ld)\n", child, flags));
1775 
1776 	T(WaitForChild(child, flags));
1777 
1778 	if (child == 0) {
1779 		// wait for all children in the process group of the calling team
1780 		child = -team->group_id;
1781 	}
1782 
1783 	bool ignoreFoundEntries = false;
1784 	bool ignoreFoundEntriesChecked = false;
1785 
1786 	while (true) {
1787 		InterruptsSpinLocker locker(gTeamSpinlock);
1788 
1789 		// check whether any condition holds
1790 		job_control_entry* entry = get_job_control_entry(team, child, flags);
1791 
1792 		// If we don't have an entry yet, check whether there are any children
1793 		// complying with the process group specification at all.
1794 		if (entry == NULL) {
1795 			// No success yet -- check whether there are any children we could
1796 			// wait for.
1797 			bool childrenExist = false;
1798 			if (child == -1) {
1799 				childrenExist = team->children != NULL;
1800 			} else if (child < -1) {
1801 				childrenExist = has_children_in_group(team, -child);
1802 			} else {
1803 				if (struct team* childTeam = team_get_team_struct_locked(child))
1804 					childrenExist = childTeam->parent == team;
1805 			}
1806 
1807 			if (!childrenExist) {
1808 				// there is no child we could wait for
1809 				status = ECHILD;
1810 			} else {
1811 				// the children we're waiting for are still running
1812 				status = B_WOULD_BLOCK;
1813 			}
1814 		} else {
1815 			// got something
1816 			foundEntry = *entry;
1817 			if (entry->state == JOB_CONTROL_STATE_DEAD) {
1818 				// The child is dead. Reap its death entry.
1819 				freeDeathEntry = entry;
1820 				team->dead_children->entries.Remove(entry);
1821 				team->dead_children->count--;
1822 			} else {
1823 				// The child is well. Reset its job control state.
1824 				team_set_job_control_state(entry->team,
1825 					JOB_CONTROL_STATE_NONE, 0, false);
1826 			}
1827 		}
1828 
1829 		// If we haven't got anything yet, prepare for waiting for the
1830 		// condition variable.
1831 		ConditionVariableEntry deadWaitEntry;
1832 
1833 		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
1834 			team->dead_children->condition_variable.Add(&deadWaitEntry);
1835 
1836 		locker.Unlock();
1837 
1838 		// we got our entry and can return to our caller
1839 		if (status == B_OK) {
1840 			if (ignoreFoundEntries) {
1841 				// ... unless we shall ignore found entries
1842 				delete freeDeathEntry;
1843 				freeDeathEntry = NULL;
1844 				continue;
1845 			}
1846 
1847 			break;
1848 		}
1849 
1850 		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
1851 			T(WaitForChildDone(status));
1852 			return status;
1853 		}
1854 
1855 		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
1856 		if (status == B_INTERRUPTED) {
1857 			T(WaitForChildDone(status));
1858 			return status;
1859 		}
1860 
1861 		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
1862 		// all our children are dead and fail with ECHILD. We check the
1863 		// condition at this point.
1864 		if (!ignoreFoundEntriesChecked) {
1865 			struct sigaction& handler = thread->sig_action[SIGCHLD - 1];
1866 			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
1867 				|| handler.sa_handler == SIG_IGN) {
1868 				ignoreFoundEntries = true;
1869 			}
1870 
1871 			ignoreFoundEntriesChecked = true;
1872 		}
1873 	}
1874 
1875 	delete freeDeathEntry;
1876 
1877 	// when we got here, we have a valid death entry, and
1878 	// already got unregistered from the team or group
1879 	int reason = 0;
1880 	switch (foundEntry.state) {
1881 		case JOB_CONTROL_STATE_DEAD:
1882 			reason = foundEntry.reason;
1883 			break;
1884 		case JOB_CONTROL_STATE_STOPPED:
1885 			reason = THREAD_STOPPED;
1886 			break;
1887 		case JOB_CONTROL_STATE_CONTINUED:
1888 			reason = THREAD_CONTINUED;
1889 			break;
1890 		case JOB_CONTROL_STATE_NONE:
1891 			// can't happen
1892 			break;
1893 	}
1894 
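	// hand the exit status back to the caller and pack the job control reason
	// into the low 16 bits of *_reason, with the signal in the upper 16 bits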
1895 	*_returnCode = foundEntry.status;
1896 	*_reason = (foundEntry.signal << 16) | reason;
1897 
1898 	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
1899 	// status is available.
1900 	if (is_signal_blocked(SIGCHLD)) {
1901 		InterruptsSpinLocker locker(gTeamSpinlock);
1902 
1903 		if (get_job_control_entry(team, child, flags) == NULL)
1904 			atomic_and(&thread->sig_pending, ~SIGNAL_TO_MASK(SIGCHLD));
1905 	}
1906 
1907 	// When the team is dead, the main thread continues to live in the kernel
1908 	// team for a very short time. To avoid surprises for the caller we rather
1909 	// wait until the thread is really gone.
1910 	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
1911 		wait_for_thread(foundEntry.thread, NULL);
1912 
1913 	T(WaitForChildDone(foundEntry));
1914 
1915 	return foundEntry.thread;
1916 }
1917 
1918 
1919 /*! Fills the team_info structure with information from the specified
1920 	team.
1921 	The team lock must be held when called.
1922 */
1923 static status_t
1924 fill_team_info(struct team *team, team_info *info, size_t size)
1925 {
1926 	if (size != sizeof(team_info))
1927 		return B_BAD_VALUE;
1928 
1929 	// ToDo: Set more information for team_info
1930 	memset(info, 0, size);
1931 
1932 	info->team = team->id;
1933 	info->thread_count = team->num_threads;
1934 	info->image_count = count_images(team);
1935 	//info->area_count =
1936 	info->debugger_nub_thread = team->debug_info.nub_thread;
1937 	info->debugger_nub_port = team->debug_info.nub_port;
1938 	//info->uid =
1939 	//info->gid =
1940 
1941 	strlcpy(info->args, team->args, sizeof(info->args));
1942 	info->argc = 1;
1943 
1944 	return B_OK;
1945 }
1946 
1947 
1948 /*!	Updates the \c orphaned field of a process_group and returns its new value.
1949 	Interrupts must be disabled and the team lock held.
1950 */
1951 static bool
1952 update_orphaned_process_group(process_group* group, pid_t dyingProcess)
1953 {
1954 	// Orphaned Process Group: "A process group in which the parent of every
1955 	// member is either itself a member of the group or is not a member of the
1956 	// group's session." (Open Group Base Specs Issue 6)
1957 
1958 	// once orphaned, things won't change (exception: cf. setpgid())
1959 	if (group->orphaned)
1960 		return true;
1961 
1962 	struct team* team = group->teams;
1963 	while (team != NULL) {
1964 		struct team* parent = team->parent;
1965 		if (team->id != dyingProcess && parent != NULL
1966 			&& parent->id != dyingProcess
1967 			&& parent->group_id != group->id
1968 			&& parent->session_id == group->session->id) {
1969 			return false;
1970 		}
1971 
1972 		team = team->group_next;
1973 	}
1974 
1975 	group->orphaned = true;
1976 	return true;
1977 }
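
/*!	Illustrative scenario (not part of the original source): a login shell S
	(a controlling process in session 1) starts a pipeline whose processes form
	their own group G. While S is alive, every member of G has a parent (S)
	that is outside G but inside session 1, so the loop above finds such a
	member and G stays non-orphaned. Once S exits, team_remove_team() calls
	update_orphaned_process_group(G, S->id); S no longer qualifies, no other
	member's parent does either, and G becomes orphaned. If G contains stopped
	processes, it is then sent SIGHUP and SIGCONT.
*/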
1978 
1979 
1980 /*!	Returns whether the process group contains stopped processes.
1981 	Interrupts must be disabled and the team lock be held.
1982 */
1983 static bool
1984 process_group_has_stopped_processes(process_group* group)
1985 {
1986 	SpinLocker _(gThreadSpinlock);
1987 
1988 	struct team* team = group->teams;
1989 	while (team != NULL) {
1990 		if (team->main_thread->state == B_THREAD_SUSPENDED)
1991 			return true;
1992 
1993 		team = team->group_next;
1994 	}
1995 
1996 	return false;
1997 }
1998 
1999 
2000 //	#pragma mark - Private kernel API
2001 
2002 
2003 status_t
2004 team_init(kernel_args *args)
2005 {
2006 	struct process_session *session;
2007 	struct process_group *group;
2008 
2009 	// create the team and process group hash tables
2010 	sTeamHash = hash_init(16, offsetof(struct team, next),
2011 		&team_struct_compare, &team_struct_hash);
2012 
2013 	sGroupHash = hash_init(16, offsetof(struct process_group, next),
2014 		&process_group_compare, &process_group_hash);
2015 
2016 	// create initial session and process groups
2017 
2018 	session = create_process_session(1);
2019 	if (session == NULL)
2020 		panic("Could not create initial session.\n");
2021 
2022 	group = create_process_group(1);
2023 	if (group == NULL)
2024 		panic("Could not create initial process group.\n");
2025 
2026 	insert_group_into_session(session, group);
2027 
2028 	// create the kernel team
2029 	sKernelTeam = create_team_struct("kernel_team", true);
2030 	if (sKernelTeam == NULL)
2031 		panic("could not create kernel team!\n");
2032 	strcpy(sKernelTeam->args, sKernelTeam->name);
2033 	sKernelTeam->state = TEAM_STATE_NORMAL;
2034 
2035 	sKernelTeam->saved_set_uid = 0;
2036 	sKernelTeam->real_uid = 0;
2037 	sKernelTeam->effective_uid = 0;
2038 	sKernelTeam->saved_set_gid = 0;
2039 	sKernelTeam->real_gid = 0;
2040 	sKernelTeam->effective_gid = 0;
2041 	sKernelTeam->supplementary_groups = NULL;
2042 	sKernelTeam->supplementary_group_count = 0;
2043 
2044 	insert_team_into_group(group, sKernelTeam);
2045 
2046 	sKernelTeam->io_context = vfs_new_io_context(NULL);
2047 	if (sKernelTeam->io_context == NULL)
2048 		panic("could not create io_context for kernel team!\n");
2049 
2050 	// stick it in the team hash
2051 	hash_insert(sTeamHash, sKernelTeam);
2052 
2053 	add_debugger_command_etc("team", &dump_team_info,
2054 		"Dump info about a particular team",
2055 		"[ <id> | <address> | <name> ]\n"
2056 		"Prints information about the specified team. If no argument is given\n"
2057 		"the current team is selected.\n"
2058 		"  <id>       - The ID of the team.\n"
2059 		"  <address>  - The address of the team structure.\n"
2060 		"  <name>     - The team's name.\n", 0);
2061 	add_debugger_command_etc("teams", &dump_teams, "List all teams",
2062 		"\n"
2063 		"Prints a list of all existing teams.\n", 0);
2064 
2065 	new(&sNotificationService) TeamNotificationService();
2066 
2067 	return B_OK;
2068 }
2069 
2070 
2071 int32
2072 team_max_teams(void)
2073 {
2074 	return sMaxTeams;
2075 }
2076 
2077 
2078 int32
2079 team_used_teams(void)
2080 {
2081 	return sUsedTeams;
2082 }
2083 
2084 
2085 /*!	Iterates through the list of teams. The team spinlock must be held.
2086  */
2087 struct team*
2088 team_iterate_through_teams(team_iterator_callback callback, void* cookie)
2089 {
2090 	struct hash_iterator iterator;
2091 	hash_open(sTeamHash, &iterator);
2092 
2093 	struct team* team;
2094 	while ((team = (struct team*)hash_next(sTeamHash, &iterator)) != NULL) {
2095 		if (callback(team, cookie))
2096 			break;
2097 	}
2098 
2099 	hash_close(sTeamHash, &iterator, false);
2100 
2101 	return team;
2102 }
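
/*!	Usage sketch (illustrative addition, not part of the original source):
	counting all teams with team_iterate_through_teams(). This assumes the
	team_iterator_callback typedef from <team.h> is
	bool (*)(struct team* team, void* cookie), as the call above suggests.

	\code
	static bool
	count_teams_callback(struct team* team, void* cookie)
	{
		(*(int32*)cookie)++;
		return false;
			// false: keep iterating; returning true stops the iteration and
			// makes team_iterate_through_teams() return this team
	}

	static int32
	count_all_teams()
	{
		int32 count = 0;
		InterruptsSpinLocker _(gTeamSpinlock);
			// the function requires the team spinlock to be held
		team_iterate_through_teams(&count_teams_callback, &count);
		return count;
	}
	\endcode
*/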
2103 
2104 
2105 /*! Returns the team's death entry for the given child, if any, and sets
2106 	\a _deleteEntry to whether the caller must delete it. The team lock must be held.
2107 */
2108 job_control_entry*
2109 team_get_death_entry(struct team *team, thread_id child, bool* _deleteEntry)
2110 {
2111 	if (child <= 0)
2112 		return NULL;
2113 
2114 	job_control_entry* entry = get_job_control_entry(team->dead_children,
2115 		child);
2116 	if (entry) {
2117 		// remove the entry only if the caller is the parent of the found team
2118 		if (team_get_current_team_id() == entry->thread) {
2119 			team->dead_children->entries.Remove(entry);
2120 			team->dead_children->count--;
2121 			*_deleteEntry = true;
2122 		} else {
2123 			*_deleteEntry = false;
2124 		}
2125 	}
2126 
2127 	return entry;
2128 }
2129 
2130 
2131 /*! Quick check to see if we have a valid team ID. */
2132 bool
2133 team_is_valid(team_id id)
2134 {
2135 	struct team *team;
2136 	cpu_status state;
2137 
2138 	if (id <= 0)
2139 		return false;
2140 
2141 	state = disable_interrupts();
2142 	GRAB_TEAM_LOCK();
2143 
2144 	team = team_get_team_struct_locked(id);
2145 
2146 	RELEASE_TEAM_LOCK();
2147 	restore_interrupts(state);
2148 
2149 	return team != NULL;
2150 }
2151 
2152 
2153 struct team *
2154 team_get_team_struct_locked(team_id id)
2155 {
2156 	struct team_key key;
2157 	key.id = id;
2158 
2159 	return (struct team*)hash_lookup(sTeamHash, &key);
2160 }
2161 
2162 
2163 /*! Searches the given session (or any session, if \a session is NULL) for the
2164 	process group with the specified ID. You must hold the team lock.
2165 */
2166 struct process_group *
2167 team_get_process_group_locked(struct process_session *session, pid_t id)
2168 {
2169 	struct process_group *group;
2170 	struct team_key key;
2171 	key.id = id;
2172 
2173 	group = (struct process_group *)hash_lookup(sGroupHash, &key);
2174 	if (group != NULL && (session == NULL || session == group->session))
2175 		return group;
2176 
2177 	return NULL;
2178 }
2179 
2180 
2181 void
2182 team_delete_process_group(struct process_group *group)
2183 {
2184 	if (group == NULL)
2185 		return;
2186 
2187 	TRACE(("team_delete_process_group(id = %ld)\n", group->id));
2188 
2189 	// remove_group_from_session() keeps this pointer around
2190 	// only if the session can be freed as well
2191 	if (group->session) {
2192 		TRACE(("team_delete_process_group(): frees session %ld\n", group->session->id));
2193 		free(group->session);
2194 	}
2195 
2196 	free(group);
2197 }
2198 
2199 
2200 void
2201 team_set_controlling_tty(int32 ttyIndex)
2202 {
2203 	struct team* team = thread_get_current_thread()->team;
2204 
2205 	InterruptsSpinLocker _(gTeamSpinlock);
2206 
2207 	team->group->session->controlling_tty = ttyIndex;
2208 	team->group->session->foreground_group = -1;
2209 }
2210 
2211 
2212 int32
2213 team_get_controlling_tty()
2214 {
2215 	struct team* team = thread_get_current_thread()->team;
2216 
2217 	InterruptsSpinLocker _(gTeamSpinlock);
2218 
2219 	return team->group->session->controlling_tty;
2220 }
2221 
2222 
2223 status_t
2224 team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
2225 {
2226 	struct thread* thread = thread_get_current_thread();
2227 	struct team* team = thread->team;
2228 
2229 	InterruptsSpinLocker locker(gTeamSpinlock);
2230 
2231 	process_session* session = team->group->session;
2232 
2233 	// must be the controlling tty of the calling process
2234 	if (session->controlling_tty != ttyIndex)
2235 		return ENOTTY;
2236 
2237 	// check process group -- must belong to our session
2238 	process_group* group = team_get_process_group_locked(session,
2239 		processGroupID);
2240 	if (group == NULL)
2241 		return B_BAD_VALUE;
2242 
2243 	// If we are a background group, we can only do this if we ignore or block
2244 	// SIGTTOU. Otherwise the group gets a SIGTTOU and we return B_INTERRUPTED.
2245 	if (session->foreground_group != -1
2246 		&& session->foreground_group != team->group_id
2247 		&& thread->sig_action[SIGTTOU - 1].sa_handler != SIG_IGN
2248 		&& !is_signal_blocked(SIGTTOU)) {
2249 		pid_t groupID = team->group->id;
2250 		locker.Unlock();
2251 		send_signal(-groupID, SIGTTOU);
2252 		return B_INTERRUPTED;
2253 	}
2254 
2255 	team->group->session->foreground_group = processGroupID;
2256 
2257 	return B_OK;
2258 }
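
/*!	Userland counterpart (illustrative addition, not part of the original
	source): a shell puts a job into the foreground with the POSIX tcsetpgrp()
	call, which presumably reaches this function through the TTY layer. The
	SIGTTOU handling above is the reason a background caller that neither
	ignores nor blocks SIGTTOU is stopped instead of succeeding.

	\code
	#include <signal.h>
	#include <unistd.h>

	static int
	bring_job_to_foreground(int ttyFD, pid_t jobPgid)
	{
		// ignore SIGTTOU while handing over the terminal, since the shell
		// itself may currently be in the background
		void (*oldHandler)(int) = signal(SIGTTOU, SIG_IGN);
		int result = tcsetpgrp(ttyFD, jobPgid);
		signal(SIGTTOU, oldHandler);
		return result;
	}
	\endcode
*/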
2259 
2260 
2261 /*!	Removes the specified team from the global team hash, and from its parent.
2262 	It also moves all of its children up to the parent.
2263 	You must hold the team lock when you call this function.
2264 */
2265 void
2266 team_remove_team(struct team *team)
2267 {
2268 	struct team *parent = team->parent;
2269 
2270 	// account this team's CPU time to its parent's dead children statistics
2271 	parent->dead_children->kernel_time += team->dead_threads_kernel_time
2272 		+ team->dead_children->kernel_time;
2273 	parent->dead_children->user_time += team->dead_threads_user_time
2274 		+ team->dead_children->user_time;
2275 
2276 	// Also grab the thread spinlock while removing the team from the hash.
2277 	// This makes the following sequence safe: grab teams lock, lookup team,
2278 	// grab threads lock, unlock teams lock,
2279 	// mutex_lock_threads_lock(<team related lock>), as used in the VFS code to
2280 	// lock another team's IO context.
2281 	GRAB_THREAD_LOCK();
2282 	hash_remove(sTeamHash, team);
2283 	RELEASE_THREAD_LOCK();
2284 	sUsedTeams--;
2285 
2286 	team->state = TEAM_STATE_DEATH;
2287 
2288 	// If we're a controlling process (i.e. a session leader with controlling
2289 	// terminal), there's a bit of signalling we have to do.
2290 	if (team->session_id == team->id
2291 		&& team->group->session->controlling_tty >= 0) {
2292 		process_session* session = team->group->session;
2293 
2294 		session->controlling_tty = -1;
2295 
2296 		// send SIGHUP to the foreground
2297 		if (session->foreground_group >= 0) {
2298 			send_signal_etc(-session->foreground_group, SIGHUP,
2299 				SIGNAL_FLAG_TEAMS_LOCKED);
2300 		}
2301 
2302 		// send SIGHUP + SIGCONT to all newly-orphaned process groups with
2303 		// stopped processes
2304 		struct team* child = team->children;
2305 		while (child != NULL) {
2306 			process_group* childGroup = child->group;
2307 			if (!childGroup->orphaned
2308 				&& update_orphaned_process_group(childGroup, team->id)
2309 				&& process_group_has_stopped_processes(childGroup)) {
2310 				send_signal_etc(-childGroup->id, SIGHUP,
2311 					SIGNAL_FLAG_TEAMS_LOCKED);
2312 				send_signal_etc(-childGroup->id, SIGCONT,
2313 					SIGNAL_FLAG_TEAMS_LOCKED);
2314 			}
2315 
2316 			child = child->siblings_next;
2317 		}
2318 	} else {
2319 		// update "orphaned" flags of all children's process groups
2320 		struct team* child = team->children;
2321 		while (child != NULL) {
2322 			process_group* childGroup = child->group;
2323 			if (!childGroup->orphaned)
2324 				update_orphaned_process_group(childGroup, team->id);
2325 
2326 			child = child->siblings_next;
2327 		}
2328 
2329 		// update "orphaned" flag of this team's process group
2330 		update_orphaned_process_group(team->group, team->id);
2331 	}
2332 
2333 	// reparent each of the team's children
2334 	reparent_children(team);
2335 
2336 	// remove us from our process group
2337 	remove_team_from_group(team);
2338 
2339 	// remove us from our parent
2340 	remove_team_from_parent(parent, team);
2341 }
2342 
2343 
2344 void
2345 team_delete_team(struct team *team)
2346 {
2347 	team_id teamID = team->id;
2348 	port_id debuggerPort = -1;
2349 	cpu_status state;
2350 
2351 	if (team->num_threads > 0) {
2352 		// there are other threads still in this team,
2353 		// cycle through and signal kill on each of the threads
2354 		// ToDo: this can be optimized. There's got to be a better solution.
2355 		struct thread *temp_thread;
2356 		char death_sem_name[B_OS_NAME_LENGTH];
2357 		sem_id deathSem;
2358 		int32 threadCount;
2359 
2360 		sprintf(death_sem_name, "team %ld death sem", teamID);
2361 		deathSem = create_sem(0, death_sem_name);
2362 		if (deathSem < 0)
2363 			panic("team_delete_team: cannot init death sem for team %ld\n", teamID);
2364 
2365 		state = disable_interrupts();
2366 		GRAB_TEAM_LOCK();
2367 
2368 		team->death_sem = deathSem;
2369 		threadCount = team->num_threads;
2370 
2371 		// If the team was being debugged, that will stop with the termination
2372 		// of the nub thread. The team structure has already been removed from
2373 		// the team hash table at this point, so no one can install a debugger
2374 		// anymore. We fetch the debugger's port to send it a message at the
2375 		// bitter end.
2376 		GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2377 
2378 		if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
2379 			debuggerPort = team->debug_info.debugger_port;
2380 
2381 		RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);
2382 
2383 		// We can safely walk the list because of the lock; no new threads can be
2384 		// created because the team is already in the TEAM_STATE_DEATH state.
2385 		temp_thread = team->thread_list;
2386 		while (temp_thread) {
2387 			struct thread *next = temp_thread->team_next;
2388 
2389 			send_signal_etc(temp_thread->id, SIGKILLTHR, B_DO_NOT_RESCHEDULE);
2390 			temp_thread = next;
2391 		}
2392 
2393 		RELEASE_TEAM_LOCK();
2394 		restore_interrupts(state);
2395 
2396 		// wait until all threads in the team are dead.
2397 		acquire_sem_etc(team->death_sem, threadCount, 0, 0);
2398 		delete_sem(team->death_sem);
2399 	}
2400 
2401 	// If someone is waiting for this team to be loaded, but it dies
2402 	// unexpectedly before being done, we need to notify the waiting
2403 	// thread now.
2404 
2405 	state = disable_interrupts();
2406 	GRAB_TEAM_LOCK();
2407 
2408 	if (team->loading_info) {
2409 		// there's indeed someone waiting
2410 		struct team_loading_info *loadingInfo = team->loading_info;
2411 		team->loading_info = NULL;
2412 
2413 		loadingInfo->result = B_ERROR;
2414 		loadingInfo->done = true;
2415 
2416 		GRAB_THREAD_LOCK();
2417 
2418 		// wake up the waiting thread
2419 		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
2420 			scheduler_enqueue_in_run_queue(loadingInfo->thread);
2421 
2422 		RELEASE_THREAD_LOCK();
2423 	}
2424 
2425 	RELEASE_TEAM_LOCK();
2426 	restore_interrupts(state);
2427 
2428 	// notify team watchers
2429 
2430 	{
2431 		// we're not reachable from anyone anymore at this point, so we
2432 		// can safely access the list without any locking
2433 		struct team_watcher *watcher;
2434 		while ((watcher = (struct team_watcher*)list_remove_head_item(
2435 				&team->watcher_list)) != NULL) {
2436 			watcher->hook(teamID, watcher->data);
2437 			free(watcher);
2438 		}
2439 	}
2440 
2441 	sNotificationService.Notify(TEAM_REMOVED, team);
2442 
2443 	// free team resources
2444 
2445 	vfs_put_io_context(team->io_context);
2446 	delete_realtime_sem_context(team->realtime_sem_context);
2447 	xsi_sem_undo(team);
2448 	delete_owned_ports(teamID);
2449 	sem_delete_owned_sems(teamID);
2450 	remove_images(team);
2451 	vm_delete_address_space(team->address_space);
2452 
2453 	delete_team_struct(team);
2454 
2455 	// notify the debugger that the team is gone
2456 	user_debug_team_deleted(teamID, debuggerPort);
2457 }
2458 
2459 
2460 struct team *
2461 team_get_kernel_team(void)
2462 {
2463 	return sKernelTeam;
2464 }
2465 
2466 
2467 team_id
2468 team_get_kernel_team_id(void)
2469 {
2470 	if (!sKernelTeam)
2471 		return 0;
2472 
2473 	return sKernelTeam->id;
2474 }
2475 
2476 
2477 team_id
2478 team_get_current_team_id(void)
2479 {
2480 	return thread_get_current_thread()->team->id;
2481 }
2482 
2483 
2484 status_t
2485 team_get_address_space(team_id id, vm_address_space **_addressSpace)
2486 {
2487 	cpu_status state;
2488 	struct team *team;
2489 	status_t status;
2490 
2491 	// ToDo: we need to do something about B_SYSTEM_TEAM vs. its real ID (1)
2492 	if (id == 1) {
2493 		// we're the kernel team, so we don't have to go through all
2494 		// the hassle (locking and hash lookup)
2495 		*_addressSpace = vm_get_kernel_address_space();
2496 		return B_OK;
2497 	}
2498 
2499 	state = disable_interrupts();
2500 	GRAB_TEAM_LOCK();
2501 
2502 	team = team_get_team_struct_locked(id);
2503 	if (team != NULL) {
2504 		atomic_add(&team->address_space->ref_count, 1);
2505 		*_addressSpace = team->address_space;
2506 		status = B_OK;
2507 	} else
2508 		status = B_BAD_VALUE;
2509 
2510 	RELEASE_TEAM_LOCK();
2511 	restore_interrupts(state);
2512 
2513 	return status;
2514 }
2515 
2516 
2517 /*!	Sets the team's job control state.
2518 	Interrupts must be disabled and the team lock be held.
2519 	\a threadsLocked indicates whether the thread lock is being held, too.
2520 */
2521 void
2522 team_set_job_control_state(struct team* team, job_control_state newState,
2523 	int signal, bool threadsLocked)
2524 {
2525 	if (team == NULL || team->job_control_entry == NULL)
2526 		return;
2527 
2528 	// don't touch anything if the state stays the same or the team is already
2529 	// dead
2530 	job_control_entry* entry = team->job_control_entry;
2531 	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
2532 		return;
2533 
2534 	T(SetJobControlState(team->id, newState, signal));
2535 
2536 	// remove from the old list
2537 	switch (entry->state) {
2538 		case JOB_CONTROL_STATE_NONE:
2539 			// entry is in no list ATM
2540 			break;
2541 		case JOB_CONTROL_STATE_DEAD:
2542 			// can't get here
2543 			break;
2544 		case JOB_CONTROL_STATE_STOPPED:
2545 			team->parent->stopped_children->entries.Remove(entry);
2546 			break;
2547 		case JOB_CONTROL_STATE_CONTINUED:
2548 			team->parent->continued_children->entries.Remove(entry);
2549 			break;
2550 	}
2551 
2552 	entry->state = newState;
2553 	entry->signal = signal;
2554 
2555 	// add to new list
2556 	team_job_control_children* childList = NULL;
2557 	switch (entry->state) {
2558 		case JOB_CONTROL_STATE_NONE:
2559 			// entry doesn't get into any list
2560 			break;
2561 		case JOB_CONTROL_STATE_DEAD:
2562 			childList = team->parent->dead_children;
2563 			team->parent->dead_children->count++;
2564 			break;
2565 		case JOB_CONTROL_STATE_STOPPED:
2566 			childList = team->parent->stopped_children;
2567 			break;
2568 		case JOB_CONTROL_STATE_CONTINUED:
2569 			childList = team->parent->continued_children;
2570 			break;
2571 	}
2572 
2573 	if (childList != NULL) {
2574 		childList->entries.Add(entry);
2575 		team->parent->dead_children->condition_variable.NotifyAll(
2576 			threadsLocked);
2577 	}
2578 }
2579 
2580 
2581 /*! Adds a hook to the team that is called as soon as this
2582 	team goes away.
2583 	This call might get public in the future.
2584 */
2585 status_t
2586 start_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2587 {
2588 	struct team_watcher *watcher;
2589 	struct team *team;
2590 	cpu_status state;
2591 
2592 	if (hook == NULL || teamID < B_OK)
2593 		return B_BAD_VALUE;
2594 
2595 	watcher = (struct team_watcher*)malloc(sizeof(struct team_watcher));
2596 	if (watcher == NULL)
2597 		return B_NO_MEMORY;
2598 
2599 	watcher->hook = hook;
2600 	watcher->data = data;
2601 
2602 	// find team and add watcher
2603 
2604 	state = disable_interrupts();
2605 	GRAB_TEAM_LOCK();
2606 
2607 	team = team_get_team_struct_locked(teamID);
2608 	if (team != NULL)
2609 		list_add_item(&team->watcher_list, watcher);
2610 
2611 	RELEASE_TEAM_LOCK();
2612 	restore_interrupts(state);
2613 
2614 	if (team == NULL) {
2615 		free(watcher);
2616 		return B_BAD_TEAM_ID;
2617 	}
2618 
2619 	return B_OK;
2620 }
2621 
2622 
2623 status_t
2624 stop_watching_team(team_id teamID, void (*hook)(team_id, void *), void *data)
2625 {
2626 	struct team_watcher *watcher = NULL;
2627 	struct team *team;
2628 	cpu_status state;
2629 
2630 	if (hook == NULL || teamID < B_OK)
2631 		return B_BAD_VALUE;
2632 
2633 	// find team and remove watcher (if present)
2634 
2635 	state = disable_interrupts();
2636 	GRAB_TEAM_LOCK();
2637 
2638 	team = team_get_team_struct_locked(teamID);
2639 	if (team != NULL) {
2640 		// search for watcher
2641 		while ((watcher = (struct team_watcher*)list_get_next_item(
2642 				&team->watcher_list, watcher)) != NULL) {
2643 			if (watcher->hook == hook && watcher->data == data) {
2644 				// got it!
2645 				list_remove_item(&team->watcher_list, watcher);
2646 				break;
2647 			}
2648 		}
2649 	}
2650 
2651 	RELEASE_TEAM_LOCK();
2652 	restore_interrupts(state);
2653 
2654 	if (watcher == NULL)
2655 		return B_ENTRY_NOT_FOUND;
2656 
2657 	free(watcher);
2658 	return B_OK;
2659 }
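
/*!	Usage sketch (illustrative addition, not part of the original source): a
	component that attaches state to a foreign team can use a watcher to learn
	when the team goes away. The hook is invoked from team_delete_team(), after
	the team has become unreachable.

	\code
	static void
	my_team_vanished(team_id team, void* data)
	{
		// "data" is the (hypothetical) per-team state registered below
		free(data);
	}

	static status_t
	attach_state_to_team(team_id team, void* state)
	{
		status_t status = start_watching_team(team, &my_team_vanished, state);
		if (status != B_OK)
			return status;

		// to detach before the team dies, undo the registration:
		// stop_watching_team(team, &my_team_vanished, state);
		return B_OK;
	}
	\endcode
*/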
2660 
2661 
2662 /*!	The team lock must be held or the team must still be single threaded.
2663 */
2664 struct user_thread*
2665 team_allocate_user_thread(struct team* team)
2666 {
2667 	if (team->user_data == 0)
2668 		return NULL;
2669 
2670 	user_thread* thread = NULL;
2671 
2672 	// take an entry from the free list, if any
2673 	if (struct free_user_thread* entry = team->free_user_threads) {
2674 		thread = entry->thread;
2675 		team->free_user_threads = entry->next;
2676 		deferred_free(entry);
2677 		return thread;
2678 	} else {
2679 		// enough space left?
2680 		size_t needed = _ALIGN(sizeof(user_thread));
2681 		if (team->user_data_size - team->used_user_data < needed)
2682 			return NULL;
2683 		// TODO: This imposes a per team thread limit! We should resize the
2684 		// area, if necessary. That's problematic at this point, though, since
2685 		// we've got the team lock.
2686 
2687 		thread = (user_thread*)(team->user_data + team->used_user_data);
2688 		team->used_user_data += needed;
2689 	}
2690 
2691 	thread->defer_signals = 0;
2692 	thread->pending_signals = 0;
2693 	thread->wait_status = B_OK;
2694 
2695 	return thread;
2696 }
2697 
2698 
2699 /*!	The team lock must not be held. \a thread must be the current thread.
2700 */
2701 void
2702 team_free_user_thread(struct thread* thread)
2703 {
2704 	user_thread* userThread = thread->user_thread;
2705 	if (userThread == NULL)
2706 		return;
2707 
2708 	// create a free list entry
2709 	free_user_thread* entry
2710 		= (free_user_thread*)malloc(sizeof(free_user_thread));
2711 	if (entry == NULL) {
2712 		// we have to leak the user thread :-/
2713 		return;
2714 	}
2715 
2716 	InterruptsSpinLocker _(gTeamSpinlock);
2717 
2718 	// detach from thread
2719 	SpinLocker threadLocker(gThreadSpinlock);
2720 	thread->user_thread = NULL;
2721 	threadLocker.Unlock();
2722 
2723 	entry->thread = userThread;
2724 	entry->next = thread->team->free_user_threads;
2725 	thread->team->free_user_threads = entry;
2726 }
2727 
2728 
2729 //	#pragma mark - Public kernel API
2730 
2731 
2732 thread_id
2733 load_image(int32 argCount, const char **args, const char **env)
2734 {
2735 	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
2736 		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
2737 }
2738 
2739 
2740 thread_id
2741 load_image_etc(int32 argCount, const char* const* args,
2742 	const char* const* env, int32 priority, team_id parentID, uint32 flags)
2743 {
2744 	// we need to flatten the args and environment
2745 
2746 	if (args == NULL)
2747 		return B_BAD_VALUE;
2748 
2749 	// determine total needed size
2750 	int32 argSize = 0;
2751 	for (int32 i = 0; i < argCount; i++)
2752 		argSize += strlen(args[i]) + 1;
2753 
2754 	int32 envCount = 0;
2755 	int32 envSize = 0;
2756 	while (env != NULL && env[envCount] != NULL)
2757 		envSize += strlen(env[envCount++]) + 1;
2758 
2759 	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
2760 	if (size > MAX_PROCESS_ARGS_SIZE)
2761 		return B_TOO_MANY_ARGS;
2762 
2763 	// allocate space
2764 	char** flatArgs = (char**)malloc(size);
2765 	if (flatArgs == NULL)
2766 		return B_NO_MEMORY;
2767 
2768 	char** slot = flatArgs;
2769 	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
2770 
2771 	// copy arguments and environment
2772 	for (int32 i = 0; i < argCount; i++) {
2773 		int32 argSize = strlen(args[i]) + 1;
2774 		memcpy(stringSpace, args[i], argSize);
2775 		*slot++ = stringSpace;
2776 		stringSpace += argSize;
2777 	}
2778 
2779 	*slot++ = NULL;
2780 
2781 	for (int32 i = 0; i < envCount; i++) {
2782 		int32 envSize = strlen(env[i]) + 1;
2783 		memcpy(stringSpace, env[i], envSize);
2784 		*slot++ = stringSpace;
2785 		stringSpace += envSize;
2786 	}
2787 
2788 	*slot++ = NULL;
2789 
2790 	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
2791 		priority, parentID, flags, -1, 0);
2792 
2793 	free(flatArgs);
2794 		// load_image_internal() unset our variable if it took over ownership
2795 
2796 	return thread;
2797 }
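
/*!	Usage sketch (illustrative addition, not part of the original source):
	launching a userland program from kernel code and waiting for it to finish.
	The path "/bin/sync" is just a placeholder. With B_WAIT_TILL_LOADED the
	returned main thread is created suspended, so it still has to be resumed.

	\code
	static status_t
	run_program_and_wait(void)
	{
		const char* args[] = { "/bin/sync", NULL };

		thread_id thread = load_image(1, args, NULL);
		if (thread < 0)
			return thread;

		resume_thread(thread);

		status_t returnCode;
		status_t status = wait_for_thread(thread, &returnCode);
		return status == B_OK ? returnCode : status;
	}
	\endcode
*/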
2798 
2799 
2800 status_t
2801 wait_for_team(team_id id, status_t *_returnCode)
2802 {
2803 	struct team *team;
2804 	thread_id thread;
2805 	cpu_status state;
2806 
2807 	// find main thread and wait for that
2808 
2809 	state = disable_interrupts();
2810 	GRAB_TEAM_LOCK();
2811 
2812 	team = team_get_team_struct_locked(id);
2813 	if (team != NULL && team->main_thread != NULL)
2814 		thread = team->main_thread->id;
2815 	else
2816 		thread = B_BAD_THREAD_ID;
2817 
2818 	RELEASE_TEAM_LOCK();
2819 	restore_interrupts(state);
2820 
2821 	if (thread < 0)
2822 		return thread;
2823 
2824 	return wait_for_thread(thread, _returnCode);
2825 }
2826 
2827 
2828 status_t
2829 kill_team(team_id id)
2830 {
2831 	status_t status = B_OK;
2832 	thread_id threadID = -1;
2833 	struct team *team;
2834 	cpu_status state;
2835 
2836 	state = disable_interrupts();
2837 	GRAB_TEAM_LOCK();
2838 
2839 	team = team_get_team_struct_locked(id);
2840 	if (team != NULL) {
2841 		if (team != sKernelTeam) {
2842 			threadID = team->id;
2843 				// the team ID is the same as the ID of its main thread
2844 		} else
2845 			status = B_NOT_ALLOWED;
2846 	} else
2847 		status = B_BAD_THREAD_ID;
2848 
2849 	RELEASE_TEAM_LOCK();
2850 	restore_interrupts(state);
2851 
2852 	if (status < B_OK)
2853 		return status;
2854 
2855 	// just kill the main thread in the team. The cleanup code there will
2856 	// take care of the team
2857 	return kill_thread(threadID);
2858 }
2859 
2860 
2861 status_t
2862 _get_team_info(team_id id, team_info *info, size_t size)
2863 {
2864 	cpu_status state;
2865 	status_t status = B_OK;
2866 	struct team *team;
2867 
2868 	state = disable_interrupts();
2869 	GRAB_TEAM_LOCK();
2870 
2871 	if (id == B_CURRENT_TEAM)
2872 		team = thread_get_current_thread()->team;
2873 	else
2874 		team = team_get_team_struct_locked(id);
2875 
2876 	if (team == NULL) {
2877 		status = B_BAD_TEAM_ID;
2878 		goto err;
2879 	}
2880 
2881 	status = fill_team_info(team, info, size);
2882 
2883 err:
2884 	RELEASE_TEAM_LOCK();
2885 	restore_interrupts(state);
2886 
2887 	return status;
2888 }
2889 
2890 
2891 status_t
2892 _get_next_team_info(int32 *cookie, team_info *info, size_t size)
2893 {
2894 	status_t status = B_BAD_TEAM_ID;
2895 	struct team *team = NULL;
2896 	int32 slot = *cookie;
2897 	team_id lastTeamID;
2898 	cpu_status state;
2899 
2900 	if (slot < 1)
2901 		slot = 1;
2902 
2903 	state = disable_interrupts();
2904 	GRAB_TEAM_LOCK();
2905 
2906 	lastTeamID = peek_next_thread_id();
2907 	if (slot >= lastTeamID)
2908 		goto err;
2909 
2910 	// get next valid team
2911 	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
2912 		slot++;
2913 
2914 	if (team) {
2915 		status = fill_team_info(team, info, size);
2916 		*cookie = ++slot;
2917 	}
2918 
2919 err:
2920 	RELEASE_TEAM_LOCK();
2921 	restore_interrupts(state);
2922 
2923 	return status;
2924 }
2925 
2926 
2927 status_t
2928 _get_team_usage_info(team_id id, int32 who, team_usage_info *info, size_t size)
2929 {
2930 	bigtime_t kernelTime = 0, userTime = 0;
2931 	status_t status = B_OK;
2932 	struct team *team;
2933 	cpu_status state;
2934 
2935 	if (size != sizeof(team_usage_info)
2936 		|| (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN))
2937 		return B_BAD_VALUE;
2938 
2939 	state = disable_interrupts();
2940 	GRAB_TEAM_LOCK();
2941 
2942 	if (id == B_CURRENT_TEAM)
2943 		team = thread_get_current_thread()->team;
2944 	else
2945 		team = team_get_team_struct_locked(id);
2946 
2947 	if (team == NULL) {
2948 		status = B_BAD_TEAM_ID;
2949 		goto out;
2950 	}
2951 
2952 	switch (who) {
2953 		case B_TEAM_USAGE_SELF:
2954 		{
2955 			struct thread *thread = team->thread_list;
2956 
2957 			for (; thread != NULL; thread = thread->team_next) {
2958 				kernelTime += thread->kernel_time;
2959 				userTime += thread->user_time;
2960 			}
2961 
2962 			kernelTime += team->dead_threads_kernel_time;
2963 			userTime += team->dead_threads_user_time;
2964 			break;
2965 		}
2966 
2967 		case B_TEAM_USAGE_CHILDREN:
2968 		{
2969 			struct team *child = team->children;
2970 			for (; child != NULL; child = child->siblings_next) {
2971 				struct thread *thread = child->thread_list;
2972 
2973 				for (; thread != NULL; thread = thread->team_next) {
2974 					kernelTime += thread->kernel_time;
2975 					userTime += thread->user_time;
2976 				}
2977 
2978 				kernelTime += child->dead_threads_kernel_time;
2979 				userTime += child->dead_threads_user_time;
2980 			}
2981 
2982 			kernelTime += team->dead_children->kernel_time;
2983 			userTime += team->dead_children->user_time;
2984 			break;
2985 		}
2986 	}
2987 
2988 out:
2989 	RELEASE_TEAM_LOCK();
2990 	restore_interrupts(state);
2991 
2992 	if (status == B_OK) {
2993 		info->kernel_time = kernelTime;
2994 		info->user_time = userTime;
2995 	}
2996 
2997 	return status;
2998 }
2999 
3000 
3001 pid_t
3002 getpid(void)
3003 {
3004 	return thread_get_current_thread()->team->id;
3005 }
3006 
3007 
3008 pid_t
3009 getppid(void)
3010 {
3011 	struct team *team = thread_get_current_thread()->team;
3012 	cpu_status state;
3013 	pid_t parent;
3014 
3015 	state = disable_interrupts();
3016 	GRAB_TEAM_LOCK();
3017 
3018 	parent = team->parent->id;
3019 
3020 	RELEASE_TEAM_LOCK();
3021 	restore_interrupts(state);
3022 
3023 	return parent;
3024 }
3025 
3026 
3027 pid_t
3028 getpgid(pid_t process)
3029 {
3030 	struct thread *thread;
3031 	pid_t result = -1;
3032 	cpu_status state;
3033 
3034 	if (process == 0)
3035 		process = thread_get_current_thread()->team->id;
3036 
3037 	state = disable_interrupts();
3038 	GRAB_THREAD_LOCK();
3039 
3040 	thread = thread_get_thread_struct_locked(process);
3041 	if (thread != NULL)
3042 		result = thread->team->group_id;
3043 
3044 	RELEASE_THREAD_LOCK();
3045 	restore_interrupts(state);
3046 
3047 	return thread != NULL ? result : B_BAD_VALUE;
3048 }
3049 
3050 
3051 pid_t
3052 getsid(pid_t process)
3053 {
3054 	struct thread *thread;
3055 	pid_t result = -1;
3056 	cpu_status state;
3057 
3058 	if (process == 0)
3059 		process = thread_get_current_thread()->team->id;
3060 
3061 	state = disable_interrupts();
3062 	GRAB_THREAD_LOCK();
3063 
3064 	thread = thread_get_thread_struct_locked(process);
3065 	if (thread != NULL)
3066 		result = thread->team->session_id;
3067 
3068 	RELEASE_THREAD_LOCK();
3069 	restore_interrupts(state);
3070 
3071 	return thread != NULL ? result : B_BAD_VALUE;
3072 }
3073 
3074 
3075 //	#pragma mark - User syscalls
3076 
3077 
3078 status_t
3079 _user_exec(const char *userPath, const char* const* userFlatArgs,
3080 	size_t flatArgsSize, int32 argCount, int32 envCount)
3081 {
3082 	// NOTE: Since this function normally doesn't return, don't use automatic
3083 	// variables that need destruction in the function scope.
3084 	char path[B_PATH_NAME_LENGTH];
3085 
3086 	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
3087 		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
3088 		return B_BAD_ADDRESS;
3089 
3090 	// copy and relocate the flat arguments
3091 	char** flatArgs;
3092 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3093 		argCount, envCount, flatArgs);
3094 
3095 	if (error == B_OK) {
3096 		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
3097 			envCount);
3098 			// this one only returns in case of error
3099 	}
3100 
3101 	free(flatArgs);
3102 	return error;
3103 }
3104 
3105 
3106 thread_id
3107 _user_fork(void)
3108 {
3109 	return fork_team();
3110 }
3111 
3112 
3113 thread_id
3114 _user_wait_for_child(thread_id child, uint32 flags, int32 *_userReason, status_t *_userReturnCode)
3115 {
3116 	status_t returnCode;
3117 	int32 reason;
3118 	thread_id deadChild;
3119 
3120 	if ((_userReason != NULL && !IS_USER_ADDRESS(_userReason))
3121 		|| (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode)))
3122 		return B_BAD_ADDRESS;
3123 
3124 	deadChild = wait_for_child(child, flags, &reason, &returnCode);
3125 
3126 	if (deadChild >= B_OK) {
3127 		// copy result data on successful completion
3128 		if ((_userReason != NULL
3129 				&& user_memcpy(_userReason, &reason, sizeof(int32)) < B_OK)
3130 			|| (_userReturnCode != NULL
3131 				&& user_memcpy(_userReturnCode, &returnCode, sizeof(status_t))
3132 					< B_OK)) {
3133 			return B_BAD_ADDRESS;
3134 		}
3135 
3136 		return deadChild;
3137 	}
3138 
3139 	return syscall_restart_handle_post(deadChild);
3140 }
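
/*!	Userland view (illustrative addition, not part of the original source):
	libroot's waitpid() builds the familiar POSIX status word from the
	reason/return code pair copied out above, so ordinary POSIX code sits on
	top of this syscall:

	\code
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void
	reap_children(void)
	{
		int status;
		pid_t child;

		while ((child = waitpid(-1, &status, WNOHANG | WUNTRACED)) > 0) {
			if (WIFEXITED(status))
				printf("%d exited with %d\n", (int)child, WEXITSTATUS(status));
			else if (WIFSIGNALED(status))
				printf("%d killed by signal %d\n", (int)child, WTERMSIG(status));
			else if (WIFSTOPPED(status))
				printf("%d stopped by signal %d\n", (int)child, WSTOPSIG(status));
		}
	}
	\endcode
*/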
3141 
3142 
3143 pid_t
3144 _user_process_info(pid_t process, int32 which)
3145 {
3146 	// we only allow returning the parent of the current process
3147 	if (which == PARENT_ID
3148 		&& process != 0 && process != thread_get_current_thread()->team->id)
3149 		return B_BAD_VALUE;
3150 
3151 	switch (which) {
3152 		case SESSION_ID:
3153 			return getsid(process);
3154 		case GROUP_ID:
3155 			return getpgid(process);
3156 		case PARENT_ID:
3157 			return getppid();
3158 	}
3159 
3160 	return B_BAD_VALUE;
3161 }
3162 
3163 
3164 pid_t
3165 _user_setpgid(pid_t processID, pid_t groupID)
3166 {
3167 	struct thread *thread = thread_get_current_thread();
3168 	struct team *currentTeam = thread->team;
3169 	struct team *team;
3170 
3171 	if (groupID < 0)
3172 		return B_BAD_VALUE;
3173 
3174 	if (processID == 0)
3175 		processID = currentTeam->id;
3176 
3177 	// if the group ID is not specified, use the target process' ID
3178 	if (groupID == 0)
3179 		groupID = processID;
3180 
3181 	if (processID == currentTeam->id) {
3182 		// we set our own group
3183 
3184 		// we must not change our process group ID if we're a session leader
3185 		if (is_session_leader(currentTeam))
3186 			return B_NOT_ALLOWED;
3187 	} else {
3188 		// another team is the target of the call -- check it out
3189 		InterruptsSpinLocker _(gTeamSpinlock);
3190 
3191 		team = team_get_team_struct_locked(processID);
3192 		if (team == NULL)
3193 			return ESRCH;
3194 
3195 		// The team must be a child of the calling team and in the same session.
3196 		// (If that's the case it isn't a session leader either.)
3197 		if (team->parent != currentTeam
3198 			|| team->session_id != currentTeam->session_id) {
3199 			return B_NOT_ALLOWED;
3200 		}
3201 
3202 		if (team->group_id == groupID)
3203 			return groupID;
3204 
3205 		// The call is also supposed to fail on a child that has already
3206 		// executed exec*() [EACCES].
3207 		if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
3208 			return EACCES;
3209 	}
3210 
3211 	struct process_group *group = NULL;
3212 	if (groupID == processID) {
3213 		// A new process group might be needed.
3214 		group = create_process_group(groupID);
3215 		if (group == NULL)
3216 			return B_NO_MEMORY;
3217 
3218 		// Assume orphaned. We consider the situation of the team's parent
3219 		// below.
3220 		group->orphaned = true;
3221 	}
3222 
3223 	status_t status = B_OK;
3224 	struct process_group *freeGroup = NULL;
3225 
3226 	InterruptsSpinLocker locker(gTeamSpinlock);
3227 
3228 	team = team_get_team_struct_locked(processID);
3229 	if (team != NULL) {
3230 		// check the conditions again -- they might have changed in the meantime
3231 		if (is_session_leader(team)
3232 			|| team->session_id != currentTeam->session_id) {
3233 			status = B_NOT_ALLOWED;
3234 		} else if (team != currentTeam
3235 				&& (team->flags & TEAM_FLAG_EXEC_DONE) != 0) {
3236 			status = EACCES;
3237 		} else if (team->group_id == groupID) {
3238 			// the team is already in the desired process group
3239 			freeGroup = group;
3240 		} else {
3241 			// Check if a process group with the requested ID already exists.
3242 			struct process_group *targetGroup
3243 				= team_get_process_group_locked(team->group->session, groupID);
3244 			if (targetGroup != NULL) {
3245 				// In case of processID == groupID we have to free the
3246 				// allocated group.
3247 				freeGroup = group;
3248 			} else if (processID == groupID) {
3249 				// We created a new process group, so insert it into the
3250 				// team's session.
3251 				insert_group_into_session(team->group->session, group);
3252 				targetGroup = group;
3253 			}
3254 
3255 			if (targetGroup != NULL) {
3256 				// we got a group, let's move the team there
3257 				process_group* oldGroup = team->group;
3258 
3259 				remove_team_from_group(team);
3260 				insert_team_into_group(targetGroup, team);
3261 
3262 				// Update the "orphaned" flag of all potentially affected
3263 				// groups.
3264 
3265 				// the team's old group
3266 				if (oldGroup->teams != NULL) {
3267 					oldGroup->orphaned = false;
3268 					update_orphaned_process_group(oldGroup, -1);
3269 				}
3270 
3271 				// the team's new group
3272 				struct team* parent = team->parent;
3273 				targetGroup->orphaned &= parent == NULL
3274 					|| parent->group == targetGroup
3275 					|| team->parent->session_id != team->session_id;
3276 
3277 				// children's groups
3278 				struct team* child = team->children;
3279 				while (child != NULL) {
3280 					child->group->orphaned = false;
3281 					update_orphaned_process_group(child->group, -1);
3282 
3283 					child = child->siblings_next;
3284 				}
3285 			} else
3286 				status = B_NOT_ALLOWED;
3287 		}
3288 	} else
3289 		status = B_NOT_ALLOWED;
3290 
3291 	// Changing the process group might have changed the situation for a parent
3292 	// waiting in wait_for_child(). Hence we notify it.
3293 	if (status == B_OK)
3294 		team->parent->dead_children->condition_variable.NotifyAll(false);
3295 
3296 	locker.Unlock();
3297 
3298 	if (status != B_OK) {
3299 		// in case of error, the group hasn't been added into the hash
3300 		team_delete_process_group(group);
3301 	}
3302 
3303 	team_delete_process_group(freeGroup);
3304 
3305 	return status == B_OK ? groupID : status;
3306 }
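
/*!	Userland usage sketch (illustrative addition, not part of the original
	source): the classic job-control idiom served by _user_setpgid(). Both the
	shell and the child issue the same setpgid() call to avoid the race between
	fork() and exec*() -- after the exec the call fails with EACCES, as
	implemented above.

	\code
	#include <unistd.h>

	static pid_t
	spawn_job(const char* path, char* const argv[])
	{
		pid_t child = fork();
		if (child == 0) {
			setpgid(0, 0);
				// child: become leader of a new process group
			execv(path, argv);
			_exit(127);
		}
		if (child > 0)
			setpgid(child, child);
				// parent: same call; whichever runs first wins
		return child;
	}
	\endcode
*/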
3307 
3308 
3309 pid_t
3310 _user_setsid(void)
3311 {
3312 	struct team *team = thread_get_current_thread()->team;
3313 	struct process_session *session;
3314 	struct process_group *group;
3315 	cpu_status state;
3316 	bool failed = false;
3317 
3318 	// the team must not already be a process group leader
3319 	if (is_process_group_leader(team))
3320 		return B_NOT_ALLOWED;
3321 
3322 	group = create_process_group(team->id);
3323 	if (group == NULL)
3324 		return B_NO_MEMORY;
3325 
3326 	session = create_process_session(group->id);
3327 	if (session == NULL) {
3328 		team_delete_process_group(group);
3329 		return B_NO_MEMORY;
3330 	}
3331 
3332 	state = disable_interrupts();
3333 	GRAB_TEAM_LOCK();
3334 
3335 	// this may have changed since the check above
3336 	if (!is_process_group_leader(team)) {
3337 		remove_team_from_group(team);
3338 
3339 		insert_group_into_session(session, group);
3340 		insert_team_into_group(group, team);
3341 	} else
3342 		failed = true;
3343 
3344 	RELEASE_TEAM_LOCK();
3345 	restore_interrupts(state);
3346 
3347 	if (failed) {
3348 		team_delete_process_group(group);
3349 		free(session);
3350 		return B_NOT_ALLOWED;
3351 	}
3352 
3353 	return team->group_id;
3354 }
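
/*!	Userland usage sketch (illustrative addition, not part of the original
	source): setsid() is the core of the usual daemonization sequence. The
	initial fork() guarantees that the caller is not a process group leader,
	which is exactly the condition checked above.

	\code
	#include <stdlib.h>
	#include <unistd.h>

	static void
	daemonize(void)
	{
		if (fork() != 0)
			exit(0);
				// the parent exits; the child cannot be a group leader
		if (setsid() < 0)
			exit(1);
				// become session and group leader, drop the controlling tty
		// a second fork() would additionally prevent the daemon from ever
		// reacquiring a controlling tty
	}
	\endcode
*/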
3355 
3356 
3357 status_t
3358 _user_wait_for_team(team_id id, status_t *_userReturnCode)
3359 {
3360 	status_t returnCode;
3361 	status_t status;
3362 
3363 	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
3364 		return B_BAD_ADDRESS;
3365 
3366 	status = wait_for_team(id, &returnCode);
3367 	if (status >= B_OK && _userReturnCode != NULL) {
3368 		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode)) < B_OK)
3369 			return B_BAD_ADDRESS;
3370 		return B_OK;
3371 	}
3372 
3373 	return syscall_restart_handle_post(status);
3374 }
3375 
3376 
3377 thread_id
3378 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
3379 	int32 argCount, int32 envCount, int32 priority, uint32 flags,
3380 	port_id errorPort, uint32 errorToken)
3381 {
3382 	TRACE(("_user_load_image: argc = %ld\n", argCount));
3383 
3384 	if (argCount < 1)
3385 		return B_BAD_VALUE;
3386 
3387 	// copy and relocate the flat arguments
3388 	char** flatArgs;
3389 	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
3390 		argCount, envCount, flatArgs);
3391 	if (error != B_OK)
3392 		return error;
3393 
3394 	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
3395 		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
3396 		errorToken);
3397 
3398 	free(flatArgs);
3399 		// load_image_internal() unset our variable if it took over ownership
3400 
3401 	return thread;
3402 }
3403 
3404 
3405 void
3406 _user_exit_team(status_t returnValue)
3407 {
3408 	struct thread *thread = thread_get_current_thread();
3409 
3410 	thread->exit.status = returnValue;
3411 	thread->exit.reason = THREAD_RETURN_EXIT;
3412 
3413 	send_signal(thread->id, SIGKILL);
3414 }
3415 
3416 
3417 status_t
3418 _user_kill_team(team_id team)
3419 {
3420 	return kill_team(team);
3421 }
3422 
3423 
3424 status_t
3425 _user_get_team_info(team_id id, team_info *userInfo)
3426 {
3427 	status_t status;
3428 	team_info info;
3429 
3430 	if (!IS_USER_ADDRESS(userInfo))
3431 		return B_BAD_ADDRESS;
3432 
3433 	status = _get_team_info(id, &info, sizeof(team_info));
3434 	if (status == B_OK) {
3435 		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3436 			return B_BAD_ADDRESS;
3437 	}
3438 
3439 	return status;
3440 }
3441 
3442 
3443 status_t
3444 _user_get_next_team_info(int32 *userCookie, team_info *userInfo)
3445 {
3446 	status_t status;
3447 	team_info info;
3448 	int32 cookie;
3449 
3450 	if (!IS_USER_ADDRESS(userCookie)
3451 		|| !IS_USER_ADDRESS(userInfo)
3452 		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3453 		return B_BAD_ADDRESS;
3454 
3455 	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
3456 	if (status != B_OK)
3457 		return status;
3458 
3459 	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3460 		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
3461 		return B_BAD_ADDRESS;
3462 
3463 	return status;
3464 }
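
/*!	Userland usage sketch (illustrative addition, not part of the original
	source): the public get_next_team_info()/get_team_info() functions from
	<OS.h> are thin wrappers around the syscalls above and are typically used
	like this:

	\code
	#include <stdio.h>
	#include <OS.h>

	static void
	list_teams(void)
	{
		team_info info;
		int32 cookie = 0;

		while (get_next_team_info(&cookie, &info) == B_OK)
			printf("%5ld  %s\n", info.team, info.args);
	}
	\endcode
*/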
3465 
3466 
3467 team_id
3468 _user_get_current_team(void)
3469 {
3470 	return team_get_current_team_id();
3471 }
3472 
3473 
3474 status_t
3475 _user_get_team_usage_info(team_id team, int32 who, team_usage_info *userInfo, size_t size)
3476 {
3477 	team_usage_info info;
3478 	status_t status;
3479 
3480 	if (!IS_USER_ADDRESS(userInfo))
3481 		return B_BAD_ADDRESS;
3482 
3483 	status = _get_team_usage_info(team, who, &info, size);
3484 	if (status != B_OK)
3485 		return status;
3486 
3487 	if (user_memcpy(userInfo, &info, size) < B_OK)
3488 		return B_BAD_ADDRESS;
3489 
3490 	return status;
3491 }
3492 
3493