1 /*
2 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3 * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
5 * Distributed under the terms of the MIT License.
6 *
7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8 * Distributed under the terms of the NewOS License.
9 */
10
11
12 /*! Team functions */
13
14
15 #include <team.h>
16
17 #include <errno.h>
18 #include <stdio.h>
19 #include <stdlib.h>
20 #include <string.h>
21 #include <sys/wait.h>
22
23 #include <OS.h>
24
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
27
28 #include <extended_system_info_defs.h>
29
30 #include <commpage.h>
31 #include <boot_device.h>
32 #include <elf.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
35 #include <fs/KPath.h>
36 #include <heap.h>
37 #include <int.h>
38 #include <kernel.h>
39 #include <kimage.h>
40 #include <kscheduler.h>
41 #include <ksignal.h>
42 #include <Notifications.h>
43 #include <port.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
46 #include <safemode.h>
47 #include <sem.h>
48 #include <syscall_process_info.h>
49 #include <syscall_load_image.h>
50 #include <syscall_restart.h>
51 #include <syscalls.h>
52 #include <tls.h>
53 #include <tracing.h>
54 #include <user_mutex.h>
55 #include <user_runtime.h>
56 #include <user_thread.h>
57 #include <usergroup.h>
58 #include <vfs.h>
59 #include <vm/vm.h>
60 #include <vm/VMAddressSpace.h>
61 #include <util/AutoLock.h>
62 #include <util/ThreadAutoLock.h>
63
64 #include "TeamThreadTables.h"
65
66
67 //#define TRACE_TEAM
68 #ifdef TRACE_TEAM
69 # define TRACE(x) dprintf x
70 #else
71 # define TRACE(x) ;
72 #endif
73
74
// Hash key used to look up a Team by its ID.
struct team_key {
	team_id id;
};
78
// Arguments handed from the creator of a team to the loader running in the
// new team: the executable path, the flattened argv/envp block, the umask to
// inherit, and the port used to report load errors back to the creator.
struct team_arg {
	char *path;				// path of the executable to load
	char **flat_args;		// flattened argument + environment block
	size_t flat_args_size;	// total size of the flattened block
	uint32 arg_count;		// number of entries in argv
	uint32 env_count;		// number of entries in envp
	mode_t umask;			// umask for the new team
	uint32 flags;			// TEAM_ARGS_FLAG_* below
	port_id error_port;		// port to send a load error notification to
	uint32 error_token;		// token identifying that notification
};

// team_arg::flags: disable address space layout randomization for this team
#define TEAM_ARGS_FLAG_NO_ASLR 0x01
92
93
94 namespace {
95
96
// Notification service broadcasting team lifecycle events (see Notify()
// below) to registered listeners.
class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};
103
104
105 // #pragma mark - TeamTable
106
107
108 typedef BKernel::TeamThreadTable<Team> TeamTable;
109
110
111 // #pragma mark - ProcessGroupHashDefinition
112
113
114 struct ProcessGroupHashDefinition {
115 typedef pid_t KeyType;
116 typedef ProcessGroup ValueType;
117
HashKey__anon6ae993530111::ProcessGroupHashDefinition118 size_t HashKey(pid_t key) const
119 {
120 return key;
121 }
122
Hash__anon6ae993530111::ProcessGroupHashDefinition123 size_t Hash(ProcessGroup* value) const
124 {
125 return HashKey(value->id);
126 }
127
Compare__anon6ae993530111::ProcessGroupHashDefinition128 bool Compare(pid_t key, ProcessGroup* value) const
129 {
130 return value->id == key;
131 }
132
GetLink__anon6ae993530111::ProcessGroupHashDefinition133 ProcessGroup*& GetLink(ProcessGroup* value) const
134 {
135 return value->next;
136 }
137 };
138
139 typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;
140
141
142 } // unnamed namespace
143
144
145 // #pragma mark -
146
147
// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

// the kernel team (NOTE(review): assigned elsewhere in the file -- not
// visible in this chunk)
static Team* sKernelTeam = NULL;
// when true, user add-ons are not loaded (presumably set via safemode
// settings -- confirm against the init code)
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

// service used to publish team added/removed/exec events
static TeamNotificationService sNotificationService;

// reserved size resp. initial size of a team's user data area
static const size_t kTeamUserDataReservedSize = 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize = 4 * B_PAGE_SIZE;
174
175
176 // #pragma mark - TeamListIterator
177
178
TeamListIterator()179 TeamListIterator::TeamListIterator()
180 {
181 // queue the entry
182 InterruptsWriteSpinLocker locker(sTeamHashLock);
183 sTeamHash.InsertIteratorEntry(&fEntry);
184 }
185
186
~TeamListIterator()187 TeamListIterator::~TeamListIterator()
188 {
189 // remove the entry
190 InterruptsWriteSpinLocker locker(sTeamHashLock);
191 sTeamHash.RemoveIteratorEntry(&fEntry);
192 }
193
194
195 Team*
Next()196 TeamListIterator::Next()
197 {
198 // get the next team -- if there is one, get reference for it
199 InterruptsWriteSpinLocker locker(sTeamHashLock);
200 Team* team = sTeamHash.NextElement(&fEntry);
201 if (team != NULL)
202 team->AcquireReference();
203
204 return team;
205 }
206
207
208 // #pragma mark - Tracing
209
210
211 #if TEAM_TRACING
212 namespace TeamTracing {
213
// Trace entry recorded when a team is forked; remembers the ID of the new
// team's main thread.
class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};
231
232
233 class ExecTeam : public AbstractTraceEntry {
234 public:
ExecTeam(const char * path,int32 argCount,const char * const * args,int32 envCount,const char * const * env)235 ExecTeam(const char* path, int32 argCount, const char* const* args,
236 int32 envCount, const char* const* env)
237 :
238 fArgCount(argCount),
239 fArgs(NULL)
240 {
241 fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
242 false);
243
244 // determine the buffer size we need for the args
245 size_t argBufferSize = 0;
246 for (int32 i = 0; i < argCount; i++)
247 argBufferSize += strlen(args[i]) + 1;
248
249 // allocate a buffer
250 fArgs = (char*)alloc_tracing_buffer(argBufferSize);
251 if (fArgs) {
252 char* buffer = fArgs;
253 for (int32 i = 0; i < argCount; i++) {
254 size_t argSize = strlen(args[i]) + 1;
255 memcpy(buffer, args[i], argSize);
256 buffer += argSize;
257 }
258 }
259
260 // ignore env for the time being
261 (void)envCount;
262 (void)env;
263
264 Initialized();
265 }
266
AddDump(TraceOutput & out)267 virtual void AddDump(TraceOutput& out)
268 {
269 out.Print("team exec, \"%p\", args:", fPath);
270
271 if (fArgs != NULL) {
272 char* args = fArgs;
273 for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
274 out.Print(" \"%s\"", args);
275 args += strlen(args) + 1;
276 }
277 } else
278 out.Print(" <too long>");
279 }
280
281 private:
282 char* fPath;
283 int32 fArgCount;
284 char* fArgs;
285 };
286
287
288 static const char*
job_control_state_name(job_control_state state)289 job_control_state_name(job_control_state state)
290 {
291 switch (state) {
292 case JOB_CONTROL_STATE_NONE:
293 return "none";
294 case JOB_CONTROL_STATE_STOPPED:
295 return "stopped";
296 case JOB_CONTROL_STATE_CONTINUED:
297 return "continued";
298 case JOB_CONTROL_STATE_DEAD:
299 return "dead";
300 default:
301 return "invalid";
302 }
303 }
304
305
// Trace entry recorded when a team's job control state changes; remembers
// the team, the new state, and the triggering signal (0 if none).
class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState, Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};
329
330
// Trace entry recorded when a team starts waiting for a child (wait()
// family); remembers the child ID and the wait flags.
class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t				fChild;
	uint32				fFlags;
};
351
352
// Trace entry recorded when a wait-for-child completes. On success the
// child's job control entry is recorded; on failure only the (negative)
// error code is stored in fTeam and the other fields remain uninitialized.
class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		// fTeam doubles as the error code in the failure case (< 0)
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};
393
394 } // namespace TeamTracing
395
396 # define T(x) new(std::nothrow) TeamTracing::x;
397 #else
398 # define T(x) ;
399 #endif
400
401
402 // #pragma mark - TeamNotificationService
403
404
// Registers the service under the name "teams".
TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}
409
410
/*!	Broadcasts a team event to all listeners. The message carries the event
	code, the team ID, and a raw pointer to the Team object.
*/
void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	// build the message in a stack buffer -- no allocation needed
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
423
424
425 // #pragma mark - Team
426
427
/*!	Initializes a Team with the given ID. Most members are set to their
	"empty" values; the object is not yet inserted into any global
	structures. Check job_control_entry and fQueuedSignalsCounter for NULL
	afterwards -- allocation of either may have failed (Create() does this).
*/
Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	// not yet linked into the team/parent/group lists
	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	user_mutex_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	// the entry our parent will use to wait() for us; may fail to allocate
	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	// user data area bookkeeping -- the area is created later
	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	// accumulated CPU times of threads that already died
	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex -- the kernel team gets a static name, user teams embed
	// their ID
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	// kernel team: unlimited queued signals; may fail to allocate
	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}
514
515
/*!	Frees all resources still associated with the team: I/O context, ports,
	semaphores, timers, pending signals, and the various death/bookkeeping
	entries.
*/
Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	// delete all timers (user-defined and otherwise)
	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	// free the death entries of our dead threads
	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	// delete the job control entries of our dead children
	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	// free the recycled user_thread structures
	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}
551
552
/*!	Allocates and fully initializes a new Team object.

	\param id The ID the new team shall have.
	\param name The name of the executable (may be \c NULL); only the leaf
		name is stored (cf. SetName()).
	\param kernel \c true, if the team to be created is the kernel team.
	\return The new team, or \c NULL if any part of the initialization
		failed (out of memory, arch init, or user timer creation).
*/
/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);
		// deletes the team again on any early return below

	if (name != NULL)
		team->SetName(name);

	// check initialization -- the constructor cannot report allocation
	// failures itself
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	team->start_time = system_time();

	// everything went fine
	return teamDeleter.Detach();
}
584
585
586 /*! \brief Returns the team with the given ID.
587 Returns a reference to the team.
588 Team and thread spinlock must not be held.
589 */
590 /*static*/ Team*
Get(team_id id)591 Team::Get(team_id id)
592 {
593 if (id == B_CURRENT_TEAM) {
594 Team* team = thread_get_current_thread()->team;
595 team->AcquireReference();
596 return team;
597 }
598
599 InterruptsReadSpinLocker locker(sTeamHashLock);
600 Team* team = sTeamHash.Lookup(id);
601 if (team != NULL)
602 team->AcquireReference();
603 return team;
604 }
605
606
607 /*! \brief Returns the team with the given ID in a locked state.
608 Returns a reference to the team.
609 Team and thread spinlock must not be held.
610 */
611 /*static*/ Team*
GetAndLock(team_id id)612 Team::GetAndLock(team_id id)
613 {
614 // get the team
615 Team* team = Get(id);
616 if (team == NULL)
617 return NULL;
618
619 // lock it
620 team->Lock();
621
622 // only return the team, when it isn't already dying
623 if (team->state >= TEAM_STATE_SHUTDOWN) {
624 team->Unlock();
625 team->ReleaseReference();
626 return NULL;
627 }
628
629 return team;
630 }
631
632
/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as long
	// as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to lock
		// the parent. This will succeed in most cases, simplifying things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}
675
676
677 /*! Unlocks the team and its parent team (if any).
678 */
679 void
UnlockTeamAndParent()680 Team::UnlockTeamAndParent()
681 {
682 if (parent != NULL)
683 parent->Unlock();
684
685 Unlock();
686 }
687
688
/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's lock.
	// If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest of
	// the job. (The group's lock remains held throughout.)
	Unlock();
	LockTeamAndParent(false);
}
709
710
/*!	Unlocks the team, its parent team (if any), and the team's process group.
	Locks are released in reverse order of acquisition: group, parent, team.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}
723
724
/*!	Locks the team and its process group (if it has one).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
*/
void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group can
	// change as long as we don't lock the team, we need to do a trial and error
	// loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases, simplifying
		// things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}
759
760
/*!	Unlocks the team and its process group.
	Counterpart of LockTeamAndProcessGroup(); assumes the group is non-NULL.
*/
void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}
767
768
769 void
SetName(const char * name)770 Team::SetName(const char* name)
771 {
772 if (const char* lastSlash = strrchr(name, '/'))
773 name = lastSlash + 1;
774
775 strlcpy(fName, name, B_OS_NAME_LENGTH);
776 }
777
778
/*!	Sets the team's argument string, truncated to the size of \c fArgs.
*/
void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}
784
785
786 void
SetArgs(const char * path,const char * const * otherArgs,int otherArgCount)787 Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
788 {
789 fArgs[0] = '\0';
790 strlcpy(fArgs, path, sizeof(fArgs));
791 for (int i = 0; i < otherArgCount; i++) {
792 strlcat(fArgs, " ", sizeof(fArgs));
793 strlcat(fArgs, otherArgs[i], sizeof(fArgs));
794 }
795 }
796
797
798 void
ResetSignalsOnExec()799 Team::ResetSignalsOnExec()
800 {
801 // We are supposed to keep pending signals. Signal actions shall be reset
802 // partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
803 // (for SIGCHLD it's implementation-defined). Others shall be reset to
804 // SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
805 // flags, but since there aren't any handlers, they make little sense, so
806 // we clear them.
807
808 for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
809 struct sigaction& action = SignalActionFor(i);
810 if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
811 action.sa_handler = SIG_DFL;
812
813 action.sa_mask = 0;
814 action.sa_flags = 0;
815 action.sa_userdata = NULL;
816 }
817 }
818
819
/*!	Copies all signal actions from \a parent (used on fork()).
*/
void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}
825
826
/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully,
		\c B_BAD_TEAM_ID if the team is already shutting down, or
		\c EAGAIN if the per-team user-defined timer limit was reached.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
853
854
/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed. If it carries a user-defined ID,
		the team's user-defined timer count is decremented as well.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}
870
871
/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are deleted,
		otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	// DeleteTimers() returns the number of user-defined timers deleted
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}
886
887
888 /*! If not at the limit yet, increments the team's user-defined timer count.
889 \return \c true, if the limit wasn't reached yet, \c false otherwise.
890 */
891 bool
CheckAddUserDefinedTimer()892 Team::CheckAddUserDefinedTimer()
893 {
894 int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
895 if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
896 atomic_add(&fUserDefinedTimerCount, -1);
897 return false;
898 }
899
900 return true;
901 }
902
903
/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}
912
913
/*!	Deactivates all of the team's CPU-time based user timers (both total CPU
	time and user CPU time timers). Deactivate() removes each timer from its
	list, which is why plain head-polling loops terminate.
*/
void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}
923
924
/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one team's
		threads, don't add the time since the last time \c last_time was
		updated. Should be used in "thread unscheduled" scheduler callbacks,
		since although the thread is still running at that time, its time has
		already been stopped.
	\param lockedThread A thread of this team whose \c time_lock the caller
		already holds (may be \c NULL); its lock is not re-acquired.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	// start with the time accumulated by threads that already died
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		// last_time != 0 means the thread is currently running; add the
		// time of the ongoing run unless the caller asked us not to
		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
			// don't unlock a lock the caller still holds
	}

	return time;
}
962
963
/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	// start with the user time accumulated by threads that already died
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		// a running thread (last_time != 0) that is not in the kernel is
		// accumulating user time right now
		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
988
989
990 // #pragma mark - ProcessGroup
991
992
/*!	Initializes a process group with the given ID. The group is not yet
	associated with a session nor inserted into the global hash (see
	Publish()/PublishLocked()).
*/
ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1004
1005
/*!	Removes the group from the orphaned-check list (if queued), from the
	global group hash, and releases its session reference. A group that was
	never published (fSession == NULL) is not in the hash.
*/
ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}
1029
1030
1031 /*static*/ ProcessGroup*
Get(pid_t id)1032 ProcessGroup::Get(pid_t id)
1033 {
1034 InterruptsSpinLocker groupHashLocker(sGroupHashLock);
1035 ProcessGroup* group = sGroupHash.Lookup(id);
1036 if (group != NULL)
1037 group->AcquireReference();
1038 return group;
1039 }
1040
1041
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}
1051
1052
/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	// a group with this ID must not be published yet
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}
1066
1067
/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of the
	// group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	// Check each member: one parent outside the group but inside the session
	// suffices to make the group non-orphaned.
	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
1097
1098
1099 void
ScheduleOrphanedCheck()1100 ProcessGroup::ScheduleOrphanedCheck()
1101 {
1102 MutexLocker orphanedCheckLocker(sOrphanedCheckLock);
1103
1104 if (!fInOrphanedCheckList) {
1105 sOrphanedCheckProcessGroups.Add(this);
1106 fInOrphanedCheckList = true;
1107 }
1108 }
1109
1110
/*!	Clears the queued-for-orphaned-check flag. NOTE(review): does not remove
	the group from sOrphanedCheckProcessGroups itself -- presumably the
	caller dequeued it already while holding sOrphanedCheckLock; confirm at
	the call sites.
*/
void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}
1116
1117
1118 // #pragma mark - ProcessSession
1119
1120
/*!	Initializes a session with the given ID, no controlling TTY, and no
	foreground process group.
*/
ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(NULL),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}
1131
1132
ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}
1137
1138
1139 // #pragma mark - KDL functions
1140
1141
/*!	KDL helper: prints the interesting fields of \a team to the kernel
	debugger console.
*/
static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}
1173
1174
/*!	Debugger command: prints detailed information about one team.
	Without an argument the current thread's team is dumped. Otherwise the
	argument is interpreted as a raw Team pointer (if it is a kernel
	address), or matched against the teams' names and IDs.
*/
static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		// no argument given -- dump the current thread's team
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack: a kernel address is treated as a Team pointer directly
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team table, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
		Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found)
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1], (team_id)arg);
	return 0;
}
1212
1213
1214 static int
dump_teams(int argc,char ** argv)1215 dump_teams(int argc, char** argv)
1216 {
1217 kprintf("%-*s id %-*s name\n", B_PRINTF_POINTER_WIDTH, "team",
1218 B_PRINTF_POINTER_WIDTH, "parent");
1219
1220 for (TeamTable::Iterator it = sTeamHash.GetIterator();
1221 Team* team = it.Next();) {
1222 kprintf("%p%7" B_PRId32 " %p %s\n", team, team->id, team->parent, team->Name());
1223 }
1224
1225 return 0;
1226 }
1227
1228
1229 // #pragma mark - Private functions
1230
1231
1232 /*! Get the parent of a given process.
1233
1234 Used in the implementation of getppid (where a process can get its own
1235 parent, only) as well as in user_process_info where the information is
1236 available to anyone (allowing to display a tree of running processes)
1237 */
1238 static pid_t
_getppid(pid_t id)1239 _getppid(pid_t id)
1240 {
1241 if (id < 0) {
1242 errno = EINVAL;
1243 return -1;
1244 }
1245
1246 if (id == 0) {
1247 Team* team = thread_get_current_thread()->team;
1248 TeamLocker teamLocker(team);
1249 if (team->parent == NULL) {
1250 errno = EINVAL;
1251 return -1;
1252 }
1253 return team->parent->id;
1254 }
1255
1256 Team* team = Team::GetAndLock(id);
1257 if (team == NULL) {
1258 errno = ESRCH;
1259 return -1;
1260 }
1261
1262 pid_t parentID;
1263
1264 if (team->parent == NULL) {
1265 errno = EINVAL;
1266 parentID = -1;
1267 } else
1268 parentID = team->parent->id;
1269
1270 team->UnlockAndReleaseReference();
1271
1272 return parentID;
1273 }
1274
1275
1276 /*! Inserts team \a team into the child list of team \a parent.
1277
1278 The caller must hold the lock of both \a parent and \a team.
1279
1280 \param parent The parent team.
1281 \param team The team to be inserted into \a parent's child list.
1282 */
1283 static void
insert_team_into_parent(Team * parent,Team * team)1284 insert_team_into_parent(Team* parent, Team* team)
1285 {
1286 ASSERT(parent != NULL);
1287
1288 team->siblings_next = parent->children;
1289 parent->children = team;
1290 team->parent = parent;
1291 }
1292
1293
1294 /*! Removes team \a team from the child list of team \a parent.
1295
1296 The caller must hold the lock of both \a parent and \a team.
1297
1298 \param parent The parent team.
1299 \param team The team to be removed from \a parent's child list.
1300 */
1301 static void
remove_team_from_parent(Team * parent,Team * team)1302 remove_team_from_parent(Team* parent, Team* team)
1303 {
1304 Team* child;
1305 Team* last = NULL;
1306
1307 for (child = parent->children; child != NULL;
1308 child = child->siblings_next) {
1309 if (child == team) {
1310 if (last == NULL)
1311 parent->children = child->siblings_next;
1312 else
1313 last->siblings_next = child->siblings_next;
1314
1315 team->parent = NULL;
1316 break;
1317 }
1318 last = child;
1319 }
1320 }
1321
1322
1323 /*! Returns whether the given team is a session leader.
1324 The caller must hold the team's lock or its process group's lock.
1325 */
1326 static bool
is_session_leader(Team * team)1327 is_session_leader(Team* team)
1328 {
1329 return team->session_id == team->id;
1330 }
1331
1332
1333 /*! Returns whether the given team is a process group leader.
1334 The caller must hold the team's lock or its process group's lock.
1335 */
1336 static bool
is_process_group_leader(Team * team)1337 is_process_group_leader(Team* team)
1338 {
1339 return team->group_id == team->id;
1340 }
1341
1342
1343 /*! Inserts the given team into the given process group.
1344 The caller must hold the process group's lock, the team's lock, and the
1345 team's parent's lock.
1346 */
1347 static void
insert_team_into_group(ProcessGroup * group,Team * team)1348 insert_team_into_group(ProcessGroup* group, Team* team)
1349 {
1350 team->group = group;
1351 team->group_id = group->id;
1352 team->session_id = group->Session()->id;
1353
1354 team->group_next = group->teams;
1355 group->teams = team;
1356 group->AcquireReference();
1357 }
1358
1359
1360 /*! Removes the given team from its process group.
1361
1362 The caller must hold the process group's lock, the team's lock, and the
1363 team's parent's lock. Interrupts must be enabled.
1364
1365 \param team The team that'll be removed from its process group.
1366 */
1367 static void
remove_team_from_group(Team * team)1368 remove_team_from_group(Team* team)
1369 {
1370 ProcessGroup* group = team->group;
1371 Team* current;
1372 Team* last = NULL;
1373
1374 // the team must be in a process group to let this function have any effect
1375 if (group == NULL)
1376 return;
1377
1378 for (current = group->teams; current != NULL;
1379 current = current->group_next) {
1380 if (current == team) {
1381 if (last == NULL)
1382 group->teams = current->group_next;
1383 else
1384 last->group_next = current->group_next;
1385
1386 break;
1387 }
1388 last = current;
1389 }
1390
1391 team->group = NULL;
1392 team->group_next = NULL;
1393 team->group_id = -1;
1394
1395 group->ReleaseReference();
1396 }
1397
1398
/*!	Creates the team's user data area (used e.g. for the per-thread
	user_thread structures) and reserves the surrounding address range, so
	the area can later be resized without other allocations getting in the
	way.

	\param team The team to create the user data area for.
	\param exactAddress If not \c NULL, the exact address where the area
		shall be created (used by fork_team(), where the child must mirror
		the parent's layout). Otherwise a randomized address based at
		KERNEL_USER_DATA_BASE is chosen.
	\return \c B_OK on success, another error code otherwise.
*/
static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		// the caller dictates the exact address
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		// pick a randomized address based at KERNEL_USER_DATA_BASE
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	// reserve the full range up front, so the area can grow later
	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		// Either the reservation succeeded (use the address it returned), or
		// an exact address was requested -- then try that exact address even
		// if the reservation failed.
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		// no reservation -- fall back to an unreserved randomized address
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}
1443
1444
1445 static void
delete_team_user_data(Team * team)1446 delete_team_user_data(Team* team)
1447 {
1448 if (team->user_data_area >= 0) {
1449 vm_delete_area(team->id, team->user_data_area, true);
1450 vm_unreserve_address_range(team->id, (void*)team->user_data,
1451 kTeamUserDataReservedSize);
1452
1453 team->user_data = 0;
1454 team->used_user_data = 0;
1455 team->user_data_size = 0;
1456 team->user_data_area = -1;
1457 while (free_user_thread* entry = team->free_user_threads) {
1458 team->free_user_threads = entry->next;
1459 free(entry);
1460 }
1461 }
1462 }
1463
1464
1465 static status_t
copy_user_process_args(const char * const * userFlatArgs,size_t flatArgsSize,int32 argCount,int32 envCount,char ** & _flatArgs)1466 copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
1467 int32 argCount, int32 envCount, char**& _flatArgs)
1468 {
1469 if (argCount < 0 || envCount < 0)
1470 return B_BAD_VALUE;
1471
1472 if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
1473 return B_TOO_MANY_ARGS;
1474 if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
1475 return B_BAD_VALUE;
1476
1477 if (!IS_USER_ADDRESS(userFlatArgs))
1478 return B_BAD_ADDRESS;
1479
1480 // allocate kernel memory
1481 char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
1482 if (flatArgs == NULL)
1483 return B_NO_MEMORY;
1484
1485 if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
1486 free(flatArgs);
1487 return B_BAD_ADDRESS;
1488 }
1489
1490 // check and relocate the array
1491 status_t error = B_OK;
1492 const char* stringBase = (char*)flatArgs + argCount + envCount + 2;
1493 const char* stringEnd = (char*)flatArgs + flatArgsSize;
1494 for (int32 i = 0; i < argCount + envCount + 2; i++) {
1495 if (i == argCount || i == argCount + envCount + 1) {
1496 // check array null termination
1497 if (flatArgs[i] != NULL) {
1498 error = B_BAD_VALUE;
1499 break;
1500 }
1501 } else {
1502 // check string
1503 char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
1504 size_t maxLen = stringEnd - arg;
1505 if (arg < stringBase || arg >= stringEnd
1506 || strnlen(arg, maxLen) == maxLen) {
1507 error = B_BAD_VALUE;
1508 break;
1509 }
1510
1511 flatArgs[i] = arg;
1512 }
1513 }
1514
1515 if (error == B_OK)
1516 _flatArgs = flatArgs;
1517 else
1518 free(flatArgs);
1519
1520 return error;
1521 }
1522
1523
1524 static void
free_team_arg(struct team_arg * teamArg)1525 free_team_arg(struct team_arg* teamArg)
1526 {
1527 if (teamArg != NULL) {
1528 free(teamArg->flat_args);
1529 free(teamArg->path);
1530 free(teamArg);
1531 }
1532 }
1533
1534
1535 static status_t
create_team_arg(struct team_arg ** _teamArg,const char * path,char ** flatArgs,size_t flatArgsSize,int32 argCount,int32 envCount,mode_t umask,port_id port,uint32 token)1536 create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
1537 size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
1538 port_id port, uint32 token)
1539 {
1540 struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
1541 if (teamArg == NULL)
1542 return B_NO_MEMORY;
1543
1544 teamArg->path = strdup(path);
1545 if (teamArg->path == NULL) {
1546 free(teamArg);
1547 return B_NO_MEMORY;
1548 }
1549
1550 // copy the args over
1551 teamArg->flat_args = flatArgs;
1552 teamArg->flat_args_size = flatArgsSize;
1553 teamArg->arg_count = argCount;
1554 teamArg->env_count = envCount;
1555 teamArg->flags = 0;
1556 teamArg->umask = umask;
1557 teamArg->error_port = port;
1558 teamArg->error_token = token;
1559
1560 // determine the flags from the environment
1561 const char* const* env = flatArgs + argCount + 1;
1562 for (int32 i = 0; i < envCount; i++) {
1563 if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
1564 teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
1565 break;
1566 }
1567 }
1568
1569 *_teamArg = teamArg;
1570 return B_OK;
1571 }
1572
1573
/*!	Performs the userland setup for the main thread of a freshly created
	team: copies the program arguments and environment onto the user stack,
	clones and registers the commpage, loads the runtime loader image, and
	finally enters userspace. Takes over ownership of \a args (a struct
	team_arg). Returns only in case of error; failures are cleaned up by
	the normal team deletion process.
*/
static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size                             | usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE      | actual stack
	// TLS_SIZE                         | TLS data
	// sizeof(user_space_program_args)  | argument structure for the runtime
	//                                  | loader
	// flat arguments size              | flat process arguments and environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
	// the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	// the argument structure sits above the stack and the TLS block (see
	// the layout table above)
	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	// the argument and environment pointer arrays follow the structure
	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	// copy path, counts, pointer array addresses, error channel info, and
	// the flat argument block itself into userland
	if (user_strlcpy(programArgs->program_path, path,
		sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
			sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
			sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t)) < B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
			teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// update state
	team->Lock();
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}
	user_debug_image_created(&imageInfo.basic_info);

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}
1716
1717
/*!	Entry point of the main thread of a newly created userland team.
	Runs the actual setup; if that returned (i.e. failed), records the
	error as the team's exit info and lets the thread die.
*/
static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}
1727
1728
/*!	Creates a new userland team from the given executable path and flat
	arguments, loads the runtime loader into it, and starts its main
	thread.

	\param _flatArgs The flat arguments/environment block; ownership is
		taken over on success (set to \c NULL).
	\param flatArgsSize Size of the flat block in bytes.
	\param argCount Number of program arguments (flatArgs[0] is the path).
	\param envCount Number of environment variables.
	\param priority Appears unused here -- the main thread is created with
		B_NORMAL_PRIORITY (TODO confirm intent).
	\param parentID ID of the team that becomes the new team's parent.
	\param flags B_WAIT_TILL_LOADED blocks until the loader finished.
	\param errorPort Port the runtime loader reports load errors to.
	\param errorToken Token identifying this request on \a errorPort.
	\return The ID of the new team's main thread, or an error code.
*/
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	// Keep an extra reference while the team still points at our on-stack
	// loading_info, so the team can't go away before we synchronized below.
	BReference<Team> teamLoadingReference;
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
		teamLoadingReference = teamReference;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// notify the debugger while the main thread is still suspended so that it
	// has a chance to attach early to the child.
	user_debug_team_created(teamID);

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		// We must synchronize with the thread that woke us up, to ensure
		// there are no remaining consumers of the team_loading_info.
		team->Lock();
		if (team->loading_info != NULL)
			panic("team loading wait complete, but loading_info != NULL");
		team->Unlock();
		teamLoadingReference.Unset();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
	// falls through to err4
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
1953
1954
1955 /*! Almost shuts down the current team and loads a new image into it.
1956 If successful, this function does not return and will takeover ownership of
1957 the arguments provided.
1958 This function may only be called in a userland team (caused by one of the
1959 exec*() syscalls).
1960 */
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	// make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
		thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	// Tear down the old userland image: user data area, all other areas,
	// XSI semaphore undo info, owned ports and semaphores, images, CLOEXEC
	// file descriptors, and the mutex/realtime-semaphore contexts.
	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_user_mutex_context(team->user_mutex_context);
	team->user_mutex_context = NULL;
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// does never return
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}
2105
2106
2107 static thread_id
fork_team(void)2108 fork_team(void)
2109 {
2110 Thread* parentThread = thread_get_current_thread();
2111 Team* parentTeam = parentThread->team;
2112 Team* team;
2113 arch_fork_arg* forkArgs;
2114 struct area_info info;
2115 thread_id threadID;
2116 status_t status;
2117 ssize_t areaCookie;
2118 bool teamLimitReached = false;
2119
2120 TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));
2121
2122 if (parentTeam == team_get_kernel_team())
2123 return B_NOT_ALLOWED;
2124
2125 // create a new team
2126 // TODO: this is very similar to load_image_internal() - maybe we can do
2127 // something about it :)
2128
2129 // create the main thread object
2130 Thread* thread;
2131 status = Thread::Create(parentThread->name, thread);
2132 if (status != B_OK)
2133 return status;
2134 BReference<Thread> threadReference(thread, true);
2135
2136 // create the team object
2137 team = Team::Create(thread->id, NULL, false);
2138 if (team == NULL)
2139 return B_NO_MEMORY;
2140
2141 parentTeam->LockTeamAndProcessGroup();
2142 team->Lock();
2143
2144 team->SetName(parentTeam->Name());
2145 team->SetArgs(parentTeam->Args());
2146
2147 team->commpage_address = parentTeam->commpage_address;
2148
2149 // Inherit the parent's user/group.
2150 inherit_parent_user_and_group(team, parentTeam);
2151
2152 // inherit signal handlers
2153 team->InheritSignalActions(parentTeam);
2154
2155 team->Unlock();
2156 parentTeam->UnlockTeamAndProcessGroup();
2157
2158 // inherit some team debug flags
2159 team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
2160 & B_TEAM_DEBUG_INHERITED_FLAGS;
2161
2162 forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
2163 if (forkArgs == NULL) {
2164 status = B_NO_MEMORY;
2165 goto err1;
2166 }
2167
2168 // create a new io_context for this team
2169 team->io_context = vfs_new_io_context(parentTeam->io_context, false);
2170 if (!team->io_context) {
2171 status = B_NO_MEMORY;
2172 goto err2;
2173 }
2174
2175 // duplicate the realtime sem context
2176 if (parentTeam->realtime_sem_context) {
2177 team->realtime_sem_context = clone_realtime_sem_context(
2178 parentTeam->realtime_sem_context);
2179 if (team->realtime_sem_context == NULL) {
2180 status = B_NO_MEMORY;
2181 goto err2;
2182 }
2183 }
2184
2185 // create an address space for this team
2186 status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
2187 &team->address_space);
2188 if (status < B_OK)
2189 goto err3;
2190
2191 // copy all areas of the team
2192 // TODO: should be able to handle stack areas differently (ie. don't have
2193 // them copy-on-write)
2194
2195 areaCookie = 0;
2196 while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
2197 if (info.area == parentTeam->user_data_area) {
2198 // don't clone the user area; just create a new one
2199 status = create_team_user_data(team, info.address);
2200 if (status != B_OK)
2201 break;
2202
2203 thread->user_thread = team_allocate_user_thread(team);
2204 } else {
2205 void* address;
2206 area_id area = vm_copy_area(team->address_space->ID(), info.name,
2207 &address, B_CLONE_ADDRESS, info.area);
2208 if (area < B_OK) {
2209 status = area;
2210 break;
2211 }
2212
2213 if (info.area == parentThread->user_stack_area)
2214 thread->user_stack_area = area;
2215 }
2216 }
2217
2218 if (status < B_OK)
2219 goto err4;
2220
2221 if (thread->user_thread == NULL) {
2222 #if KDEBUG
2223 panic("user data area not found, parent area is %" B_PRId32,
2224 parentTeam->user_data_area);
2225 #endif
2226 status = B_ERROR;
2227 goto err4;
2228 }
2229
2230 thread->user_stack_base = parentThread->user_stack_base;
2231 thread->user_stack_size = parentThread->user_stack_size;
2232 thread->user_local_storage = parentThread->user_local_storage;
2233 thread->sig_block_mask = parentThread->sig_block_mask;
2234 thread->signal_stack_base = parentThread->signal_stack_base;
2235 thread->signal_stack_size = parentThread->signal_stack_size;
2236 thread->signal_stack_enabled = parentThread->signal_stack_enabled;
2237
2238 arch_store_fork_frame(forkArgs);
2239
2240 // copy image list
2241 if (copy_images(parentTeam->id, team) != B_OK)
2242 goto err5;
2243
2244 // insert the team into its parent and the teams hash
2245 parentTeam->LockTeamAndProcessGroup();
2246 team->Lock();
2247
2248 {
2249 InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2250
2251 sTeamHash.Insert(team);
2252 teamLimitReached = sUsedTeams >= sMaxTeams;
2253 if (!teamLimitReached)
2254 sUsedTeams++;
2255 }
2256
2257 insert_team_into_parent(parentTeam, team);
2258 insert_team_into_group(parentTeam->group, team);
2259
2260 team->Unlock();
2261 parentTeam->UnlockTeamAndProcessGroup();
2262
2263 // notify team listeners
2264 sNotificationService.Notify(TEAM_ADDED, team);
2265
2266 if (teamLimitReached) {
2267 status = B_NO_MORE_TEAMS;
2268 goto err6;
2269 }
2270
2271 // create the main thread
2272 {
2273 ThreadCreationAttributes threadCreationAttributes(NULL,
2274 parentThread->name, parentThread->priority, NULL, team->id, thread);
2275 threadCreationAttributes.forkArgs = forkArgs;
2276 threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
2277 threadID = thread_create_thread(threadCreationAttributes, false);
2278 if (threadID < 0) {
2279 status = threadID;
2280 goto err6;
2281 }
2282 }
2283
2284 // notify the debugger
2285 user_debug_team_created(team->id);
2286
2287 T(TeamForked(threadID));
2288
2289 resume_thread(threadID);
2290 return threadID;
2291
2292 err6:
2293 // Remove the team structure from the process group, the parent team, and
2294 // the team hash table and delete the team structure.
2295 parentTeam->LockTeamAndProcessGroup();
2296 team->Lock();
2297
2298 remove_team_from_group(team);
2299 remove_team_from_parent(team->parent, team);
2300
2301 team->Unlock();
2302 parentTeam->UnlockTeamAndProcessGroup();
2303
2304 {
2305 InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
2306 sTeamHash.Remove(team);
2307 if (!teamLimitReached)
2308 sUsedTeams--;
2309 }
2310
2311 sNotificationService.Notify(TEAM_REMOVED, team);
2312 err5:
2313 remove_images(team);
2314 err4:
2315 team->address_space->RemoveAndPut();
2316 err3:
2317 delete_realtime_sem_context(team->realtime_sem_context);
2318 err2:
2319 free(forkArgs);
2320 err1:
2321 team->ReleaseReference();
2322
2323 return status;
2324 }
2325
2326
2327 /*!	Returns whether the specified team \a parent has any children belonging
2328 	to the process group with the specified ID \a groupID.
2329 The caller must hold \a parent's lock.
2330 */
2331 static bool
has_children_in_group(Team * parent,pid_t groupID)2332 has_children_in_group(Team* parent, pid_t groupID)
2333 {
2334 for (Team* child = parent->children; child != NULL;
2335 child = child->siblings_next) {
2336 TeamLocker childLocker(child);
2337 if (child->group_id == groupID)
2338 return true;
2339 }
2340
2341 return false;
2342 }
2343
2344
2345 /*! Returns the first job control entry from \a children, which matches \a id.
2346 \a id can be:
2347 - \code > 0 \endcode: Matching an entry with that team ID.
2348 - \code == -1 \endcode: Matching any entry.
2349 - \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2350 \c 0 is an invalid value for \a id.
2351
2352 The caller must hold the lock of the team that \a children belongs to.
2353
2354 \param children The job control entry list to check.
2355 \param id The match criterion.
2356 \return The first matching entry or \c NULL, if none matches.
2357 */
2358 static job_control_entry*
get_job_control_entry(team_job_control_children & children,pid_t id)2359 get_job_control_entry(team_job_control_children& children, pid_t id)
2360 {
2361 for (JobControlEntryList::Iterator it = children.entries.GetIterator();
2362 job_control_entry* entry = it.Next();) {
2363
2364 if (id > 0) {
2365 if (entry->thread == id)
2366 return entry;
2367 } else if (id == -1) {
2368 return entry;
2369 } else {
2370 pid_t processGroup
2371 = (entry->team ? entry->team->group_id : entry->group_id);
2372 if (processGroup == -id)
2373 return entry;
2374 }
2375 }
2376
2377 return NULL;
2378 }
2379
2380
2381 /*! Returns the first job control entry from one of team's dead, continued, or
2382 stopped children which matches \a id.
2383 \a id can be:
2384 - \code > 0 \endcode: Matching an entry with that team ID.
2385 - \code == -1 \endcode: Matching any entry.
2386 - \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2387 \c 0 is an invalid value for \a id.
2388
2389 The caller must hold \a team's lock.
2390
2391 \param team The team whose dead, stopped, and continued child lists shall be
2392 checked.
2393 \param id The match criterion.
2394 \param flags Specifies which children shall be considered. Dead children
2395 are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2396 children are considered when \a flags is ORed bitwise with \c WUNTRACED
2397 or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2398 \c WCONTINUED.
2399 \return The first matching entry or \c NULL, if none matches.
2400 */
2401 static job_control_entry*
get_job_control_entry(Team * team,pid_t id,uint32 flags)2402 get_job_control_entry(Team* team, pid_t id, uint32 flags)
2403 {
2404 job_control_entry* entry = NULL;
2405
2406 if ((flags & WEXITED) != 0)
2407 entry = get_job_control_entry(team->dead_children, id);
2408
2409 if (entry == NULL && (flags & WCONTINUED) != 0)
2410 entry = get_job_control_entry(team->continued_children, id);
2411
2412 if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
2413 entry = get_job_control_entry(team->stopped_children, id);
2414
2415 return entry;
2416 }
2417
2418
/*!	Creates a job control entry that does not (yet) hold a process group
	reference; InitDeadState() acquires one when the owning team dies.
*/
job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}
2424
2425
~job_control_entry()2426 job_control_entry::~job_control_entry()
2427 {
2428 if (has_group_ref) {
2429 InterruptsSpinLocker groupHashLocker(sGroupHashLock);
2430
2431 ProcessGroup* group = sGroupHash.Lookup(group_id);
2432 if (group == NULL) {
2433 panic("job_control_entry::~job_control_entry(): unknown group "
2434 "ID: %" B_PRId32, group_id);
2435 return;
2436 }
2437
2438 groupHashLocker.Unlock();
2439
2440 group->ReleaseReference();
2441 }
2442 }
2443
2444
2445 /*! Invoked when the owning team is dying, initializing the entry according to
2446 the dead state.
2447
2448 The caller must hold the owning team's lock and the scheduler lock.
2449 */
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		// Take over a reference to the process group so the entry stays
		// meaningful after the Team object is gone; released in the
		// destructor.
		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		// Snapshot the team's exit information and accumulated CPU times.
		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		// Detach from the team -- it is about to be destroyed.
		team = NULL;
	}
}
2473
2474
2475 job_control_entry&
operator =(const job_control_entry & other)2476 job_control_entry::operator=(const job_control_entry& other)
2477 {
2478 state = other.state;
2479 thread = other.thread;
2480 signal = other.signal;
2481 has_group_ref = false;
2482 signaling_user = other.signaling_user;
2483 team = other.team;
2484 group_id = other.group_id;
2485 status = other.status;
2486 reason = other.reason;
2487 user_time = other.user_time;
2488 kernel_time = other.kernel_time;
2489
2490 return *this;
2491 }
2492
2493
2494 /*! This is the kernel backend for waitid().
2495 */
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	// At least one of the state selection flags must be given.
	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	// Retry loop: each iteration checks for a matching job control entry and,
	// when none is found, blocks on the dead-children condition variable.
	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying to the process group specification at all.
		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		// WNOHANG or a hard error: bail out without waiting.
		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
2682
2683
2684 /*! Fills the team_info structure with information from the specified team.
2685 Interrupts must be enabled. The team must not be locked.
2686 */
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	// Callers may pass a smaller structure than the current team_info
	// (cf. the offsetof() check below); a larger one is invalid.
	if (size > sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more informations for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;
		// the args are exposed as a single string; presumably the real
		// argument count isn't tracked here -- TODO confirm

	// These fields only exist in the larger variant of the structure; fill
	// them in only when the caller's buffer can hold them.
	if (size > offsetof(team_info, real_uid)) {
		info->real_uid = team->real_uid;
		info->real_gid = team->real_gid;
		info->group_id = team->group_id;
		info->session_id = team->session_id;

		if (team->parent != NULL)
			info->parent = team->parent->id;
		else
			info->parent = -1;

		strlcpy(info->name, team->Name(), sizeof(info->name));
		info->start_time = team->start_time;
	}

	return B_OK;
}
2731
2732
2733 /*! Returns whether the process group contains stopped processes.
2734 The caller must hold the process group's lock.
2735 */
2736 static bool
process_group_has_stopped_processes(ProcessGroup * group)2737 process_group_has_stopped_processes(ProcessGroup* group)
2738 {
2739 Team* team = group->teams;
2740 while (team != NULL) {
2741 // the parent team's lock guards the job control entry -- acquire it
2742 team->LockTeamAndParent(false);
2743
2744 if (team->job_control_entry != NULL
2745 && team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
2746 team->UnlockTeamAndParent();
2747 return true;
2748 }
2749
2750 team->UnlockTeamAndParent();
2751
2752 team = team->group_next;
2753 }
2754
2755 return false;
2756 }
2757
2758
2759 /*! Iterates through all process groups queued in team_remove_team() and signals
2760 those that are orphaned and have stopped processes.
2761 The caller must not hold any team or process group locks.
2762 */
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		// The list held a reference to the group; take it over so the group
		// stays alive until we're done with it.
		BReference<ProcessGroup> groupReference(group);

		// Drop the list lock before locking the group itself.
		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}
2795
2796
2797 static status_t
common_get_team_usage_info(team_id id,int32 who,team_usage_info * info,uint32 flags)2798 common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
2799 uint32 flags)
2800 {
2801 if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
2802 return B_BAD_VALUE;
2803
2804 // get the team
2805 Team* team = Team::GetAndLock(id);
2806 if (team == NULL)
2807 return B_BAD_TEAM_ID;
2808 BReference<Team> teamReference(team, true);
2809 TeamLocker teamLocker(team, true);
2810
2811 if ((flags & B_CHECK_PERMISSION) != 0) {
2812 uid_t uid = geteuid();
2813 if (uid != 0 && uid != team->effective_uid)
2814 return B_NOT_ALLOWED;
2815 }
2816
2817 bigtime_t kernelTime = 0;
2818 bigtime_t userTime = 0;
2819
2820 switch (who) {
2821 case B_TEAM_USAGE_SELF:
2822 {
2823 Thread* thread = team->thread_list;
2824
2825 for (; thread != NULL; thread = thread->team_next) {
2826 InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2827 kernelTime += thread->kernel_time;
2828 userTime += thread->user_time;
2829 }
2830
2831 kernelTime += team->dead_threads_kernel_time;
2832 userTime += team->dead_threads_user_time;
2833 break;
2834 }
2835
2836 case B_TEAM_USAGE_CHILDREN:
2837 {
2838 Team* child = team->children;
2839 for (; child != NULL; child = child->siblings_next) {
2840 TeamLocker childLocker(child);
2841
2842 Thread* thread = team->thread_list;
2843
2844 for (; thread != NULL; thread = thread->team_next) {
2845 InterruptsSpinLocker threadTimeLocker(thread->time_lock);
2846 kernelTime += thread->kernel_time;
2847 userTime += thread->user_time;
2848 }
2849
2850 kernelTime += child->dead_threads_kernel_time;
2851 userTime += child->dead_threads_user_time;
2852 }
2853
2854 kernelTime += team->dead_children.kernel_time;
2855 userTime += team->dead_children.user_time;
2856 break;
2857 }
2858 }
2859
2860 info->kernel_time = kernelTime;
2861 info->user_time = userTime;
2862
2863 return B_OK;
2864 }
2865
2866
2867 // #pragma mark - Private kernel API
2868
2869
/*!	One-time kernel initialization: sets up the team and process group hash
	tables, the initial session and process group, and the kernel team itself,
	and registers the team-related debugger commands.
*/
status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	// the kernel team runs with root credentials
	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		" <id> - The ID of the team.\n"
		" <address> - The address of the team structure.\n"
		" <name> - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}
2947
2948
/*!	Returns the maximum number of teams that may exist at the same time. */
int32
team_max_teams(void)
{
	return sMaxTeams;
}
2954
2955
/*!	Returns the number of currently existing teams. */
int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}
2962
2963
2964 /*! Returns a death entry of a child team specified by ID (if any).
2965 The caller must hold the team's lock.
2966
2967 \param team The team whose dead children list to check.
2968 	\param child The ID of the child whose death entry to look for. Must be > 0.
2969 \param _deleteEntry Return variable, indicating whether the caller needs to
2970 delete the returned entry.
2971 \return The death entry of the matching team, or \c NULL, if no death entry
2972 for the team was found.
2973 */
2974 job_control_entry*
team_get_death_entry(Team * team,thread_id child,bool * _deleteEntry)2975 team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
2976 {
2977 if (child <= 0)
2978 return NULL;
2979
2980 job_control_entry* entry = get_job_control_entry(team->dead_children,
2981 child);
2982 if (entry) {
2983 // remove the entry only, if the caller is the parent of the found team
2984 if (team_get_current_team_id() == entry->thread) {
2985 team->dead_children.entries.Remove(entry);
2986 team->dead_children.count--;
2987 *_deleteEntry = true;
2988 } else {
2989 *_deleteEntry = false;
2990 }
2991 }
2992
2993 return entry;
2994 }
2995
2996
2997 /*! Quick check to see if we have a valid team ID. */
2998 bool
team_is_valid(team_id id)2999 team_is_valid(team_id id)
3000 {
3001 if (id <= 0)
3002 return false;
3003
3004 InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3005 return team_get_team_struct_locked(id) != NULL;
3006 }
3007
3008
/*!	Looks up the Team object with the given ID in the global team hash.
	The caller must hold \c sTeamHashLock (read or write).
*/
Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}
3014
3015
3016 void
team_set_controlling_tty(void * tty)3017 team_set_controlling_tty(void* tty)
3018 {
3019 // lock the team, so its session won't change while we're playing with it
3020 Team* team = thread_get_current_thread()->team;
3021 TeamLocker teamLocker(team);
3022
3023 // get and lock the session
3024 ProcessSession* session = team->group->Session();
3025 AutoLocker<ProcessSession> sessionLocker(session);
3026
3027 // set the session's fields
3028 session->controlling_tty = tty;
3029 session->foreground_group = -1;
3030 }
3031
3032
3033 void*
team_get_controlling_tty()3034 team_get_controlling_tty()
3035 {
3036 // lock the team, so its session won't change while we're playing with it
3037 Team* team = thread_get_current_thread()->team;
3038 TeamLocker teamLocker(team);
3039
3040 // get and lock the session
3041 ProcessSession* session = team->group->Session();
3042 AutoLocker<ProcessSession> sessionLocker(session);
3043
3044 // get the session's field
3045 return session->controlling_tty;
3046 }
3047
3048
/*!	Sets the foreground process group of the calling team's session, provided
	\a tty is that session's controlling TTY. If the calling team is in a
	background group and does not ignore or block SIGTTOU, SIGTTOU is sent to
	its group instead and \c B_INTERRUPTED is returned.
*/
status_t
team_set_foreground_process_group(void* tty, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != tty)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			// all locks must be released before sending the signal
			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
3098
3099
3100 uid_t
team_geteuid(team_id id)3101 team_geteuid(team_id id)
3102 {
3103 InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3104 Team* team = team_get_team_struct_locked(id);
3105 if (team == NULL)
3106 return (uid_t)-1;
3107 return team->effective_uid;
3108 }
3109
3110
3111 /*! Removes the specified team from the global team hash, from its process
3112 group, and from its parent.
3113 It also moves all of its children to the kernel team.
3114
3115 The caller must hold the following locks:
3116 - \a team's process group's lock,
3117 - the kernel team's lock,
3118 - \a team's parent team's lock (might be the kernel team), and
3119 - \a team's lock.
3120 */
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine, whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty != NULL) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		session->controlling_tty = NULL;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		// (MoveFrom() drains the whole lists, so after the first iteration
		// these are no-ops)
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}
3199
3200
/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished; the team lock must be
		// dropped meanwhile, since the change needs it to complete
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		// send SIGKILLTHR to every thread except the main one, counting how
		// many we will have to wait for
		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for; retry the loop afterwards, since new
		// threads may have been created while the team lock was dropped
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}
3303
3304
/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.

	\param team The exiting team.
	\param debuggerPort The team debugger's port as returned by
		team_shutdown_team(), or -1 if the team isn't being debugged.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info != NULL) {
		// there's indeed someone waiting
		team->loading_info->result = B_ERROR;

		// wake up the waiting thread
		team->loading_info->condition.NotifyAll();
		team->loading_info = NULL;
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	// get team exit information
	status_t exitStatus = -1;
	int signal = -1;

	switch (team->exit.reason) {
		case CLD_EXITED:
			exitStatus = team->exit.status;
			break;
		case CLD_KILLED:
			signal = team->exit.signal;
			break;
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// get team usage information
	InterruptsSpinLocker timeLocker(team->time_lock);

	team_usage_info usageInfo;
	usageInfo.kernel_time = team->dead_threads_kernel_time;
	usageInfo.user_time = team->dead_threads_user_time;

	timeLocker.Unlock();

	// free team resources

	delete_user_mutex_context(team->user_mutex_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();
		// The team object may be gone after this point; only the values
		// copied above may still be used.

	// notify the debugger, that the team is gone
	user_debug_team_deleted(teamID, debuggerPort, exitStatus, signal, &usageInfo);
}
3388
3389
3390 Team*
team_get_kernel_team(void)3391 team_get_kernel_team(void)
3392 {
3393 return sKernelTeam;
3394 }
3395
3396
3397 team_id
team_get_kernel_team_id(void)3398 team_get_kernel_team_id(void)
3399 {
3400 if (!sKernelTeam)
3401 return 0;
3402
3403 return sKernelTeam->id;
3404 }
3405
3406
3407 team_id
team_get_current_team_id(void)3408 team_get_current_team_id(void)
3409 {
3410 return thread_get_current_thread()->team->id;
3411 }
3412
3413
3414 status_t
team_get_address_space(team_id id,VMAddressSpace ** _addressSpace)3415 team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
3416 {
3417 if (id == sKernelTeam->id) {
3418 // we're the kernel team, so we don't have to go through all
3419 // the hassle (locking and hash lookup)
3420 *_addressSpace = VMAddressSpace::GetKernel();
3421 return B_OK;
3422 }
3423
3424 InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3425
3426 Team* team = team_get_team_struct_locked(id);
3427 if (team == NULL)
3428 return B_BAD_VALUE;
3429
3430 team->address_space->Get();
3431 *_addressSpace = team->address_space;
3432 return B_OK;
3433 }
3434
3435
/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can \c NULL, if none. Then
		the caller is responsible for filling in the following fields of the
		entry before releasing the parent team's lock, unless the new state is
		\c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	// record what caused the state change, if a signal was given
	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		// wake up threads blocked in wait_for_child() on the parent
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}
3509
3510
3511 /*! Inits the given team's exit information, if not yet initialized, to some
3512 generic "killed" status.
3513 The caller must not hold the team's lock. Interrupts must be enabled.
3514
3515 \param team The team whose exit info shall be initialized.
3516 */
3517 void
team_init_exit_info_on_error(Team * team)3518 team_init_exit_info_on_error(Team* team)
3519 {
3520 TeamLocker teamLocker(team);
3521
3522 if (!team->exit.initialized) {
3523 team->exit.reason = CLD_KILLED;
3524 team->exit.signal = SIGKILL;
3525 team->exit.signaling_user = geteuid();
3526 team->exit.status = 0;
3527 team->exit.initialized = true;
3528 }
3529 }
3530
3531
3532 /*! Adds a hook to the team that is called as soon as this team goes away.
3533 This call might get public in the future.
3534 */
3535 status_t
start_watching_team(team_id teamID,void (* hook)(team_id,void *),void * data)3536 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3537 {
3538 if (hook == NULL || teamID < B_OK)
3539 return B_BAD_VALUE;
3540
3541 // create the watcher object
3542 team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
3543 if (watcher == NULL)
3544 return B_NO_MEMORY;
3545
3546 watcher->hook = hook;
3547 watcher->data = data;
3548
3549 // add watcher, if the team isn't already dying
3550 // get the team
3551 Team* team = Team::GetAndLock(teamID);
3552 if (team == NULL) {
3553 free(watcher);
3554 return B_BAD_TEAM_ID;
3555 }
3556
3557 list_add_item(&team->watcher_list, watcher);
3558
3559 team->UnlockAndReleaseReference();
3560
3561 return B_OK;
3562 }
3563
3564
3565 status_t
stop_watching_team(team_id teamID,void (* hook)(team_id,void *),void * data)3566 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
3567 {
3568 if (hook == NULL || teamID < 0)
3569 return B_BAD_VALUE;
3570
3571 // get team and remove watcher (if present)
3572 Team* team = Team::GetAndLock(teamID);
3573 if (team == NULL)
3574 return B_BAD_TEAM_ID;
3575
3576 // search for watcher
3577 team_watcher* watcher = NULL;
3578 while ((watcher = (team_watcher*)list_get_next_item(
3579 &team->watcher_list, watcher)) != NULL) {
3580 if (watcher->hook == hook && watcher->data == data) {
3581 // got it!
3582 list_remove_item(&team->watcher_list, watcher);
3583 break;
3584 }
3585 }
3586
3587 team->UnlockAndReleaseReference();
3588
3589 if (watcher == NULL)
3590 return B_ENTRY_NOT_FOUND;
3591
3592 free(watcher);
3593 return B_OK;
3594 }
3595
3596
/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.

	\param team The team to allocate from.
	\return A pointer into the team's user data area, or \c NULL if the team
		has no user data area or the area could not be grown.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	// no user data area set up for this team
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the user_thread
			// again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread by bumping the used-bytes watermark
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}
3639
3640
3641 /*! Frees the given user_thread structure.
3642 The team's lock must not be held. Interrupts must be enabled.
3643 \param team The team the user thread was allocated from.
3644 \param userThread The user thread to free.
3645 */
3646 void
team_free_user_thread(Team * team,struct user_thread * userThread)3647 team_free_user_thread(Team* team, struct user_thread* userThread)
3648 {
3649 if (userThread == NULL)
3650 return;
3651
3652 // create a free list entry
3653 free_user_thread* entry
3654 = (free_user_thread*)malloc(sizeof(free_user_thread));
3655 if (entry == NULL) {
3656 // we have to leak the user thread :-/
3657 return;
3658 }
3659
3660 // add to free list
3661 TeamLocker teamLocker(team);
3662
3663 entry->thread = userThread;
3664 entry->next = team->free_user_threads;
3665 team->free_user_threads = entry;
3666 }
3667
3668
3669 // #pragma mark - Associated data interface
3670
3671
AssociatedData()3672 AssociatedData::AssociatedData()
3673 :
3674 fOwner(NULL)
3675 {
3676 }
3677
3678
// Out-of-line destructor; no cleanup needed here.
AssociatedData::~AssociatedData()
{
}
3682
3683
/*!	Hook called by AssociatedDataOwner::PrepareForDeletion() when the owner
	goes away. The default implementation does nothing; subclasses may
	override it to react to the owner's deletion.
*/
void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}
3688
3689
AssociatedDataOwner::AssociatedDataOwner()
{
	// fLock guards fList and the data objects' owner pointers
	mutex_init(&fLock, "associated data owner");
}
3694
3695
AssociatedDataOwner::~AssociatedDataOwner()
{
	// NOTE(review): presumably PrepareForDeletion() has detached all data by
	// now -- confirm with callers.
	mutex_destroy(&fLock);
}
3700
3701
3702 bool
AddData(AssociatedData * data)3703 AssociatedDataOwner::AddData(AssociatedData* data)
3704 {
3705 MutexLocker locker(fLock);
3706
3707 if (data->Owner() != NULL)
3708 return false;
3709
3710 data->AcquireReference();
3711 fList.Add(data);
3712 data->SetOwner(this);
3713
3714 return true;
3715 }
3716
3717
3718 bool
RemoveData(AssociatedData * data)3719 AssociatedDataOwner::RemoveData(AssociatedData* data)
3720 {
3721 MutexLocker locker(fLock);
3722
3723 if (data->Owner() != this)
3724 return false;
3725
3726 data->SetOwner(NULL);
3727 fList.Remove(data);
3728
3729 locker.Unlock();
3730
3731 data->ReleaseReference();
3732
3733 return true;
3734 }
3735
3736
/*!	Detaches all associated data from this owner.
	The data objects are moved out of the list and unowned while the lock is
	held; their OwnerDeleted() hooks are then called and the references
	released with the lock dropped, since the hooks may do arbitrary work.
*/
void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
		AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
3759
3760
3761 /*! Associates data with the current team.
3762 When the team is deleted, the data object is notified.
3763 The team acquires a reference to the object.
3764
3765 \param data The data object.
3766 \return \c true on success, \c false otherwise. Fails only when the supplied
3767 data object is already associated with another owner.
3768 */
3769 bool
team_associate_data(AssociatedData * data)3770 team_associate_data(AssociatedData* data)
3771 {
3772 return thread_get_current_thread()->team->AddData(data);
3773 }
3774
3775
3776 /*! Dissociates data from the current team.
3777 Balances an earlier call to team_associate_data().
3778
3779 \param data The data object.
3780 \return \c true on success, \c false otherwise. Fails only when the data
3781 object is not associated with the current team.
3782 */
3783 bool
team_dissociate_data(AssociatedData * data)3784 team_dissociate_data(AssociatedData* data)
3785 {
3786 return thread_get_current_thread()->team->RemoveData(data);
3787 }
3788
3789
3790 // #pragma mark - Public kernel API
3791
3792
3793 thread_id
load_image(int32 argCount,const char ** args,const char ** env)3794 load_image(int32 argCount, const char** args, const char** env)
3795 {
3796 return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
3797 B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
3798 }
3799
3800
3801 thread_id
load_image_etc(int32 argCount,const char * const * args,const char * const * env,int32 priority,team_id parentID,uint32 flags)3802 load_image_etc(int32 argCount, const char* const* args,
3803 const char* const* env, int32 priority, team_id parentID, uint32 flags)
3804 {
3805 // we need to flatten the args and environment
3806
3807 if (args == NULL)
3808 return B_BAD_VALUE;
3809
3810 // determine total needed size
3811 int32 argSize = 0;
3812 for (int32 i = 0; i < argCount; i++)
3813 argSize += strlen(args[i]) + 1;
3814
3815 int32 envCount = 0;
3816 int32 envSize = 0;
3817 while (env != NULL && env[envCount] != NULL)
3818 envSize += strlen(env[envCount++]) + 1;
3819
3820 int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
3821 if (size > MAX_PROCESS_ARGS_SIZE)
3822 return B_TOO_MANY_ARGS;
3823
3824 // allocate space
3825 char** flatArgs = (char**)malloc(size);
3826 if (flatArgs == NULL)
3827 return B_NO_MEMORY;
3828
3829 char** slot = flatArgs;
3830 char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);
3831
3832 // copy arguments and environment
3833 for (int32 i = 0; i < argCount; i++) {
3834 int32 argSize = strlen(args[i]) + 1;
3835 memcpy(stringSpace, args[i], argSize);
3836 *slot++ = stringSpace;
3837 stringSpace += argSize;
3838 }
3839
3840 *slot++ = NULL;
3841
3842 for (int32 i = 0; i < envCount; i++) {
3843 int32 envSize = strlen(env[i]) + 1;
3844 memcpy(stringSpace, env[i], envSize);
3845 *slot++ = stringSpace;
3846 stringSpace += envSize;
3847 }
3848
3849 *slot++ = NULL;
3850
3851 thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
3852 B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);
3853
3854 free(flatArgs);
3855 // load_image_internal() unset our variable if it took over ownership
3856
3857 return thread;
3858 }
3859
3860
3861 status_t
wait_for_team(team_id id,status_t * _returnCode)3862 wait_for_team(team_id id, status_t* _returnCode)
3863 {
3864 // check whether the team exists
3865 InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3866
3867 Team* team = team_get_team_struct_locked(id);
3868 if (team == NULL)
3869 return B_BAD_TEAM_ID;
3870
3871 id = team->id;
3872
3873 teamsLocker.Unlock();
3874
3875 // wait for the main thread (it has the same ID as the team)
3876 return wait_for_thread(id, _returnCode);
3877 }
3878
3879
3880 status_t
kill_team(team_id id)3881 kill_team(team_id id)
3882 {
3883 InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
3884
3885 Team* team = team_get_team_struct_locked(id);
3886 if (team == NULL)
3887 return B_BAD_TEAM_ID;
3888
3889 id = team->id;
3890
3891 teamsLocker.Unlock();
3892
3893 if (team == sKernelTeam)
3894 return B_NOT_ALLOWED;
3895
3896 // Just kill the team's main thread (it has same ID as the team). The
3897 // cleanup code there will take care of the team.
3898 return kill_thread(id);
3899 }
3900
3901
3902 status_t
_get_team_info(team_id id,team_info * info,size_t size)3903 _get_team_info(team_id id, team_info* info, size_t size)
3904 {
3905 // get the team
3906 Team* team = Team::Get(id);
3907 if (team == NULL)
3908 return B_BAD_TEAM_ID;
3909 BReference<Team> teamReference(team, true);
3910
3911 // fill in the info
3912 return fill_team_info(team, info, size);
3913 }
3914
3915
/*!	Fills in info about the "next" team, for iterating over all teams.
	\param cookie In/out iteration state: the next team ID slot to probe.
		Values < 1 restart the iteration from the beginning.
	\param info Output buffer for the team info.
	\param size Size of \a info.
	\return \c B_OK on success, \c B_BAD_TEAM_ID when iteration is exhausted.
*/
status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team by probing each ID up to the last allocated one
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
3944
3945
3946 status_t
_get_team_usage_info(team_id id,int32 who,team_usage_info * info,size_t size)3947 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
3948 {
3949 if (size != sizeof(team_usage_info))
3950 return B_BAD_VALUE;
3951
3952 return common_get_team_usage_info(id, who, info, 0);
3953 }
3954
3955
3956 pid_t
getpid(void)3957 getpid(void)
3958 {
3959 return thread_get_current_thread()->team->id;
3960 }
3961
3962
3963 pid_t
getppid()3964 getppid()
3965 {
3966 return _getppid(0);
3967 }
3968
3969
3970 pid_t
getpgid(pid_t id)3971 getpgid(pid_t id)
3972 {
3973 if (id < 0) {
3974 errno = EINVAL;
3975 return -1;
3976 }
3977
3978 if (id == 0) {
3979 // get process group of the calling process
3980 Team* team = thread_get_current_thread()->team;
3981 TeamLocker teamLocker(team);
3982 return team->group_id;
3983 }
3984
3985 // get the team
3986 Team* team = Team::GetAndLock(id);
3987 if (team == NULL) {
3988 errno = ESRCH;
3989 return -1;
3990 }
3991
3992 // get the team's process group ID
3993 pid_t groupID = team->group_id;
3994
3995 team->UnlockAndReleaseReference();
3996
3997 return groupID;
3998 }
3999
4000
4001 pid_t
getsid(pid_t id)4002 getsid(pid_t id)
4003 {
4004 if (id < 0) {
4005 errno = EINVAL;
4006 return -1;
4007 }
4008
4009 if (id == 0) {
4010 // get session of the calling process
4011 Team* team = thread_get_current_thread()->team;
4012 TeamLocker teamLocker(team);
4013 return team->session_id;
4014 }
4015
4016 // get the team
4017 Team* team = Team::GetAndLock(id);
4018 if (team == NULL) {
4019 errno = ESRCH;
4020 return -1;
4021 }
4022
4023 // get the team's session ID
4024 pid_t sessionID = team->session_id;
4025
4026 team->UnlockAndReleaseReference();
4027
4028 return sessionID;
4029 }
4030
4031
4032 // #pragma mark - User syscalls
4033
4034
/*!	Syscall backend for exec*(): replaces the calling team's program image.
	Only returns in the error case.
*/
status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	// only reached on failure; free the copied argument block
	free(flatArgs);
	return error;
}
4061
4062
4063 thread_id
_user_fork(void)4064 _user_fork(void)
4065 {
4066 return fork_team();
4067 }
4068
4069
4070 pid_t
_user_wait_for_child(thread_id child,uint32 flags,siginfo_t * userInfo,team_usage_info * usageInfo)4071 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
4072 team_usage_info* usageInfo)
4073 {
4074 if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
4075 return B_BAD_ADDRESS;
4076 if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
4077 return B_BAD_ADDRESS;
4078
4079 siginfo_t info;
4080 team_usage_info usage_info;
4081 pid_t foundChild = wait_for_child(child, flags, info, usage_info);
4082 if (foundChild < 0)
4083 return syscall_restart_handle_post(foundChild);
4084
4085 // copy info back to userland
4086 if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
4087 return B_BAD_ADDRESS;
4088 // copy usage_info back to userland
4089 if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
4090 sizeof(usage_info)) != B_OK) {
4091 return B_BAD_ADDRESS;
4092 }
4093
4094 return foundChild;
4095 }
4096
4097
4098 pid_t
_user_process_info(pid_t process,int32 which)4099 _user_process_info(pid_t process, int32 which)
4100 {
4101 pid_t result;
4102 switch (which) {
4103 case SESSION_ID:
4104 result = getsid(process);
4105 break;
4106 case GROUP_ID:
4107 result = getpgid(process);
4108 break;
4109 case PARENT_ID:
4110 result = _getppid(process);
4111 break;
4112 default:
4113 return B_BAD_VALUE;
4114 }
4115
4116 return result >= 0 ? result : errno;
4117 }
4118
4119
/*!	Syscall backend for setpgid().
	\param processID The target process, or 0 for the calling process.
	\param groupID The target process group, or 0 for a group with the target
		process' ID.
	\return The resulting group ID on success, an error code otherwise.
*/
pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in the
	//   same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			// (a brand-new group can't be reached by anyone else yet, and
			// otherwise ascending group IDs define the locking order)
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different session.
			// That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}
4277
4278
/*!	Syscall backend for setsid(): makes the calling team the leader of a new
	session and of a new process group within it.
	\return The new process group ID (== the team's ID) on success,
		\c B_NOT_ALLOWED if the team already leads a process group, or
		\c B_NO_MEMORY.
*/
pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}
4318
4319
4320 status_t
_user_wait_for_team(team_id id,status_t * _userReturnCode)4321 _user_wait_for_team(team_id id, status_t* _userReturnCode)
4322 {
4323 status_t returnCode;
4324 status_t status;
4325
4326 if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
4327 return B_BAD_ADDRESS;
4328
4329 status = wait_for_team(id, &returnCode);
4330 if (status >= B_OK && _userReturnCode != NULL) {
4331 if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
4332 != B_OK)
4333 return B_BAD_ADDRESS;
4334 return B_OK;
4335 }
4336
4337 return syscall_restart_handle_post(status);
4338 }
4339
4340
4341 thread_id
_user_load_image(const char * const * userFlatArgs,size_t flatArgsSize,int32 argCount,int32 envCount,int32 priority,uint32 flags,port_id errorPort,uint32 errorToken)4342 _user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
4343 int32 argCount, int32 envCount, int32 priority, uint32 flags,
4344 port_id errorPort, uint32 errorToken)
4345 {
4346 TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));
4347
4348 if (argCount < 1)
4349 return B_BAD_VALUE;
4350
4351 // copy and relocate the flat arguments
4352 char** flatArgs;
4353 status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
4354 argCount, envCount, flatArgs);
4355 if (error != B_OK)
4356 return error;
4357
4358 thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
4359 argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
4360 errorToken);
4361
4362 free(flatArgs);
4363 // load_image_internal() unset our variable if it took over ownership
4364
4365 return thread;
4366 }
4367
4368
/*!	exit() syscall: terminates the calling team with the given exit status.
	Records the exit status (first caller wins), honors a debugger's request
	to stop the thread before exiting, and finally kills the team by sending
	SIGKILL to the calling thread. Does not return to userland.
	\param returnValue The team's exit status.
*/
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// only the first recorded exit state sticks — a later signal/exit must
	// not overwrite the original cause of death
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	// Note: GCC 13 marks the following call as potentially overflowing, since it thinks team may
	// be `nullptr`. This cannot be the case in reality, therefore ignore this specific
	// error.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstringop-overflow"
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();
#pragma GCC diagnostic pop

	// Send this thread a SIGKILL. This makes sure the thread will not return to
	// userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}
4408
4409
/*!	Syscall: kills the team with the given ID.
	Thin wrapper that delegates directly to the kernel-side kill_team().
	\param team ID of the team to kill.
	\return \c B_OK on success, an error code otherwise.
*/
status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}
4415
4416
4417 status_t
_user_get_team_info(team_id id,team_info * userInfo,size_t size)4418 _user_get_team_info(team_id id, team_info* userInfo, size_t size)
4419 {
4420 status_t status;
4421 team_info info;
4422
4423 if (size > sizeof(team_info))
4424 return B_BAD_VALUE;
4425
4426 if (!IS_USER_ADDRESS(userInfo))
4427 return B_BAD_ADDRESS;
4428
4429 status = _get_team_info(id, &info, size);
4430 if (status == B_OK) {
4431 if (user_memcpy(userInfo, &info, size) < B_OK)
4432 return B_BAD_ADDRESS;
4433 }
4434
4435 return status;
4436 }
4437
4438
4439 status_t
_user_get_next_team_info(int32 * userCookie,team_info * userInfo,size_t size)4440 _user_get_next_team_info(int32* userCookie, team_info* userInfo, size_t size)
4441 {
4442 status_t status;
4443 team_info info;
4444 int32 cookie;
4445
4446 if (size > sizeof(team_info))
4447 return B_BAD_VALUE;
4448
4449 if (!IS_USER_ADDRESS(userCookie)
4450 || !IS_USER_ADDRESS(userInfo)
4451 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
4452 return B_BAD_ADDRESS;
4453
4454 status = _get_next_team_info(&cookie, &info, size);
4455 if (status != B_OK)
4456 return status;
4457
4458 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
4459 || user_memcpy(userInfo, &info, size) < B_OK)
4460 return B_BAD_ADDRESS;
4461
4462 return status;
4463 }
4464
4465
/*!	Syscall: returns the ID of the calling thread's team.
	\return The current team's ID.
*/
team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}
4471
4472
4473 status_t
_user_get_team_usage_info(team_id team,int32 who,team_usage_info * userInfo,size_t size)4474 _user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
4475 size_t size)
4476 {
4477 if (size != sizeof(team_usage_info))
4478 return B_BAD_VALUE;
4479
4480 team_usage_info info;
4481 status_t status = common_get_team_usage_info(team, who, &info,
4482 B_CHECK_PERMISSION);
4483
4484 if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
4485 || user_memcpy(userInfo, &info, size) != B_OK) {
4486 return B_BAD_ADDRESS;
4487 }
4488
4489 return status;
4490 }
4491
4492
/*!	Syscall: retrieves extended information about a team as a flattened
	KMessage.
	\param teamID The ID of the team in question.
	\param flags Bit mask selecting the information to return; currently only
		\c B_TEAM_INFO_BASIC is supported.
	\param buffer Userland buffer the flattened message is written to; may be
		\c NULL if \a size is 0 (e.g. to query the needed size).
	\param size Size of \a buffer in bytes.
	\param _sizeNeeded Userland pointer the required buffer size is written
		to; always filled in on success paths, even when the buffer is too
		small.
	\return \c B_OK on success, \c B_BUFFER_OVERFLOW if \a buffer is too
		small (with \a _sizeNeeded set), or another error code.
*/
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		// (copied out under the team lock, so the lock can be dropped before
		// the potentially-allocating KMessage calls below)
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		// release the I/O context reference when leaving this scope
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			ReadLocker ioContextLocker(ioContext->lock);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
4586