/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <commpage.h>
#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <find_directory_private.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_load_image.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#   define TRACE(x) dprintf x
#else
#   define TRACE(x) ;
#endif


struct team_key {
    team_id id;
};

struct team_arg {
    char    *path;
    char    **flat_args;
    size_t  flat_args_size;
    uint32  arg_count;
    uint32  env_count;
    mode_t  umask;
    uint32  flags;
    port_id error_port;
    uint32  error_token;
};

#define TEAM_ARGS_FLAG_NO_ASLR  0x01


namespace {


class TeamNotificationService : public DefaultNotificationService {
public:
    TeamNotificationService();

    void Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
    typedef pid_t        KeyType;
    typedef ProcessGroup ValueType;

    size_t HashKey(pid_t key) const
    {
        return key;
    }

    size_t Hash(ProcessGroup* value) const
    {
        return HashKey(value->id);
    }

    bool Compare(pid_t key, ProcessGroup* value) const
    {
        return value->id == key;
    }

    ProcessGroup*& GetLink(ProcessGroup* value) const
    {
        return value->next;
    }
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}   // unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
    = MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

static const size_t kTeamUserDataReservedSize = 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize = 4 * B_PAGE_SIZE;


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
    // queue the entry
    InterruptsWriteSpinLocker locker(sTeamHashLock);
    sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
    // remove the entry
    InterruptsWriteSpinLocker locker(sTeamHashLock);
    sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
    // get the next team -- if there is one, get reference for it
    InterruptsWriteSpinLocker locker(sTeamHashLock);
    Team* team = sTeamHash.NextElement(&fEntry);
    if (team != NULL)
        team->AcquireReference();

    return team;
}


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
    TeamForked(thread_id forkedThread)
        :
        fForkedThread(forkedThread)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team forked, new thread %" B_PRId32, fForkedThread);
    }

private:
    thread_id fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
    ExecTeam(const char* path, int32 argCount, const char* const* args,
        int32 envCount, const char* const* env)
        :
        fArgCount(argCount),
        fArgs(NULL)
    {
        fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
            false);

        // determine the buffer size we need for the args
        size_t argBufferSize = 0;
        for (int32 i = 0; i < argCount; i++)
            argBufferSize += strlen(args[i]) + 1;

        // allocate a buffer
        fArgs = (char*)alloc_tracing_buffer(argBufferSize);
        if (fArgs) {
            char* buffer = fArgs;
            for (int32 i = 0; i < argCount; i++) {
                size_t argSize = strlen(args[i]) + 1;
                memcpy(buffer, args[i], argSize);
                buffer += argSize;
            }
        }

        // ignore env for the time being
        (void)envCount;
        (void)env;

        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team exec, \"%p\", args:", fPath);

        if (fArgs != NULL) {
            char* args = fArgs;
            for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
                out.Print(" \"%s\"", args);
                args += strlen(args) + 1;
            }
        } else
            out.Print(" <too long>");
    }

private:
    char*   fPath;
    int32   fArgCount;
    char*   fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
    switch (state) {
        case JOB_CONTROL_STATE_NONE:
            return "none";
        case JOB_CONTROL_STATE_STOPPED:
            return "stopped";
        case JOB_CONTROL_STATE_CONTINUED:
            return "continued";
        case JOB_CONTROL_STATE_DEAD:
            return "dead";
        default:
            return "invalid";
    }
}

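// Trace entry recording a team's job control state transition and the
// signal (if any) that triggered it.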
class SetJobControlState : public AbstractTraceEntry {
public:
    SetJobControlState(team_id team, job_control_state newState, Signal* signal)
        :
        fTeam(team),
        fNewState(newState),
        fSignal(signal != NULL ? signal->Number() : 0)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team set job control state, team %" B_PRId32 ", "
            "new state: %s, signal: %d",
            fTeam, job_control_state_name(fNewState), fSignal);
    }

private:
    team_id             fTeam;
    job_control_state   fNewState;
    int                 fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
    WaitForChild(pid_t child, uint32 flags)
        :
        fChild(child),
        fFlags(flags)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        out.Print("team wait for child, child: %" B_PRId32 ", "
            "flags: %#" B_PRIx32, fChild, fFlags);
    }

private:
    pid_t   fChild;
    uint32  fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
    WaitForChildDone(const job_control_entry& entry)
        :
        fState(entry.state),
        fTeam(entry.thread),
        fStatus(entry.status),
        fReason(entry.reason),
        fSignal(entry.signal)
    {
        Initialized();
    }

    WaitForChildDone(status_t error)
        :
        fTeam(error)
    {
        Initialized();
    }

    virtual void AddDump(TraceOutput& out)
    {
        if (fTeam >= 0) {
            out.Print("team wait for child done, team: %" B_PRId32 ", "
                "state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
                fTeam, job_control_state_name(fState), fStatus, fReason,
                fSignal);
        } else {
            out.Print("team wait for child failed, error: "
                "%#" B_PRIx32 ", ", fTeam);
        }
    }

private:
    job_control_state   fState;
    team_id             fTeam;
    status_t            fStatus;
    uint16              fReason;
    uint16              fSignal;
};

}   // namespace TeamTracing

#   define T(x) new(std::nothrow) TeamTracing::x;
#else
#   define T(x) ;
#endif


// #pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
    : DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
    char eventBuffer[128];
    KMessage event;
    event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
    event.AddInt32("event", eventCode);
    event.AddInt32("team", team->id);
    event.AddPointer("teamStruct", team);

    DefaultNotificationService::Notify(event, eventCode);
}


// #pragma mark - Team

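/*! Initializes the team object with the given ID, setting all fields to
    their defaults. Allocation failures of \c job_control_entry and
    \c fQueuedSignalsCounter are not reported here; Team::Create() checks
    for them afterwards.
*/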
Team::Team(team_id id, bool kernel)
{
    // allocate an ID
    this->id = id;
    visible = true;
    serial_number = -1;

    // init mutex
    if (kernel) {
        mutex_init(&fLock, "Team:kernel");
    } else {
        char lockName[16];
        snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
        mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
    }

    hash_next = siblings_next = children = parent = NULL;
    fName[0] = '\0';
    fArgs[0] = '\0';
    num_threads = 0;
    io_context = NULL;
    address_space = NULL;
    realtime_sem_context = NULL;
    xsi_sem_context = NULL;
    thread_list = NULL;
    main_thread = NULL;
    loading_info = NULL;
    state = TEAM_STATE_BIRTH;
    flags = 0;
    death_entry = NULL;
    user_data_area = -1;
    user_data = 0;
    used_user_data = 0;
    user_data_size = 0;
    free_user_threads = NULL;

    commpage_address = NULL;

    supplementary_groups = NULL;
    supplementary_group_count = 0;

    dead_threads_kernel_time = 0;
    dead_threads_user_time = 0;
    cpu_clock_offset = 0;

    // dead threads
    list_init(&dead_threads);
    dead_threads_count = 0;

    // dead children
    dead_children.count = 0;
    dead_children.kernel_time = 0;
    dead_children.user_time = 0;

    // job control entry
    job_control_entry = new(nothrow) ::job_control_entry;
    if (job_control_entry != NULL) {
        job_control_entry->state = JOB_CONTROL_STATE_NONE;
        job_control_entry->thread = id;
        job_control_entry->team = this;
    }

    // exit status -- setting initialized to false suffices
    exit.initialized = false;

    list_init(&sem_list);
    list_init_etc(&port_list, port_team_link_offset());
    list_init(&image_list);
    list_init(&watcher_list);

    clear_team_debug_info(&debug_info, true);

    // init dead/stopped/continued children condition vars
    dead_children.condition_variable.Init(&dead_children, "team children");

    B_INITIALIZE_SPINLOCK(&time_lock);
    B_INITIALIZE_SPINLOCK(&signal_lock);

    fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
        kernel ? -1 : MAX_QUEUED_SIGNALS);
    memset(fSignalActions, 0, sizeof(fSignalActions));

    fUserDefinedTimerCount = 0;

    fCoreDumpCondition = NULL;
}


Team::~Team()
{
    // get rid of all associated data
    PrepareForDeletion();

    if (io_context != NULL)
        vfs_put_io_context(io_context);
    delete_owned_ports(this);
    sem_delete_owned_sems(this);

    DeleteUserTimers(false);

    fPendingSignals.Clear();

    if (fQueuedSignalsCounter != NULL)
        fQueuedSignalsCounter->ReleaseReference();

    while (thread_death_entry* threadDeathEntry
            = (thread_death_entry*)list_remove_head_item(&dead_threads)) {
        free(threadDeathEntry);
    }

    while (::job_control_entry* entry = dead_children.entries.RemoveHead())
        delete entry;

    while (free_user_thread* entry = free_user_threads) {
        free_user_threads = entry->next;
        free(entry);
    }

    malloc_referenced_release(supplementary_groups);

    delete job_control_entry;
        // usually already NULL and transferred to the parent

    mutex_destroy(&fLock);
}

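/*! Creates and fully initializes a new Team object with the given ID and
    name. Returns \c NULL if the allocation or any part of the
    initialization fails.
*/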
/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
    // create the team object
    Team* team = new(std::nothrow) Team(id, kernel);
    if (team == NULL)
        return NULL;
    ObjectDeleter<Team> teamDeleter(team);

    if (name != NULL)
        team->SetName(name);

    // check initialization
    if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
        return NULL;

    // finish initialization (arch specifics)
    if (arch_team_init_team_struct(team, kernel) != B_OK)
        return NULL;

    if (!kernel) {
        status_t error = user_timer_create_team_timers(team);
        if (error != B_OK)
            return NULL;
    }

    // everything went fine
    return teamDeleter.Detach();
}


/*! \brief Returns the team with the given ID.
    Returns a reference to the team.
    Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
    if (id == B_CURRENT_TEAM) {
        Team* team = thread_get_current_thread()->team;
        team->AcquireReference();
        return team;
    }

    InterruptsReadSpinLocker locker(sTeamHashLock);
    Team* team = sTeamHash.Lookup(id);
    if (team != NULL)
        team->AcquireReference();
    return team;
}


/*! \brief Returns the team with the given ID in a locked state.
    Returns a reference to the team.
    Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
    // get the team
    Team* team = Get(id);
    if (team == NULL)
        return NULL;

    // lock it
    team->Lock();

    // only return the team, when it isn't already dying
    if (team->state >= TEAM_STATE_SHUTDOWN) {
        team->Unlock();
        team->ReleaseReference();
        return NULL;
    }

    return team;
}


/*! Locks the team and its parent team (if any).
    The caller must hold a reference to the team or otherwise make sure that
    it won't be deleted.
    If the team doesn't have a parent, only the team itself is locked. If the
    team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
    only the team itself is locked.

    \param dontLockParentIfKernel If \c true, the team's parent team is only
        locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
    // The locking order is parent -> child. Since the parent can change as
    // long as we don't lock the team, we need to do a trial and error loop.
    Lock();

    while (true) {
        // If the team doesn't have a parent, we're done. Otherwise try to
        // lock the parent. This will succeed in most cases, simplifying
        // things.
        Team* parent = this->parent;
        if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
            || parent->TryLock()) {
            return;
        }

        // get a temporary reference to the parent, unlock this team, lock the
        // parent, and re-lock this team
        BReference<Team> parentReference(parent);

        Unlock();
        parent->Lock();
        Lock();

        // If the parent hasn't changed in the meantime, we're done.
        if (this->parent == parent)
            return;

        // The parent has changed -- unlock and retry.
        parent->Unlock();
    }
}


/*! Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
    if (parent != NULL)
        parent->Unlock();

    Unlock();
}


/*! Locks the team, its parent team (if any), and the team's process group.
    The caller must hold a reference to the team or otherwise make sure that
    it won't be deleted.
    If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
    LockTeamAndProcessGroup();

    // We hold the group's and the team's lock, but not the parent team's
    // lock. If we have a parent, try to lock it.
    if (this->parent == NULL || this->parent->TryLock())
        return;

    // No success -- unlock the team and let LockTeamAndParent() do the rest
    // of the job.
    Unlock();
    LockTeamAndParent(false);
}


/*! Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
    group->Unlock();

    if (parent != NULL)
        parent->Unlock();

    Unlock();
}

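/*! Locks the team and its process group.
    The caller must hold a reference to the team or otherwise make sure that
    it won't be deleted.
*/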
void
Team::LockTeamAndProcessGroup()
{
    // The locking order is process group -> child. Since the process group
    // can change as long as we don't lock the team, we need to do a trial
    // and error loop.
    Lock();

    while (true) {
        // Try to lock the group. This will succeed in most cases,
        // simplifying things.
        ProcessGroup* group = this->group;
        if (group->TryLock())
            return;

        // get a temporary reference to the group, unlock this team, lock the
        // group, and re-lock this team
        BReference<ProcessGroup> groupReference(group);

        Unlock();
        group->Lock();
        Lock();

        // If the group hasn't changed in the meantime, we're done.
        if (this->group == group)
            return;

        // The group has changed -- unlock and retry.
        group->Unlock();
    }
}


void
Team::UnlockTeamAndProcessGroup()
{
    group->Unlock();
    Unlock();
}


void
Team::SetName(const char* name)
{
    if (const char* lastSlash = strrchr(name, '/'))
        name = lastSlash + 1;

    strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
    strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
    fArgs[0] = '\0';
    strlcpy(fArgs, path, sizeof(fArgs));
    for (int i = 0; i < otherArgCount; i++) {
        strlcat(fArgs, " ", sizeof(fArgs));
        strlcat(fArgs, otherArgs[i], sizeof(fArgs));
    }
}


void
Team::ResetSignalsOnExec()
{
    // We are supposed to keep pending signals. Signal actions shall be reset
    // partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
    // (for SIGCHLD it's implementation-defined). Others shall be reset to
    // SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
    // flags, but since there aren't any handlers, they make little sense, so
    // we clear them.

    for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
        struct sigaction& action = SignalActionFor(i);
        if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
            action.sa_handler = SIG_DFL;

        action.sa_mask = 0;
        action.sa_flags = 0;
        action.sa_userdata = NULL;
    }
}


void
Team::InheritSignalActions(Team* parent)
{
    memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*! Adds the given user timer to the team and, if user-defined, assigns it
    an ID.

    The caller must hold the team's lock.

    \param timer The timer to be added. If it doesn't have an ID yet, it is
        considered user-defined and will be assigned an ID.
    \return \c B_OK, if the timer was added successfully, another error code
        otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
    // don't allow addition of timers when already shutting the team down
    if (state >= TEAM_STATE_SHUTDOWN)
        return B_BAD_TEAM_ID;

    // If the timer is user-defined, check timer limit and increment
    // user-defined count.
    if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
        return EAGAIN;

    fUserTimers.AddTimer(timer);

    return B_OK;
}


/*! Removes the given user timer from the team.

    The caller must hold the team's lock.

    \param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
    fUserTimers.RemoveTimer(timer);

    if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
        UserDefinedTimersRemoved(1);
}

/*! Deletes all (or all user-defined) user timers of the team.

    Timers belonging to the team's threads are not affected.
    The caller must hold the team's lock.

    \param userDefinedOnly If \c true, only the user-defined timers are
        deleted, otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
    int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
    UserDefinedTimersRemoved(count);
}


/*! If not at the limit yet, increments the team's user-defined timer count.
    \return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
    int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
    if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
        atomic_add(&fUserDefinedTimerCount, -1);
        return false;
    }

    return true;
}


/*! Subtracts the given count from the team's user-defined timer count.
    \param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
    atomic_add(&fUserDefinedTimerCount, -count);
}

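/*! Deactivates all of the team's CPU time user timers, i.e. both the
    total CPU time timers and the user CPU time timers. The timers are
    only deactivated, not deleted.
*/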
void
Team::DeactivateCPUTimeUserTimers()
{
    while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
        timer->Deactivate();

    while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
        timer->Deactivate();
}


/*! Returns the team's current total CPU time (kernel + user + offset).

    The caller must hold \c time_lock.

    \param ignoreCurrentRun If \c true and the current thread is one of the
        team's threads, don't add the time since the last time \c last_time
        was updated. Should be used in "thread unscheduled" scheduler
        callbacks, since although the thread is still running at that time,
        its time has already been stopped.
    \return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
    bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
        + dead_threads_user_time;

    Thread* currentThread = thread_get_current_thread();
    bigtime_t now = system_time();

    for (Thread* thread = thread_list; thread != NULL;
            thread = thread->team_next) {
        bool alreadyLocked = thread == lockedThread;
        SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
        time += thread->kernel_time + thread->user_time;

        if (thread->last_time != 0) {
            if (!ignoreCurrentRun || thread != currentThread)
                time += now - thread->last_time;
        }

        if (alreadyLocked)
            threadTimeLocker.Detach();
    }

    return time;
}


/*! Returns the team's current user CPU time.

    The caller must hold \c time_lock.

    \return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
    bigtime_t time = dead_threads_user_time;

    bigtime_t now = system_time();

    for (Thread* thread = thread_list; thread != NULL;
            thread = thread->team_next) {
        SpinLocker threadTimeLocker(thread->time_lock);
        time += thread->user_time;

        if (thread->last_time != 0 && !thread->in_kernel)
            time += now - thread->last_time;
    }

    return time;
}


// #pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
    :
    id(id),
    teams(NULL),
    fSession(NULL),
    fInOrphanedCheckList(false)
{
    char lockName[32];
    snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
    mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
    TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

    // If the group is in the orphaned check list, remove it.
    MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

    if (fInOrphanedCheckList)
        sOrphanedCheckProcessGroups.Remove(this);

    orphanedCheckLocker.Unlock();

    // remove group from the hash table and from the session
    if (fSession != NULL) {
        InterruptsSpinLocker groupHashLocker(sGroupHashLock);
        sGroupHash.RemoveUnchecked(this);
        groupHashLocker.Unlock();

        fSession->ReleaseReference();
    }

    mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
    InterruptsSpinLocker groupHashLocker(sGroupHashLock);
    ProcessGroup* group = sGroupHash.Lookup(id);
    if (group != NULL)
        group->AcquireReference();
    return group;
}


/*! Adds the group to the given session and makes it publicly accessible.
    The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
    InterruptsSpinLocker groupHashLocker(sGroupHashLock);
    PublishLocked(session);
}


/*! Adds the group to the given session and makes it publicly accessible.
    The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
    ASSERT(sGroupHash.Lookup(this->id) == NULL);

    fSession = session;
    fSession->AcquireReference();

    sGroupHash.InsertUnchecked(this);
}


/*! Checks whether the process group is orphaned.
    The caller must hold the group's lock.
    \return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
    // Orphaned Process Group: "A process group in which the parent of every
    // member is either itself a member of the group or is not a member of
    // the group's session." (Open Group Base Specs Issue 7)
    bool orphaned = true;

    Team* team = teams;
    while (orphaned && team != NULL) {
        team->LockTeamAndParent(false);

        Team* parent = team->parent;
        if (parent != NULL && parent->group_id != id
            && parent->session_id == fSession->id) {
            orphaned = false;
        }

        team->UnlockTeamAndParent();

        team = team->group_next;
    }

    return orphaned;
}


void
ProcessGroup::ScheduleOrphanedCheck()
{
    MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

    if (!fInOrphanedCheckList) {
        sOrphanedCheckProcessGroups.Add(this);
        fInOrphanedCheckList = true;
    }
}


void
ProcessGroup::UnsetOrphanedCheck()
{
    fInOrphanedCheckList = false;
}


// #pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
    :
    id(id),
    controlling_tty(-1),
    foreground_group(-1)
{
    char lockName[32];
    snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
    mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
    mutex_destroy(&fLock);
}


// #pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
    kprintf("TEAM: %p\n", team);
    kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
        team->id);
    kprintf("serial_number: %" B_PRId64 "\n", team->serial_number);
    kprintf("name: '%s'\n", team->Name());
    kprintf("args: '%s'\n", team->Args());
    kprintf("hash_next: %p\n", team->hash_next);
    kprintf("parent: %p", team->parent);
    if (team->parent != NULL) {
        kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
    } else
        kprintf("\n");

    kprintf("children: %p\n", team->children);
    kprintf("num_threads: %d\n", team->num_threads);
    kprintf("state: %d\n", team->state);
    kprintf("flags: 0x%" B_PRIx32 "\n", team->flags);
    kprintf("io_context: %p\n", team->io_context);
    if (team->address_space)
        kprintf("address_space: %p\n", team->address_space);
    kprintf("user data: %p (area %" B_PRId32 ")\n",
        (void*)team->user_data, team->user_data_area);
    kprintf("free user thread: %p\n", team->free_user_threads);
    kprintf("main_thread: %p\n", team->main_thread);
    kprintf("thread_list: %p\n", team->thread_list);
    kprintf("group_id: %" B_PRId32 "\n", team->group_id);
    kprintf("session_id: %" B_PRId32 "\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
    ulong arg;
    bool found = false;

    if (argc < 2) {
        Thread* thread = thread_get_current_thread();
        if (thread != NULL && thread->team != NULL)
            _dump_team_info(thread->team);
        else
            kprintf("No current team!\n");
        return 0;
    }

    arg = strtoul(argv[1], NULL, 0);
    if (IS_KERNEL_ADDRESS(arg)) {
        // semi-hack
        _dump_team_info((Team*)arg);
        return 0;
    }

    // walk through the team list, trying to match name or id
    for (TeamTable::Iterator it = sTeamHash.GetIterator();
            Team* team = it.Next();) {
        if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
            || team->id == (team_id)arg) {
            _dump_team_info(team);
            found = true;
            break;
        }
    }

    if (!found) {
        kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1],
            (team_id)arg);
    }
    return 0;
}

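/*! KDL command: prints a one-line summary (object address, ID, parent
    pointer, and name) of every team in the system.
*/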
static int
dump_teams(int argc, char** argv)
{
    kprintf("%-*s id %-*s name\n", B_PRINTF_POINTER_WIDTH, "team",
        B_PRINTF_POINTER_WIDTH, "parent");

    for (TeamTable::Iterator it = sTeamHash.GetIterator();
            Team* team = it.Next();) {
        kprintf("%p%7" B_PRId32 " %p %s\n", team, team->id, team->parent,
            team->Name());
    }

    return 0;
}


// #pragma mark - Private functions


/*! Inserts team \a team into the child list of team \a parent.

    The caller must hold the lock of both \a parent and \a team.

    \param parent The parent team.
    \param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
    ASSERT(parent != NULL);

    team->siblings_next = parent->children;
    parent->children = team;
    team->parent = parent;
}


/*! Removes team \a team from the child list of team \a parent.

    The caller must hold the lock of both \a parent and \a team.

    \param parent The parent team.
    \param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
    Team* child;
    Team* last = NULL;

    for (child = parent->children; child != NULL;
            child = child->siblings_next) {
        if (child == team) {
            if (last == NULL)
                parent->children = child->siblings_next;
            else
                last->siblings_next = child->siblings_next;

            team->parent = NULL;
            break;
        }
        last = child;
    }
}


/*! Returns whether the given team is a session leader.
    The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
    return team->session_id == team->id;
}


/*! Returns whether the given team is a process group leader.
    The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
    return team->group_id == team->id;
}


/*! Inserts the given team into the given process group.
    The caller must hold the process group's lock, the team's lock, and the
    team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
    team->group = group;
    team->group_id = group->id;
    team->session_id = group->Session()->id;

    team->group_next = group->teams;
    group->teams = team;
    group->AcquireReference();
}


/*! Removes the given team from its process group.

    The caller must hold the process group's lock, the team's lock, and the
    team's parent's lock. Interrupts must be enabled.

    \param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
    ProcessGroup* group = team->group;
    Team* current;
    Team* last = NULL;

    // the team must be in a process group to let this function have any
    // effect
    if (group == NULL)
        return;

    for (current = group->teams; current != NULL;
            current = current->group_next) {
        if (current == team) {
            if (last == NULL)
                group->teams = current->group_next;
            else
                last->group_next = current->group_next;

            team->group = NULL;
            break;
        }
        last = current;
    }

    team->group = NULL;
    team->group_next = NULL;

    group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
    void* address;
    uint32 addressSpec;

    if (exactAddress != NULL) {
        address = exactAddress;
        addressSpec = B_EXACT_ADDRESS;
    } else {
        address = (void*)KERNEL_USER_DATA_BASE;
        addressSpec = B_RANDOMIZED_BASE_ADDRESS;
    }

    status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
        kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

    virtual_address_restrictions virtualRestrictions = {};
    if (result == B_OK || exactAddress != NULL) {
        if (exactAddress != NULL)
            virtualRestrictions.address = exactAddress;
        else
            virtualRestrictions.address = address;
        virtualRestrictions.address_specification = B_EXACT_ADDRESS;
    } else {
        virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
        virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
    }

    physical_address_restrictions physicalRestrictions = {};
    team->user_data_area = create_area_etc(team->id, "user area",
        kTeamUserDataInitialSize, B_FULL_LOCK, B_READ_AREA | B_WRITE_AREA, 0, 0,
        &virtualRestrictions, &physicalRestrictions, &address);
    if (team->user_data_area < 0)
        return team->user_data_area;

    team->user_data = (addr_t)address;
    team->used_user_data = 0;
    team->user_data_size = kTeamUserDataInitialSize;
    team->free_user_threads = NULL;

    return B_OK;
}


static void
delete_team_user_data(Team* team)
{
    if (team->user_data_area >= 0) {
        vm_delete_area(team->id, team->user_data_area, true);
        vm_unreserve_address_range(team->id, (void*)team->user_data,
            kTeamUserDataReservedSize);

        team->user_data = 0;
        team->used_user_data = 0;
        team->user_data_size = 0;
        team->user_data_area = -1;
        while (free_user_thread* entry = team->free_user_threads) {
            team->free_user_threads = entry->next;
            free(entry);
        }
    }
}

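/*! Copies the flat argument and environment array from userland into a
    newly allocated kernel buffer and relocates the contained string
    pointers so they point into the kernel copy. Checks that both arrays
    are NULL-terminated and that all strings lie within the flat buffer.
    On success \a _flatArgs is set to the kernel copy, which the caller
    has to free().
*/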
static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
    int32 argCount, int32 envCount, char**& _flatArgs)
{
    if (argCount < 0 || envCount < 0)
        return B_BAD_VALUE;

    if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
        return B_TOO_MANY_ARGS;
    if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
        return B_BAD_VALUE;

    if (!IS_USER_ADDRESS(userFlatArgs))
        return B_BAD_ADDRESS;

    // allocate kernel memory
    char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
    if (flatArgs == NULL)
        return B_NO_MEMORY;

    if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
        free(flatArgs);
        return B_BAD_ADDRESS;
    }

    // check and relocate the array
    status_t error = B_OK;
    const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
    const char* stringEnd = (char*)flatArgs + flatArgsSize;
    for (int32 i = 0; i < argCount + envCount + 2; i++) {
        if (i == argCount || i == argCount + envCount + 1) {
            // check array null termination
            if (flatArgs[i] != NULL) {
                error = B_BAD_VALUE;
                break;
            }
        } else {
            // check string
            char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
            size_t maxLen = stringEnd - arg;
            if (arg < stringBase || arg >= stringEnd
                || strnlen(arg, maxLen) == maxLen) {
                error = B_BAD_VALUE;
                break;
            }

            flatArgs[i] = arg;
        }
    }

    if (error == B_OK)
        _flatArgs = flatArgs;
    else
        free(flatArgs);

    return error;
}


static void
free_team_arg(struct team_arg* teamArg)
{
    if (teamArg != NULL) {
        free(teamArg->flat_args);
        free(teamArg->path);
        free(teamArg);
    }
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
    size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
    port_id port, uint32 token)
{
    struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
    if (teamArg == NULL)
        return B_NO_MEMORY;

    teamArg->path = strdup(path);
    if (teamArg->path == NULL) {
        free(teamArg);
        return B_NO_MEMORY;
    }

    // copy the args over
    teamArg->flat_args = flatArgs;
    teamArg->flat_args_size = flatArgsSize;
    teamArg->arg_count = argCount;
    teamArg->env_count = envCount;
    teamArg->flags = 0;
    teamArg->umask = umask;
    teamArg->error_port = port;
    teamArg->error_token = token;

    // determine the flags from the environment
    const char* const* env = flatArgs + argCount + 1;
    for (int32 i = 0; i < envCount; i++) {
        if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
            teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
            break;
        }
    }

    *_teamArg = teamArg;
    return B_OK;
}

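/*! Does the actual work of starting a newly created team: copies the
    program arguments and environment onto the user stack, registers the
    commpage image, loads the runtime loader, and enters userspace.
    Returns only in case of error.
*/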
static status_t
team_create_thread_start_internal(void* args)
{
    status_t err;
    Thread* thread;
    Team* team;
    struct team_arg* teamArgs = (struct team_arg*)args;
    const char* path;
    addr_t entry;
    char** userArgs;
    char** userEnv;
    struct user_space_program_args* programArgs;
    uint32 argCount, envCount;

    thread = thread_get_current_thread();
    team = thread->team;
    cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

    TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
        thread->id));

    // Main stack area layout is currently as follows (starting from 0):
    //
    // size                             | usage
    // ---------------------------------+--------------------------------
    // USER_MAIN_THREAD_STACK_SIZE      | actual stack
    // TLS_SIZE                         | TLS data
    // sizeof(user_space_program_args)  | argument structure for the runtime
    //                                  | loader
    // flat arguments size              | flat process arguments and environment

    // TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
    // the heap
    // TODO: we could reserve the whole USER_STACK_REGION upfront...

    argCount = teamArgs->arg_count;
    envCount = teamArgs->env_count;

    programArgs = (struct user_space_program_args*)(thread->user_stack_base
        + thread->user_stack_size + TLS_SIZE);

    userArgs = (char**)(programArgs + 1);
    userEnv = userArgs + argCount + 1;
    path = teamArgs->path;

    if (user_strlcpy(programArgs->program_path, path,
            sizeof(programArgs->program_path)) < B_OK
        || user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32)) < B_OK
        || user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
        || user_memcpy(&programArgs->env_count, &envCount, sizeof(int32)) < B_OK
        || user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
        || user_memcpy(&programArgs->error_port, &teamArgs->error_port,
            sizeof(port_id)) < B_OK
        || user_memcpy(&programArgs->error_token, &teamArgs->error_token,
            sizeof(uint32)) < B_OK
        || user_memcpy(&programArgs->umask, &teamArgs->umask,
            sizeof(mode_t)) < B_OK
        || user_memcpy(userArgs, teamArgs->flat_args,
            teamArgs->flat_args_size) < B_OK) {
        // the team deletion process will clean this mess
        free_team_arg(teamArgs);
        return B_BAD_ADDRESS;
    }

    TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

    // set team args and update state
    team->Lock();
    team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
    team->state = TEAM_STATE_NORMAL;
    team->Unlock();

    free_team_arg(teamArgs);
        // the arguments are already on the user stack, we no longer need
        // them in this form

    // Clone commpage area
    area_id commPageArea = clone_commpage_area(team->id,
        &team->commpage_address);
    if (commPageArea < B_OK) {
        TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
            strerror(commPageArea)));
        return commPageArea;
    }

    // Register commpage image
    image_id commPageImage = get_commpage_image();
    extended_image_info imageInfo;
    err = get_image_info(commPageImage, &imageInfo.basic_info);
    if (err != B_OK) {
        TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
            strerror(err)));
        return err;
    }
    imageInfo.basic_info.text = team->commpage_address;
    imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
    imageInfo.symbol_table = NULL;
    imageInfo.symbol_hash = NULL;
    imageInfo.string_table = NULL;
    image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
    if (image < 0) {
        TRACE(("team_create_thread_start: register_image() failed: %s\n",
            strerror(image)));
        return image;
    }

    // NOTE: Normally arch_thread_enter_userspace() never returns, that is
    // automatic variables with function scope will never be destroyed.
    {
        // find runtime_loader path
        KPath runtimeLoaderPath;
        err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
            runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
        if (err < B_OK) {
            TRACE(("team_create_thread_start: find_directory() failed: %s\n",
                strerror(err)));
            return err;
        }
        runtimeLoaderPath.UnlockBuffer();
        err = runtimeLoaderPath.Append("runtime_loader");

        if (err == B_OK) {
            err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
                &entry);
        }
    }

    if (err < B_OK) {
        // Luckily, we don't have to clean up the mess we created - that's
        // done for us by the normal team deletion process
        TRACE(("team_create_thread_start: elf_load_user_image() failed: "
            "%s\n", strerror(err)));
        return err;
    }

    TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

    // enter userspace -- returns only in case of error
    return thread_enter_userspace_new_team(thread, (addr_t)entry,
        programArgs, team->commpage_address);
}


static status_t
team_create_thread_start(void* args)
{
    team_create_thread_start_internal(args);
    team_init_exit_info_on_error(thread_get_current_thread()->team);
    thread_exit();
        // does not return
    return B_OK;
}

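/*! Creates a new team running the executable given by the flat arguments,
    and creates the team's main thread. On success ownership of the flat
    arguments is transferred (\a _flatArgs is set to \c NULL) and the ID of
    the new team's main thread is returned; otherwise an error code is
    returned.
*/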
static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
    int32 envCount, int32 priority, team_id parentID, uint32 flags,
    port_id errorPort, uint32 errorToken)
{
    char** flatArgs = _flatArgs;
    thread_id thread;
    status_t status;
    struct team_arg* teamArgs;
    struct team_loading_info loadingInfo;
    ConditionVariableEntry loadingWaitEntry;
    io_context* parentIOContext = NULL;
    team_id teamID;
    bool teamLimitReached = false;

    if (flatArgs == NULL || argCount == 0)
        return B_BAD_VALUE;

    const char* path = flatArgs[0];

    TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
        "\n", path, flatArgs, argCount));

    // cut the path from the main thread name
    const char* threadName = strrchr(path, '/');
    if (threadName != NULL)
        threadName++;
    else
        threadName = path;

    // create the main thread object
    Thread* mainThread;
    status = Thread::Create(threadName, mainThread);
    if (status != B_OK)
        return status;
    BReference<Thread> mainThreadReference(mainThread, true);

    // create team object
    Team* team = Team::Create(mainThread->id, path, false);
    if (team == NULL)
        return B_NO_MEMORY;
    BReference<Team> teamReference(team, true);

    if ((flags & B_WAIT_TILL_LOADED) != 0) {
        loadingInfo.condition.Init(team, "image load");
        loadingInfo.condition.Add(&loadingWaitEntry);
        loadingInfo.result = B_ERROR;
        team->loading_info = &loadingInfo;
    }

    // get the parent team
    Team* parent = Team::Get(parentID);
    if (parent == NULL)
        return B_BAD_TEAM_ID;
    BReference<Team> parentReference(parent, true);

    parent->LockTeamAndProcessGroup();
    team->Lock();

    // inherit the parent's user/group
    inherit_parent_user_and_group(team, parent);

    // get a reference to the parent's I/O context -- we need it to create
    // ours
    parentIOContext = parent->io_context;
    vfs_get_io_context(parentIOContext);

    team->Unlock();
    parent->UnlockTeamAndProcessGroup();

    // check the executable's set-user/group-id permission
    update_set_id_user_and_group(team, path);

    status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
        envCount, (mode_t)-1, errorPort, errorToken);
    if (status != B_OK)
        goto err1;

    _flatArgs = NULL;
        // args are owned by the team_arg structure now

    // create a new io_context for this team
    team->io_context = vfs_new_io_context(parentIOContext, true);
    if (!team->io_context) {
        status = B_NO_MEMORY;
        goto err2;
    }

    // We don't need the parent's I/O context any longer.
    vfs_put_io_context(parentIOContext);
    parentIOContext = NULL;

    // remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
    vfs_exec_io_context(team->io_context);

    // create an address space for this team
    status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
        &team->address_space);
    if (status != B_OK)
        goto err2;

    team->address_space->SetRandomizingEnabled(
        (teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

    // create the user data area
    status = create_team_user_data(team);
    if (status != B_OK)
        goto err4;

    // insert the team into its parent and the teams hash
    parent->LockTeamAndProcessGroup();
    team->Lock();

    {
        InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

        sTeamHash.Insert(team);
        teamLimitReached = sUsedTeams >= sMaxTeams;
        if (!teamLimitReached)
            sUsedTeams++;
    }

    insert_team_into_parent(parent, team);
    insert_team_into_group(parent->group, team);

    team->Unlock();
    parent->UnlockTeamAndProcessGroup();

    // notify team listeners
    sNotificationService.Notify(TEAM_ADDED, team);

    if (teamLimitReached) {
        status = B_NO_MORE_TEAMS;
        goto err6;
    }

    // In case we start the main thread, we shouldn't access the team object
    // afterwards, so cache the team's ID.
    teamID = team->id;

    // Create a kernel thread, but under the context of the new team.
    // The new thread will take over ownership of teamArgs.
    {
        ThreadCreationAttributes threadAttributes(team_create_thread_start,
            threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
        threadAttributes.additional_stack_size = sizeof(user_space_program_args)
            + teamArgs->flat_args_size;
        thread = thread_create_thread(threadAttributes, false);
        if (thread < 0) {
            status = thread;
            goto err6;
        }
    }

    // The team has been created successfully, so we keep the reference. Or
    // more precisely: It's owned by the team's main thread, now.
    teamReference.Detach();

    // wait for the loader of the new team to finish its work
    if ((flags & B_WAIT_TILL_LOADED) != 0) {
        if (mainThread != NULL) {
            // resume the team's main thread
            thread_continue(mainThread);
        }

        // Now wait until loading is finished. We will be woken either by the
        // thread, when it finished or aborted loading, or when the team is
        // going to die (e.g. is killed). In either case the one notifying is
        // responsible for unsetting `loading_info` in the team structure.
        loadingWaitEntry.Wait();

        if (loadingInfo.result < B_OK)
            return loadingInfo.result;
    }

    // notify the debugger
    user_debug_team_created(teamID);

    return thread;

err6:
    // Remove the team structure from the process group, the parent team, and
    // the team hash table and delete the team structure.
    parent->LockTeamAndProcessGroup();
    team->Lock();

    remove_team_from_group(team);
    remove_team_from_parent(team->parent, team);

    team->Unlock();
    parent->UnlockTeamAndProcessGroup();

    {
        InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
        sTeamHash.Remove(team);
        if (!teamLimitReached)
            sUsedTeams--;
    }

    sNotificationService.Notify(TEAM_REMOVED, team);

    delete_team_user_data(team);
err4:
    team->address_space->Put();
err2:
    free_team_arg(teamArgs);
err1:
    if (parentIOContext != NULL)
        vfs_put_io_context(parentIOContext);

    return status;
}


/*! Almost shuts down the current team and loads a new image into it.
    If successful, this function does not return and will take over ownership
    of the arguments provided.
    This function may only be called in a userland team (caused by one of the
    exec*() syscalls).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
    int32 argCount, int32 envCount, mode_t umask)
{
    // NOTE: Since this function normally doesn't return, don't use automatic
    // variables that need destruction in the function scope.
    char** flatArgs = _flatArgs;
    Team* team = thread_get_current_thread()->team;
    struct team_arg* teamArgs;
    const char* threadName;
    thread_id nubThreadID = -1;

    TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
        B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
        team->id));

    T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

    // switching the kernel at run time is probably not a good idea :)
    if (team == team_get_kernel_team())
        return B_NOT_ALLOWED;

    // we currently need to be single threaded here
    // TODO: maybe we should just kill all other threads and
    // make the current thread the team's main thread?
    Thread* currentThread = thread_get_current_thread();
    if (currentThread != team->main_thread)
        return B_NOT_ALLOWED;

    // The debug nub thread, a pure kernel thread, is allowed to survive.
    // We iterate through the thread list to make sure that there's no other
    // thread.
    TeamLocker teamLocker(team);
    InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

    if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
        nubThreadID = team->debug_info.nub_thread;

    debugInfoLocker.Unlock();

    for (Thread* thread = team->thread_list; thread != NULL;
            thread = thread->team_next) {
        if (thread != team->main_thread && thread->id != nubThreadID)
            return B_NOT_ALLOWED;
    }

    team->DeleteUserTimers(true);
    team->ResetSignalsOnExec();

    teamLocker.Unlock();

    status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
        argCount, envCount, umask, -1, 0);
    if (status != B_OK)
        return status;

    _flatArgs = NULL;
        // args are owned by the team_arg structure now

    // TODO: remove team resources if there are any left
    // thread_atkernel_exit() might not be called at all

    thread_reset_for_exec();

    user_debug_prepare_for_exec();

    delete_team_user_data(team);
    vm_delete_areas(team->address_space, false);
    xsi_sem_undo(team);
    delete_owned_ports(team);
    sem_delete_owned_sems(team);
    remove_images(team);
    vfs_exec_io_context(team->io_context);
    delete_realtime_sem_context(team->realtime_sem_context);
    team->realtime_sem_context = NULL;

    // update ASLR
    team->address_space->SetRandomizingEnabled(
        (teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

    status = create_team_user_data(team);
    if (status != B_OK) {
        // creating the user data failed -- we're toast
        free_team_arg(teamArgs);
        exit_thread(status);
        return status;
    }

    user_debug_finish_after_exec();

    // rename the team

    team->Lock();
    team->SetName(path);
    team->Unlock();

    // cut the path from the team name and rename the main thread, too
    threadName = strrchr(path, '/');
    if (threadName != NULL)
        threadName++;
    else
        threadName = path;
    rename_thread(thread_get_current_thread_id(), threadName);

    atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

    // Update user/group according to the executable's set-user/group-id
    // permission.
    update_set_id_user_and_group(team, path);

    user_debug_team_exec();

    // notify team listeners
    sNotificationService.Notify(TEAM_EXEC, team);

    // get a user thread for the thread
    user_thread* userThread = team_allocate_user_thread(team);
        // cannot fail (the allocation for the team would have failed already)
    ThreadLocker currentThreadLocker(currentThread);
    currentThread->user_thread = userThread;
    currentThreadLocker.Unlock();

    // create the user stack for the thread
    status = thread_create_user_stack(currentThread->team, currentThread, NULL,
        0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
    if (status == B_OK) {
        // prepare the stack, load the runtime loader, and enter userspace
        team_create_thread_start(teamArgs);
            // never returns
    } else
        free_team_arg(teamArgs);

    // Sorry, we have to kill ourselves, there is no way out anymore
    // (without any areas left and all that).
    exit_thread(status);

    // We return a status here since the signal that is sent by the
    // call above is not immediately handled.
    return B_ERROR;
}

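/*! Implements fork(): creates a copy of the current userland team, cloning
    its address space (areas are copied via vm_copy_area()), I/O context,
    images, and signal state, and creates a main thread for the child that
    resumes with the parent's saved register state. Returns the child's
    main thread ID to the parent, or an error code.
*/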
static thread_id
fork_team(void)
{
    Thread* parentThread = thread_get_current_thread();
    Team* parentTeam = parentThread->team;
    Team* team;
    arch_fork_arg* forkArgs;
    struct area_info info;
    thread_id threadID;
    status_t status;
    ssize_t areaCookie;
    bool teamLimitReached = false;

    TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

    if (parentTeam == team_get_kernel_team())
        return B_NOT_ALLOWED;

    // create a new team
    // TODO: this is very similar to load_image_internal() - maybe we can do
    // something about it :)

    // create the main thread object
    Thread* thread;
    status = Thread::Create(parentThread->name, thread);
    if (status != B_OK)
        return status;
    BReference<Thread> threadReference(thread, true);

    // create the team object
    team = Team::Create(thread->id, NULL, false);
    if (team == NULL)
        return B_NO_MEMORY;

    parentTeam->LockTeamAndProcessGroup();
    team->Lock();

    team->SetName(parentTeam->Name());
    team->SetArgs(parentTeam->Args());

    team->commpage_address = parentTeam->commpage_address;

    // Inherit the parent's user/group.
    inherit_parent_user_and_group(team, parentTeam);

    // inherit signal handlers
    team->InheritSignalActions(parentTeam);

    team->Unlock();
    parentTeam->UnlockTeamAndProcessGroup();

    // inherit some team debug flags
    team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
        & B_TEAM_DEBUG_INHERITED_FLAGS;

    forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
    if (forkArgs == NULL) {
        status = B_NO_MEMORY;
        goto err1;
    }

    // create a new io_context for this team
    team->io_context = vfs_new_io_context(parentTeam->io_context, false);
    if (!team->io_context) {
        status = B_NO_MEMORY;
        goto err2;
    }

    // duplicate the realtime sem context
    if (parentTeam->realtime_sem_context) {
        team->realtime_sem_context = clone_realtime_sem_context(
            parentTeam->realtime_sem_context);
        if (team->realtime_sem_context == NULL) {
            status = B_NO_MEMORY;
            goto err2;
        }
    }

    // create an address space for this team
    status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
        &team->address_space);
    if (status < B_OK)
        goto err3;

    // copy all areas of the team
    // TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	areaCookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team, info.address);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.protection, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}

	if (status < B_OK)
		goto err4;

	if (thread->user_thread == NULL) {
#if KDEBUG
		panic("user data area not found, parent area is %" B_PRId32,
			parentTeam->user_data_area);
#endif
		status = B_ERROR;
		goto err4;
	}

	thread->user_stack_base = parentThread->user_stack_base;
	thread->user_stack_size = parentThread->user_stack_size;
	thread->user_local_storage = parentThread->user_local_storage;
	thread->sig_block_mask = parentThread->sig_block_mask;
	thread->signal_stack_base = parentThread->signal_stack_base;
	thread->signal_stack_size = parentThread->signal_stack_size;
	thread->signal_stack_enabled = parentThread->signal_stack_enabled;

	arch_store_fork_frame(forkArgs);

	// copy image list
	if (copy_images(parentTeam->id, team) != B_OK)
		goto err5;

	// insert the team into its parent and the teams hash
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parentTeam, team);
	insert_team_into_group(parentTeam->group, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// create the main thread
	{
		ThreadCreationAttributes threadCreationAttributes(NULL,
			parentThread->name, parentThread->priority, NULL, team->id, thread);
		threadCreationAttributes.forkArgs = forkArgs;
		threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS;
		threadID = thread_create_thread(threadCreationAttributes, false);
		if (threadID < 0) {
			status = threadID;
			goto err6;
		}
	}

	// notify the debugger
	user_debug_team_created(team->id);

	T(TeamForked(threadID));

	resume_thread(threadID);
	return threadID;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);
err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err2:
	free(forkArgs);
err1:
	team->ReleaseReference();

	return status;
}


/*!	Returns whether the specified team \a parent has any children belonging to
	the process group with the specified ID \a groupID.
	The caller must hold \a parent's lock.
*/
static bool
has_children_in_group(Team* parent, pid_t groupID)
{
	for (Team* child = parent->children; child != NULL;
			child = child->siblings_next) {
		TeamLocker childLocker(child);
		if (child->group_id == groupID)
			return true;
	}

	return false;
}


/*!	Returns the first job control entry from \a children, which matches \a id.
	\a id can be:
	- \code > 0 \endcode: Matching an entry with that team ID.
	- \code == -1 \endcode: Matching any entry.
	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
	\c 0 is an invalid value for \a id.

	The caller must hold the lock of the team that \a children belongs to.

	\param children The job control entry list to check.
	\param id The match criterion.
	\return The first matching entry or \c NULL, if none matches.
*/
static job_control_entry*
get_job_control_entry(team_job_control_children& children, pid_t id)
{
	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
			job_control_entry* entry = it.Next();) {

		if (id > 0) {
			if (entry->thread == id)
				return entry;
		} else if (id == -1) {
			return entry;
		} else {
			pid_t processGroup
				= (entry->team ? entry->team->group_id : entry->group_id);
			if (processGroup == -id)
				return entry;
		}
	}

	return NULL;
}
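

#if 0
// Sketch of the \a id matching convention documented above (illustrative;
// the IDs are made up and "children" stands for some team's entry list):
get_job_control_entry(children, 123);	// entry with team ID 123
get_job_control_entry(children, -1);	// any entry
get_job_control_entry(children, -42);	// any entry of process group 42
#endif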


/*!	Returns the first job control entry from one of team's dead, continued, or
	stopped children which matches \a id.
	\a id can be:
	- \code > 0 \endcode: Matching an entry with that team ID.
	- \code == -1 \endcode: Matching any entry.
	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
	\c 0 is an invalid value for \a id.

	The caller must hold \a team's lock.

	\param team The team whose dead, stopped, and continued child lists shall
		be checked.
	\param id The match criterion.
	\param flags Specifies which children shall be considered. Dead children
		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
		children are considered when \a flags is ORed bitwise with \c WUNTRACED
		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
		\c WCONTINUED.
	\return The first matching entry or \c NULL, if none matches.
*/
static job_control_entry*
get_job_control_entry(Team* team, pid_t id, uint32 flags)
{
	job_control_entry* entry = NULL;

	if ((flags & WEXITED) != 0)
		entry = get_job_control_entry(team->dead_children, id);

	if (entry == NULL && (flags & WCONTINUED) != 0)
		entry = get_job_control_entry(team->continued_children, id);

	if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0)
		entry = get_job_control_entry(team->stopped_children, id);

	return entry;
}


job_control_entry::job_control_entry()
	:
	has_group_ref(false)
{
}


job_control_entry::~job_control_entry()
{
	if (has_group_ref) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);

		ProcessGroup* group = sGroupHash.Lookup(group_id);
		if (group == NULL) {
			panic("job_control_entry::~job_control_entry(): unknown group "
				"ID: %" B_PRId32, group_id);
			return;
		}

		groupHashLocker.Unlock();

		group->ReleaseReference();
	}
}


/*!	Invoked when the owning team is dying, initializing the entry according to
	the dead state.

	The caller must hold the owning team's lock and the scheduler lock.
*/
void
job_control_entry::InitDeadState()
{
	if (team != NULL) {
		ASSERT(team->exit.initialized);

		group_id = team->group_id;
		team->group->AcquireReference();
		has_group_ref = true;

		thread = team->id;
		status = team->exit.status;
		reason = team->exit.reason;
		signal = team->exit.signal;
		signaling_user = team->exit.signaling_user;
		user_time = team->dead_threads_user_time
			+ team->dead_children.user_time;
		kernel_time = team->dead_threads_kernel_time
			+ team->dead_children.kernel_time;

		team = NULL;
	}
}


job_control_entry&
job_control_entry::operator=(const job_control_entry& other)
{
	state = other.state;
	thread = other.thread;
	signal = other.signal;
	has_group_ref = false;
	signaling_user = other.signaling_user;
	team = other.team;
	group_id = other.group_id;
	status = other.status;
	reason = other.reason;
	user_time = other.user_time;
	kernel_time = other.kernel_time;

	return *this;
}


/*!	This is the kernel backend for waitid().
*/
static thread_id
wait_for_child(pid_t child, uint32 flags, siginfo_t& _info,
	team_usage_info& _usage_info)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	struct job_control_entry foundEntry;
	struct job_control_entry* freeDeathEntry = NULL;
	status_t status = B_OK;

	TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n",
		child, flags));

	T(WaitForChild(child, flags));

	if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) {
		T(WaitForChildDone(B_BAD_VALUE));
		return B_BAD_VALUE;
	}

	pid_t originalChild = child;

	bool ignoreFoundEntries = false;
	bool ignoreFoundEntriesChecked = false;

	while (true) {
		// lock the team
		TeamLocker teamLocker(team);

		// A 0 child argument means to wait for all children in the process
		// group of the calling team.
		child = originalChild == 0 ? -team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		if (entry == NULL) {
			// No success yet -- check whether there are any children complying
			// to the process group specification at all.
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// When we got here, we have a valid death entry, and already got
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other
	// child status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we rather
	// wait until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}
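

#if 0
// Userland view of the flags handled above (illustrative, compiled out):
// waitpid()/waitid() funnel into wait_for_child(), e.g.
int status;
pid_t pid = waitpid(-1, &status, WNOHANG | WUNTRACED | WCONTINUED);
	// a result of 0 corresponds to the B_WOULD_BLOCK case above,
	// -1 with errno ECHILD to the "no child we could wait for" case
#endif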


/*!	Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size != sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	return B_OK;
}


/*!	Returns whether the process group contains stopped processes.
	The caller must hold the process group's lock.
*/
static bool
process_group_has_stopped_processes(ProcessGroup* group)
{
	Team* team = group->teams;
	while (team != NULL) {
		// the parent team's lock guards the job control entry -- acquire it
		team->LockTeamAndParent(false);

		if (team->job_control_entry != NULL
			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
			team->UnlockTeamAndParent();
			return true;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return false;
}


/*!	Iterates through all process groups queued in team_remove_team() and
	signals those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}


static status_t
common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
	uint32 flags)
{
	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
		return B_BAD_VALUE;

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
	TeamLocker teamLocker(team, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		uid_t uid = geteuid();
		if (uid != 0 && uid != team->effective_uid)
			return B_NOT_ALLOWED;
	}

	bigtime_t kernelTime = 0;
	bigtime_t userTime = 0;

	switch (who) {
		case B_TEAM_USAGE_SELF:
		{
			Thread* thread = team->thread_list;

			for (; thread != NULL; thread = thread->team_next) {
				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
				kernelTime += thread->kernel_time;
				userTime += thread->user_time;
			}

			kernelTime += team->dead_threads_kernel_time;
			userTime += team->dead_threads_user_time;
			break;
		}

		case B_TEAM_USAGE_CHILDREN:
		{
			Team* child = team->children;
			for (; child != NULL; child = child->siblings_next) {
				TeamLocker childLocker(child);

				Thread* thread = child->thread_list;
					// sum up the child's threads, not our own

				for (; thread != NULL; thread = thread->team_next) {
					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
					kernelTime += thread->kernel_time;
					userTime += thread->user_time;
				}

				kernelTime += child->dead_threads_kernel_time;
				userTime += child->dead_threads_user_time;
			}

			kernelTime += team->dead_children.kernel_time;
			userTime += team->dead_children.user_time;
			break;
		}
	}

	info->kernel_time = kernelTime;
	info->user_time = userTime;

	return B_OK;
}
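

#if 0
// Usage sketch for the public wrapper (illustrative): B_TEAM_USAGE_SELF sums
// the team's own live and dead threads, B_TEAM_USAGE_CHILDREN the waited-for
// children instead.
team_usage_info usageInfo;
if (get_team_usage_info(B_CURRENT_TEAM, B_TEAM_USAGE_SELF, &usageInfo)
		== B_OK) {
	dprintf("user: %" B_PRId64 " us, kernel: %" B_PRId64 " us\n",
		usageInfo.user_time, usageInfo.kernel_time);
}
#endif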


// #pragma mark - Private kernel API


status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;
	sKernelTeam->supplementary_group_count = 0;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}


int32
team_max_teams(void)
{
	return sMaxTeams;
}


int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}


/*!	Returns a death entry of a child team specified by ID (if any).
	The caller must hold the team's lock.

	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry shall be returned.
		Must be > 0.
	\param _deleteEntry Return variable, indicating whether the caller needs to
		delete the returned entry.
	\return The death entry of the matching team, or \c NULL, if no death entry
		for the team was found.
*/
job_control_entry*
team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
{
	if (child <= 0)
		return NULL;

	job_control_entry* entry = get_job_control_entry(team->dead_children,
		child);
	if (entry) {
		// remove the entry only if the caller is the parent of the found team
		if (team_get_current_team_id() == entry->thread) {
			team->dead_children.entries.Remove(entry);
			team->dead_children.count--;
			*_deleteEntry = true;
		} else {
			*_deleteEntry = false;
		}
	}

	return entry;
}
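

#if 0
// Caller contract sketch (illustrative; "team" and "childID" stand for a
// locked team and one of its children):
bool deleteEntry;
job_control_entry* entry = team_get_death_entry(team, childID, &deleteEntry);
if (entry != NULL) {
	// ... evaluate the entry ...
	if (deleteEntry)
		delete entry;
}
#endif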


/*!	Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	if (id <= 0)
		return false;

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	return team_get_team_struct_locked(id) != NULL;
}


Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}


void
team_set_controlling_tty(int32 ttyIndex)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = ttyIndex;
	session->foreground_group = -1;
}


int32
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}


status_t
team_set_foreground_process_group(int32 ttyIndex, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != ttyIndex)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}
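

#if 0
// Userland analogue (a sketch under the assumption that tcsetpgrp() is
// backed by the function above): calling it from a background process group
// on the controlling terminal first delivers SIGTTOU to the caller's group,
// unless that signal is ignored or blocked -- the B_INTERRUPTED path above.
if (tcsetpgrp(STDIN_FILENO, getpgrp()) != 0)
	perror("tcsetpgrp");
#endif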


/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signalling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine whom to signal.
	_signalGroup = -1;
	bool isSessionLeader = false;
	if (team->session_id == team->id
		&& team->group->Session()->controlling_tty >= 0) {
		isSessionLeader = true;

		ProcessSession* session = team->group->Session();

		AutoLocker<ProcessSession> sessionLocker(session);

		session->controlling_tty = -1;
		_signalGroup = session->foreground_group;
	}

	// remove us from our process group
	remove_team_from_group(team);

	// move the team's children to the kernel team
	while (Team* child = team->children) {
		// remove the child from the current team and add it to the kernel team
		TeamLocker childLocker(child);

		remove_team_from_parent(team, child);
		insert_team_into_parent(sKernelTeam, child);

		// move job control entries too
		sKernelTeam->stopped_children.entries.MoveFrom(
			&team->stopped_children.entries);
		sKernelTeam->continued_children.entries.MoveFrom(
			&team->continued_children.entries);

		// If the team was a session leader with controlling terminal,
		// we need to send SIGHUP + SIGCONT to all newly-orphaned process
		// groups with stopped processes. Due to locking complications we can't
		// do that here, so we only check whether we were a reason for the
		// child's process group not being an orphan and, if so, schedule a
		// later check (cf. orphaned_process_group_check()).
		if (isSessionLeader) {
			ProcessGroup* childGroup = child->group;
			if (childGroup->Session()->id == team->session_id
				&& childGroup->id != team->group_id) {
				childGroup->ScheduleOrphanedCheck();
			}
		}

		// Note, we don't move the dead children entries. Those will be deleted
		// when the team structure is deleted.
	}

	// remove us from our parent
	remove_team_from_parent(parent, team);
}


/*!	Kills all threads but the main thread of the team and shuts down user
	debugging for it.
	To be called on exit of the team's main thread. No locks must be held.

	\param team The team in question.
	\return The port of the debugger for the team, -1 if none. To be passed to
		team_delete_team().
*/
port_id
team_shutdown_team(Team* team)
{
	ASSERT(thread_get_current_thread() == team->main_thread);

	TeamLocker teamLocker(team);

	// Make sure debugging changes won't happen anymore.
	port_id debuggerPort = -1;
	while (true) {
		// If a debugger change is in progress for the team, we'll have to
		// wait until it is done.
		ConditionVariableEntry waitForDebuggerEntry;
		bool waitForDebugger = false;

		InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

		if (team->debug_info.debugger_changed_condition != NULL) {
			team->debug_info.debugger_changed_condition->Add(
				&waitForDebuggerEntry);
			waitForDebugger = true;
		} else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
			// The team is being debugged. That will stop with the termination
			// of the nub thread. Since we set the team state to death, no one
			// can install a debugger anymore. We fetch the debugger's port to
			// send it a message at the bitter end.
			debuggerPort = team->debug_info.debugger_port;
		}

		debugInfoLocker.Unlock();

		if (!waitForDebugger)
			break;

		// wait for the debugger change to be finished
		teamLocker.Unlock();

		waitForDebuggerEntry.Wait();

		teamLocker.Lock();
	}

	// Mark the team as shutting down. That will prevent new threads from being
	// created and debugger changes from taking place.
	team->state = TEAM_STATE_SHUTDOWN;

	// delete all timers
	team->DeleteUserTimers(false);

	// deactivate CPU time user timers for the team
	InterruptsSpinLocker timeLocker(team->time_lock);

	if (team->HasActiveCPUTimeUserTimers())
		team->DeactivateCPUTimeUserTimers();

	timeLocker.Unlock();

	// kill all threads but the main thread
	team_death_entry deathEntry;
	deathEntry.condition.Init(team, "team death");

	while (true) {
		team->death_entry = &deathEntry;
		deathEntry.remaining_threads = 0;

		Thread* thread = team->thread_list;
		while (thread != NULL) {
			if (thread != team->main_thread) {
				Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
				send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
				deathEntry.remaining_threads++;
			}

			thread = thread->team_next;
		}

		if (deathEntry.remaining_threads == 0)
			break;

		// there are threads to wait for
		ConditionVariableEntry entry;
		deathEntry.condition.Add(&entry);

		teamLocker.Unlock();

		entry.Wait();

		teamLocker.Lock();
	}

	team->death_entry = NULL;

	return debuggerPort;
}


/*!	Called on team exit to notify threads waiting on the team and free most
	resources associated with it.
	The caller shouldn't hold any locks.
*/
void
team_delete_team(Team* team, port_id debuggerPort)
{
	// Not quite in our job description, but work that has been left by
	// team_remove_team() and that can be done now that we're not holding any
	// locks.
	orphaned_process_group_check();

	team_id teamID = team->id;

	ASSERT(team->num_threads == 0);

	// If someone is waiting for this team to be loaded, but it dies
	// unexpectedly before being done, we need to notify the waiting
	// thread now.

	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		team->loading_info = NULL;

		loadingInfo->result = B_ERROR;

		// wake up the waiting thread
		loadingInfo->condition.NotifyAll();
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();

	// notify the debugger that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}


Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}


team_id
team_get_kernel_team_id(void)
{
	if (!sKernelTeam)
		return 0;

	return sKernelTeam->id;
}


team_id
team_get_current_team_id(void)
{
	return thread_get_current_thread()->team->id;
}


status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}
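

#if 0
// Reference discipline sketch (illustrative; "teamID" is arbitrary): the
// address space is returned with a reference acquired on the caller's behalf.
VMAddressSpace* addressSpace;
if (team_get_address_space(teamID, &addressSpace) == B_OK) {
	// ... work with addressSpace ...
	addressSpace->Put();
		// balances the Get() done by team_get_address_space()
}
#endif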


/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new state
		is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
*/
void
team_set_job_control_state(Team* team, job_control_state newState,
	Signal* signal)
{
	if (team == NULL || team->job_control_entry == NULL)
		return;

	// don't touch anything, if the state stays the same or the team is already
	// dead
	job_control_entry* entry = team->job_control_entry;
	if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD)
		return;

	T(SetJobControlState(team->id, newState, signal));

	// remove from the old list
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry is in no list ATM
			break;
		case JOB_CONTROL_STATE_DEAD:
			// can't get here
			break;
		case JOB_CONTROL_STATE_STOPPED:
			team->parent->stopped_children.entries.Remove(entry);
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			team->parent->continued_children.entries.Remove(entry);
			break;
	}

	entry->state = newState;

	if (signal != NULL) {
		entry->signal = signal->Number();
		entry->signaling_user = signal->SendingUser();
	}

	// add to new list
	team_job_control_children* childList = NULL;
	switch (entry->state) {
		case JOB_CONTROL_STATE_NONE:
			// entry doesn't get into any list
			break;
		case JOB_CONTROL_STATE_DEAD:
			childList = &team->parent->dead_children;
			team->parent->dead_children.count++;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			childList = &team->parent->stopped_children;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			childList = &team->parent->continued_children;
			break;
	}

	if (childList != NULL) {
		childList->entries.Add(entry);
		team->parent->dead_children.condition_variable.NotifyAll();
	}
}


/*!	Inits the given team's exit information, if not yet initialized, to some
	generic "killed" status.
	The caller must not hold the team's lock. Interrupts must be enabled.

	\param team The team whose exit info shall be initialized.
*/
void
team_init_exit_info_on_error(Team* team)
{
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_KILLED;
		team->exit.signal = SIGKILL;
		team->exit.signaling_user = geteuid();
		team->exit.status = 0;
		team->exit.initialized = true;
	}
}


/*!	Adds a hook to the team that is called as soon as this team goes away.
	This call might get public in the future.
*/
status_t
start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < B_OK)
		return B_BAD_VALUE;

	// create the watcher object
	team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher));
	if (watcher == NULL)
		return B_NO_MEMORY;

	watcher->hook = hook;
	watcher->data = data;

	// add watcher, if the team isn't already dying
	// get the team
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL) {
		free(watcher);
		return B_BAD_TEAM_ID;
	}

	list_add_item(&team->watcher_list, watcher);

	team->UnlockAndReleaseReference();

	return B_OK;
}


status_t
stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data)
{
	if (hook == NULL || teamID < 0)
		return B_BAD_VALUE;

	// get team and remove watcher (if present)
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	// search for watcher
	team_watcher* watcher = NULL;
	while ((watcher = (team_watcher*)list_get_next_item(
			&team->watcher_list, watcher)) != NULL) {
		if (watcher->hook == hook && watcher->data == data) {
			// got it!
			list_remove_item(&team->watcher_list, watcher);
			break;
		}
	}

	team->UnlockAndReleaseReference();

	if (watcher == NULL)
		return B_ENTRY_NOT_FOUND;

	free(watcher);
	return B_OK;
}
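

#if 0
// Usage sketch for the watcher interface above (illustrative; the hook name
// and team ID are made up):
static void
my_team_gone_hook(team_id id, void* data)
{
	// invoked once when the team goes away
}

static void
example_watching(team_id teamID)
{
	if (start_watching_team(teamID, &my_team_gone_hook, NULL) == B_OK) {
		// ... later, if we lose interest before the team dies:
		stop_watching_team(teamID, &my_team_gone_hook, NULL);
	}
}
#endif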


/*!	Allocates a user_thread structure from the team.
	The team lock must be held, unless the function is called for the team's
	main thread. Interrupts must be enabled.
*/
struct user_thread*
team_allocate_user_thread(Team* team)
{
	if (team->user_data == 0)
		return NULL;

	// take an entry from the free list, if any
	if (struct free_user_thread* entry = team->free_user_threads) {
		user_thread* thread = entry->thread;
		team->free_user_threads = entry->next;
		free(entry);
		return thread;
	}

	while (true) {
		// enough space left?
		size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
		if (team->user_data_size - team->used_user_data < needed) {
			// try to resize the area
			if (resize_area(team->user_data_area,
					team->user_data_size + B_PAGE_SIZE) != B_OK) {
				return NULL;
			}

			// resized user area successfully -- try to allocate the
			// user_thread again
			team->user_data_size += B_PAGE_SIZE;
			continue;
		}

		// allocate the user_thread
		user_thread* thread
			= (user_thread*)(team->user_data + team->used_user_data);
		team->used_user_data += needed;

		return thread;
	}
}


/*!	Frees the given user_thread structure.
	The team's lock must not be held. Interrupts must be enabled.
	\param team The team the user thread was allocated from.
	\param userThread The user thread to free.
*/
void
team_free_user_thread(Team* team, struct user_thread* userThread)
{
	if (userThread == NULL)
		return;

	// create a free list entry
	free_user_thread* entry
		= (free_user_thread*)malloc(sizeof(free_user_thread));
	if (entry == NULL) {
		// we have to leak the user thread :-/
		return;
	}

	// add to free list
	TeamLocker teamLocker(team);

	entry->thread = userThread;
	entry->next = team->free_user_threads;
	team->free_user_threads = entry;
}


// #pragma mark - Associated data interface


AssociatedData::AssociatedData()
	:
	fOwner(NULL)
{
}


AssociatedData::~AssociatedData()
{
}


void
AssociatedData::OwnerDeleted(AssociatedDataOwner* owner)
{
}


AssociatedDataOwner::AssociatedDataOwner()
{
	mutex_init(&fLock, "associated data owner");
}


AssociatedDataOwner::~AssociatedDataOwner()
{
	mutex_destroy(&fLock);
}


bool
AssociatedDataOwner::AddData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != NULL)
		return false;

	data->AcquireReference();
	fList.Add(data);
	data->SetOwner(this);

	return true;
}


bool
AssociatedDataOwner::RemoveData(AssociatedData* data)
{
	MutexLocker locker(fLock);

	if (data->Owner() != this)
		return false;

	data->SetOwner(NULL);
	fList.Remove(data);

	locker.Unlock();

	data->ReleaseReference();

	return true;
}


void
AssociatedDataOwner::PrepareForDeletion()
{
	MutexLocker locker(fLock);

	// move all data to a temporary list and unset the owner
	DataList list;
	list.MoveFrom(&fList);

	for (DataList::Iterator it = list.GetIterator();
			AssociatedData* data = it.Next();) {
		data->SetOwner(NULL);
	}

	locker.Unlock();

	// call the notification hooks and release our references
	while (AssociatedData* data = list.RemoveHead()) {
		data->OwnerDeleted(this);
		data->ReleaseReference();
	}
}
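

#if 0
// Minimal AssociatedData subclass sketch (illustrative):
struct MyTeamData : AssociatedData {
	virtual void OwnerDeleted(AssociatedDataOwner* owner)
	{
		// the owning team is being deleted -- drop anything referring to it
	}
};
#endif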


/*!	Associates data with the current team.
	When the team is deleted, the data object is notified.
	The team acquires a reference to the object.

	\param data The data object.
	\return \c true on success, \c false otherwise. Fails only when the supplied
		data object is already associated with another owner.
*/
bool
team_associate_data(AssociatedData* data)
{
	return thread_get_current_thread()->team->AddData(data);
}


/*!	Dissociates data from the current team.
	Balances an earlier call to team_associate_data().

	\param data The data object.
	\return \c true on success, \c false otherwise. Fails only when the data
		object is not associated with the current team.
*/
bool
team_dissociate_data(AssociatedData* data)
{
	return thread_get_current_thread()->team->RemoveData(data);
}


// #pragma mark - Public kernel API


thread_id
load_image(int32 argCount, const char** args, const char** env)
{
	return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY,
		B_CURRENT_TEAM, B_WAIT_TILL_LOADED);
}


thread_id
load_image_etc(int32 argCount, const char* const* args,
	const char* const* env, int32 priority, team_id parentID, uint32 flags)
{
	// we need to flatten the args and environment

	if (args == NULL)
		return B_BAD_VALUE;

	// determine total needed size
	int32 argSize = 0;
	for (int32 i = 0; i < argCount; i++)
		argSize += strlen(args[i]) + 1;

	int32 envCount = 0;
	int32 envSize = 0;
	while (env != NULL && env[envCount] != NULL)
		envSize += strlen(env[envCount++]) + 1;

	int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize;
	if (size > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;

	// allocate space
	char** flatArgs = (char**)malloc(size);
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	char** slot = flatArgs;
	char* stringSpace = (char*)(flatArgs + argCount + envCount + 2);

	// copy arguments and environment
	for (int32 i = 0; i < argCount; i++) {
		int32 argSize = strlen(args[i]) + 1;
		memcpy(stringSpace, args[i], argSize);
		*slot++ = stringSpace;
		stringSpace += argSize;
	}

	*slot++ = NULL;

	for (int32 i = 0; i < envCount; i++) {
		int32 envSize = strlen(env[i]) + 1;
		memcpy(stringSpace, env[i], envSize);
		*slot++ = stringSpace;
		stringSpace += envSize;
	}

	*slot++ = NULL;

	thread_id thread = load_image_internal(flatArgs, size, argCount, envCount,
		B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}
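

#if 0
// Layout sketch of the flattened buffer built above (illustrative):
//
//	argCount arg pointers | NULL | envCount env pointers | NULL | strings
//
// and a typical call of the public wrapper (userland-style, compiled out):
extern char** environ;

static void
example_load_image(void)
{
	const char* args[] = { "/bin/ls", "-l" };
	thread_id thread = load_image(2, args, (const char**)environ);
	if (thread >= 0)
		resume_thread(thread);
			// the main thread starts suspended
}
#endif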


status_t
wait_for_team(team_id id, status_t* _returnCode)
{
	// check whether the team exists
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;

	teamsLocker.Unlock();

	// wait for the main thread (it has the same ID as the team)
	return wait_for_thread(id, _returnCode);
}


status_t
kill_team(team_id id)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;

	id = team->id;

	teamsLocker.Unlock();

	if (team == sKernelTeam)
		return B_NOT_ALLOWED;

	// Just kill the team's main thread (it has the same ID as the team). The
	// cleanup code there will take care of the team.
	return kill_thread(id);
}


status_t
_get_team_info(team_id id, team_info* info, size_t size)
{
	// get the team
	Team* team = Team::Get(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// fill in the info
	return fill_team_info(team, info, size);
}


status_t
_get_next_team_info(int32* cookie, team_info* info, size_t size)
{
	int32 slot = *cookie;
	if (slot < 1)
		slot = 1;

	InterruptsReadSpinLocker locker(sTeamHashLock);

	team_id lastTeamID = peek_next_thread_id();
		// TODO: This is broken, since the id can wrap around!

	// get next valid team
	Team* team = NULL;
	while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot)))
		slot++;

	if (team == NULL)
		return B_BAD_TEAM_ID;

	// get a reference to the team and unlock
	BReference<Team> teamReference(team);
	locker.Unlock();

	// fill in the info
	*cookie = ++slot;
	return fill_team_info(team, info, size);
}
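

#if 0
// Iteration sketch for the cookie interface above (illustrative):
int32 cookie = 0;
team_info info;
while (_get_next_team_info(&cookie, &info, sizeof(info)) == B_OK)
	dprintf("team %" B_PRId32 ": %s\n", info.team, info.args);
#endif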


status_t
_get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	return common_get_team_usage_info(id, who, info, 0);
}


pid_t
getpid(void)
{
	return thread_get_current_thread()->team->id;
}


pid_t
getppid(void)
{
	Team* team = thread_get_current_thread()->team;

	TeamLocker teamLocker(team);

	return team->parent->id;
}


pid_t
getpgid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get process group of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->group_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's process group ID
	pid_t groupID = team->group_id;

	team->UnlockAndReleaseReference();

	return groupID;
}


pid_t
getsid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		// get session of the calling process
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		return team->session_id;
	}

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	// get the team's session ID
	pid_t sessionID = team->session_id;

	team->UnlockAndReleaseReference();

	return sessionID;
}


// #pragma mark - User syscalls


status_t
_user_exec(const char* userPath, const char* const* userFlatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char path[B_PATH_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs)
		|| user_strlcpy(path, userPath, sizeof(path)) < B_OK)
		return B_BAD_ADDRESS;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);

	if (error == B_OK) {
		error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount,
			envCount, umask);
			// this one only returns in case of error
	}

	free(flatArgs);
	return error;
}


thread_id
_user_fork(void)
{
	return fork_team();
}
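

#if 0
// Userland view of _user_fork() (illustrative): the child gets a
// copy-on-write duplicate of the parent's address space and resumes from the
// same point with a return value of 0.
pid_t pid = fork();
if (pid == 0) {
	// child
} else if (pid > 0) {
	// parent; pid is the child's team ID (== its main thread ID)
}
#endif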


pid_t
_user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo,
	team_usage_info* usageInfo)
{
	if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;
	if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo))
		return B_BAD_ADDRESS;

	siginfo_t info;
	team_usage_info usage_info;
	pid_t foundChild = wait_for_child(child, flags, info, usage_info);
	if (foundChild < 0)
		return syscall_restart_handle_post(foundChild);

	// copy info back to userland
	if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK)
		return B_BAD_ADDRESS;
	// copy usage_info back to userland
	if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info,
			sizeof(usage_info)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return foundChild;
}


pid_t
_user_process_info(pid_t process, int32 which)
{
	// we only allow returning the parent of the current process
	if (which == PARENT_ID
		&& process != 0 && process != thread_get_current_thread()->team->id)
		return B_BAD_VALUE;

	pid_t result;
	switch (which) {
		case SESSION_ID:
			result = getsid(process);
			break;
		case GROUP_ID:
			result = getpgid(process);
			break;
		case PARENT_ID:
			result = getppid();
			break;
		default:
			return B_BAD_VALUE;
	}

	return result >= 0 ? result : errno;
}


pid_t
_user_setpgid(pid_t processID, pid_t groupID)
{
	// setpgid() can be called either by the parent of the target process or
	// by the process itself to do one of two things:
	// * Create a new process group with the target process' ID and the target
	//   process as group leader.
	// * Set the target process' process group to an already existing one in
	//   the same session.

	if (groupID < 0)
		return B_BAD_VALUE;

	Team* currentTeam = thread_get_current_thread()->team;
	if (processID == 0)
		processID = currentTeam->id;

	// if the group ID is not specified, use the target process' ID
	if (groupID == 0)
		groupID = processID;

	// We loop when running into the following race condition: We create a new
	// process group, because there isn't one with that ID yet, but later when
	// trying to publish it, we find that someone else created and published
	// a group with that ID in the meantime. In that case we just restart the
	// whole action.
	while (true) {
		// Look up the process group by ID. If it doesn't exist yet and we are
		// allowed to create a new one, do that.
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock in
			// the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session
			// leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child of
			// the calling team and in the same session. (If that's the case it
			// isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child has
			// already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different
			// session. That's not allowed.
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}


pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}


status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() has unset our variable if it took over
		// ownership

	return thread;
}
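

// A rough sketch of the userland side of the exit path (my_exit() is a
// hypothetical wrapper; the _kern_exit_team() stub name follows the usual
// _user_*/_kern_* syscall pairing from <syscalls.h>): the wrapper runs its
// cleanup handlers and then enters the kernel, which does not hand control
// back to the caller:
//
//	void
//	my_exit(int status)
//	{
//		// ... run atexit()/cleanup handlers first ...
//		_kern_exit_team(status);
//		// not reached -- _user_exit_team() SIGKILLs the calling thread
//	}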
void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	// Only the first exiting thread gets to set the team's exit status;
	// later callers must not overwrite it.
	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return
	// to userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info* userInfo)
{
	status_t status;
	team_info info;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, sizeof(team_info));
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, sizeof(team_info));
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(team_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
}
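

// The extended team info below is returned as a flattened KMessage rather
// than a fixed struct, presumably so that additional fields can be added
// later without breaking the syscall ABI; callers learn the required buffer
// size via the _sizeNeeded out parameter.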
status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		};

		ExtendedTeamData* teamClone
			= (ExtendedTeamData*)malloc(sizeof(ExtendedTeamData));
			// It would be nicer to use new, but then we'd have to use
			// ObjectDeleter and declare the structure outside of the function
			// due to template parameter restrictions.
		if (teamClone == NULL)
			return B_NO_MEMORY;
		MemoryDeleter teamCloneDeleter(teamClone);

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone->id = team->id;
			strlcpy(teamClone->name, team->Name(), sizeof(teamClone->name));
			teamClone->group_id = team->group_id;
			teamClone->session_id = team->session_id;
			teamClone->real_uid = team->real_uid;
			teamClone->real_gid = team->real_gid;
			teamClone->effective_uid = team->effective_uid;
			teamClone->effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context> ioContextPutter(ioContext,
			&vfs_put_io_context);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone->id) != B_OK
			|| info.AddString("name", teamClone->name) != B_OK
			|| info.AddInt32("process group", teamClone->group_id) != B_OK
			|| info.AddInt32("session", teamClone->session_id) != B_OK
			|| info.AddInt32("uid", teamClone->real_uid) != B_OK
			|| info.AddInt32("gid", teamClone->real_gid) != B_OK
			|| info.AddInt32("euid", teamClone->effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone->effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
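

// A rough sketch of how a userland caller would drive the syscall above
// (the _kern_get_extended_team_info() stub name follows the usual
// _user_*/_kern_* pairing; error handling abbreviated): first call with a
// NULL buffer to learn the required size, then repeat with a buffer of
// that size:
//
//	size_t sizeNeeded = 0;
//	status_t status = _kern_get_extended_team_info(teamID,
//		B_TEAM_INFO_BASIC, NULL, 0, &sizeNeeded);
//	if (status == B_BUFFER_OVERFLOW) {
//		void* buffer = malloc(sizeNeeded);
//		if (buffer != NULL) {
//			status = _kern_get_extended_team_info(teamID,
//				B_TEAM_INFO_BASIC, buffer, sizeNeeded, &sizeNeeded);
//			// on B_OK, unflatten the KMessage from the buffer
//			free(buffer);
//		}
//	}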