/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Team functions */


#include <team.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

#include <OS.h>

#include <AutoDeleter.h>
#include <FindDirectory.h>

#include <extended_system_info_defs.h>

#include <commpage.h>
#include <boot_device.h>
#include <elf.h>
#include <file_cache.h>
#include <find_directory_private.h>
#include <fs/KPath.h>
#include <heap.h>
#include <int.h>
#include <kernel.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <port.h>
#include <posix/realtime_sem.h>
#include <posix/xsi_semaphore.h>
#include <safemode.h>
#include <sem.h>
#include <syscall_process_info.h>
#include <syscall_load_image.h>
#include <syscall_restart.h>
#include <syscalls.h>
#include <tls.h>
#include <tracing.h>
#include <user_mutex.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <usergroup.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>

#include "TeamThreadTables.h"


//#define TRACE_TEAM
#ifdef TRACE_TEAM
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


struct team_key {
	team_id id;
};

struct team_arg {
	char	*path;
	char	**flat_args;
	size_t	flat_args_size;
	uint32	arg_count;
	uint32	env_count;
	mode_t	umask;
	uint32	flags;
	port_id	error_port;
	uint32	error_token;
};

#define TEAM_ARGS_FLAG_NO_ASLR	0x01


namespace {


class TeamNotificationService : public DefaultNotificationService {
public:
							TeamNotificationService();

			void			Notify(uint32 eventCode, Team* team);
};


// #pragma mark - TeamTable


typedef BKernel::TeamThreadTable<Team> TeamTable;


// #pragma mark - ProcessGroupHashDefinition


struct ProcessGroupHashDefinition {
	typedef pid_t			KeyType;
	typedef	ProcessGroup	ValueType;

	size_t HashKey(pid_t key) const
	{
		return key;
	}

	size_t Hash(ProcessGroup* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(pid_t key, ProcessGroup* value) const
	{
		return value->id == key;
	}

	ProcessGroup*& GetLink(ProcessGroup* value) const
	{
		return value->next;
	}
};

typedef BOpenHashTable<ProcessGroupHashDefinition> ProcessGroupHashTable;


}	// unnamed namespace


// #pragma mark -


// the team_id -> Team hash table and the lock protecting it
static TeamTable sTeamHash;
static rw_spinlock sTeamHashLock = B_RW_SPINLOCK_INITIALIZER;

// the pid_t -> ProcessGroup hash table and the lock protecting it
static ProcessGroupHashTable sGroupHash;
static spinlock sGroupHashLock = B_SPINLOCK_INITIALIZER;

static Team* sKernelTeam = NULL;
static bool sDisableUserAddOns = false;

// A list of process groups of children of dying session leaders that need to
// be signalled, if they have become orphaned and contain stopped processes.
static ProcessGroupList sOrphanedCheckProcessGroups;
static mutex sOrphanedCheckLock
	= MUTEX_INITIALIZER("orphaned process group check");

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxTeams = 2048;
static int32 sUsedTeams = 1;

static TeamNotificationService sNotificationService;

static const size_t kTeamUserDataReservedSize	= 128 * B_PAGE_SIZE;
static const size_t kTeamUserDataInitialSize	= 4 * B_PAGE_SIZE;
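

// The canonical lookup pattern for these tables (a sketch; cf. Team::Get()
// and ProcessGroup::Get() below): any object taken out of the hash must be
// referenced before the lock is dropped.
//
//	InterruptsReadSpinLocker locker(sTeamHashLock);
//	Team* team = sTeamHash.Lookup(id);
//	if (team != NULL)
//		team->AcquireReference();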


// #pragma mark - TeamListIterator


TeamListIterator::TeamListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.InsertIteratorEntry(&fEntry);
}


TeamListIterator::~TeamListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	sTeamHash.RemoveIteratorEntry(&fEntry);
}


Team*
TeamListIterator::Next()
{
	// get the next team -- if there is one, get reference for it
	InterruptsWriteSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.NextElement(&fEntry);
	if (team != NULL)
		team->AcquireReference();

	return team;
}
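

// Illustrative usage of the iterator (a sketch, assuming a caller outside
// this file): Next() hands out each team with a reference already acquired,
// so the caller has to release it.
//
//	TeamListIterator iterator;
//	while (Team* team = iterator.Next()) {
//		// ... inspect the team ...
//		team->ReleaseReference();
//	}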


// #pragma mark - Tracing


#if TEAM_TRACING
namespace TeamTracing {

class TeamForked : public AbstractTraceEntry {
public:
	TeamForked(thread_id forkedThread)
		:
		fForkedThread(forkedThread)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team forked, new thread %" B_PRId32, fForkedThread);
	}

private:
	thread_id			fForkedThread;
};


class ExecTeam : public AbstractTraceEntry {
public:
	ExecTeam(const char* path, int32 argCount, const char* const* args,
			int32 envCount, const char* const* env)
		:
		fArgCount(argCount),
		fArgs(NULL)
	{
		fPath = alloc_tracing_buffer_strcpy(path, B_PATH_NAME_LENGTH,
			false);

		// determine the buffer size we need for the args
		size_t argBufferSize = 0;
		for (int32 i = 0; i < argCount; i++)
			argBufferSize += strlen(args[i]) + 1;

		// allocate a buffer
		fArgs = (char*)alloc_tracing_buffer(argBufferSize);
		if (fArgs) {
			char* buffer = fArgs;
			for (int32 i = 0; i < argCount; i++) {
				size_t argSize = strlen(args[i]) + 1;
				memcpy(buffer, args[i], argSize);
				buffer += argSize;
			}
		}

		// ignore env for the time being
		(void)envCount;
		(void)env;

		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team exec, \"%p\", args:", fPath);

		if (fArgs != NULL) {
			char* args = fArgs;
			for (int32 i = 0; !out.IsFull() && i < fArgCount; i++) {
				out.Print(" \"%s\"", args);
				args += strlen(args) + 1;
			}
		} else
			out.Print(" <too long>");
	}

private:
	char*	fPath;
	int32	fArgCount;
	char*	fArgs;
};


static const char*
job_control_state_name(job_control_state state)
{
	switch (state) {
		case JOB_CONTROL_STATE_NONE:
			return "none";
		case JOB_CONTROL_STATE_STOPPED:
			return "stopped";
		case JOB_CONTROL_STATE_CONTINUED:
			return "continued";
		case JOB_CONTROL_STATE_DEAD:
			return "dead";
		default:
			return "invalid";
	}
}


class SetJobControlState : public AbstractTraceEntry {
public:
	SetJobControlState(team_id team, job_control_state newState,
			Signal* signal)
		:
		fTeam(team),
		fNewState(newState),
		fSignal(signal != NULL ? signal->Number() : 0)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team set job control state, team %" B_PRId32 ", "
			"new state: %s, signal: %d",
			fTeam, job_control_state_name(fNewState), fSignal);
	}

private:
	team_id				fTeam;
	job_control_state	fNewState;
	int					fSignal;
};


class WaitForChild : public AbstractTraceEntry {
public:
	WaitForChild(pid_t child, uint32 flags)
		:
		fChild(child),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("team wait for child, child: %" B_PRId32 ", "
			"flags: %#" B_PRIx32, fChild, fFlags);
	}

private:
	pid_t	fChild;
	uint32	fFlags;
};


class WaitForChildDone : public AbstractTraceEntry {
public:
	WaitForChildDone(const job_control_entry& entry)
		:
		fState(entry.state),
		fTeam(entry.thread),
		fStatus(entry.status),
		fReason(entry.reason),
		fSignal(entry.signal)
	{
		Initialized();
	}

	WaitForChildDone(status_t error)
		:
		fTeam(error)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		if (fTeam >= 0) {
			out.Print("team wait for child done, team: %" B_PRId32 ", "
				"state: %s, status: %#" B_PRIx32 ", reason: %#x, signal: %d\n",
				fTeam, job_control_state_name(fState), fStatus, fReason,
				fSignal);
		} else {
			out.Print("team wait for child failed, error: "
				"%#" B_PRIx32 ", ", fTeam);
		}
	}

private:
	job_control_state	fState;
	team_id				fTeam;
	status_t			fStatus;
	uint16				fReason;
	uint16				fSignal;
};

}	// namespace TeamTracing

#	define T(x) new(std::nothrow) TeamTracing::x;
#else
#	define T(x) ;
#endif


// #pragma mark - TeamNotificationService


TeamNotificationService::TeamNotificationService()
	: DefaultNotificationService("teams")
{
}


void
TeamNotificationService::Notify(uint32 eventCode, Team* team)
{
	char eventBuffer[128];
	KMessage event;
	event.SetTo(eventBuffer, sizeof(eventBuffer), TEAM_MONITOR);
	event.AddInt32("event", eventCode);
	event.AddInt32("team", team->id);
	event.AddPointer("teamStruct", team);

	DefaultNotificationService::Notify(event, eventCode);
}
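

// The service is fed from the team lifecycle paths further down in this
// file, e.g.:
//
//	sNotificationService.Notify(TEAM_ADDED, team);
//	sNotificationService.Notify(TEAM_REMOVED, team);
//	sNotificationService.Notify(TEAM_EXEC, team);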


// #pragma mark - Team


Team::Team(team_id id, bool kernel)
{
	// allocate an ID
	this->id = id;
	visible = true;

	hash_next = siblings_next = parent = children = group_next = NULL;
	serial_number = -1;

	group_id = session_id = -1;
	group = NULL;

	num_threads = 0;
	state = TEAM_STATE_BIRTH;
	flags = 0;
	io_context = NULL;
	user_mutex_context = NULL;
	realtime_sem_context = NULL;
	xsi_sem_context = NULL;
	death_entry = NULL;
	list_init(&dead_threads);

	dead_children.condition_variable.Init(&dead_children, "team children");
	dead_children.count = 0;
	dead_children.kernel_time = 0;
	dead_children.user_time = 0;

	job_control_entry = new(nothrow) ::job_control_entry;
	if (job_control_entry != NULL) {
		job_control_entry->state = JOB_CONTROL_STATE_NONE;
		job_control_entry->thread = id;
		job_control_entry->team = this;
	}

	address_space = NULL;
	main_thread = NULL;
	thread_list = NULL;
	loading_info = NULL;

	list_init(&image_list);
	list_init(&watcher_list);
	list_init(&sem_list);
	list_init_etc(&port_list, port_team_link_offset());

	user_data = 0;
	user_data_area = -1;
	used_user_data = 0;
	user_data_size = 0;
	free_user_threads = NULL;

	commpage_address = NULL;

	clear_team_debug_info(&debug_info, true);

	dead_threads_kernel_time = 0;
	dead_threads_user_time = 0;
	cpu_clock_offset = 0;
	B_INITIALIZE_SPINLOCK(&time_lock);

	saved_set_uid = real_uid = effective_uid = -1;
	saved_set_gid = real_gid = effective_gid = -1;

	// exit status -- setting initialized to false suffices
	exit.initialized = false;

	B_INITIALIZE_SPINLOCK(&signal_lock);

	// init mutex
	if (kernel) {
		mutex_init(&fLock, "Team:kernel");
	} else {
		char lockName[16];
		snprintf(lockName, sizeof(lockName), "Team:%" B_PRId32, id);
		mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
	}

	fName[0] = '\0';
	fArgs[0] = '\0';

	fQueuedSignalsCounter = new(std::nothrow) BKernel::QueuedSignalsCounter(
		kernel ? -1 : MAX_QUEUED_SIGNALS);
	memset(fSignalActions, 0, sizeof(fSignalActions));
	fUserDefinedTimerCount = 0;

	fCoreDumpCondition = NULL;
}


Team::~Team()
{
	// get rid of all associated data
	PrepareForDeletion();

	if (io_context != NULL)
		vfs_put_io_context(io_context);
	delete_owned_ports(this);
	sem_delete_owned_sems(this);

	DeleteUserTimers(false);

	fPendingSignals.Clear();

	if (fQueuedSignalsCounter != NULL)
		fQueuedSignalsCounter->ReleaseReference();

	while (thread_death_entry* threadDeathEntry
			= (thread_death_entry*)list_remove_head_item(&dead_threads)) {
		free(threadDeathEntry);
	}

	while (::job_control_entry* entry = dead_children.entries.RemoveHead())
		delete entry;

	while (free_user_thread* entry = free_user_threads) {
		free_user_threads = entry->next;
		free(entry);
	}

	delete job_control_entry;
		// usually already NULL and transferred to the parent

	mutex_destroy(&fLock);
}


/*static*/ Team*
Team::Create(team_id id, const char* name, bool kernel)
{
	// create the team object
	Team* team = new(std::nothrow) Team(id, kernel);
	if (team == NULL)
		return NULL;
	ObjectDeleter<Team> teamDeleter(team);

	if (name != NULL)
		team->SetName(name);

	// check initialization
	if (team->job_control_entry == NULL || team->fQueuedSignalsCounter == NULL)
		return NULL;

	// finish initialization (arch specifics)
	if (arch_team_init_team_struct(team, kernel) != B_OK)
		return NULL;

	if (!kernel) {
		status_t error = user_timer_create_team_timers(team);
		if (error != B_OK)
			return NULL;
	}

	team->start_time = system_time();

	// everything went fine
	return teamDeleter.Detach();
}
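

// Typical usage (cf. load_image_internal() and fork_team() below): the team
// is created with one reference, which is usually handed to a BReference so
// the error paths clean up automatically.
//
//	Team* team = Team::Create(mainThread->id, path, false);
//	if (team == NULL)
//		return B_NO_MEMORY;
//	BReference<Team> teamReference(team, true);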


/*!	\brief Returns the team with the given ID.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::Get(team_id id)
{
	if (id == B_CURRENT_TEAM) {
		Team* team = thread_get_current_thread()->team;
		team->AcquireReference();
		return team;
	}

	InterruptsReadSpinLocker locker(sTeamHashLock);
	Team* team = sTeamHash.Lookup(id);
	if (team != NULL)
		team->AcquireReference();
	return team;
}


/*!	\brief Returns the team with the given ID in a locked state.
	Returns a reference to the team.
	Team and thread spinlock must not be held.
*/
/*static*/ Team*
Team::GetAndLock(team_id id)
{
	// get the team
	Team* team = Get(id);
	if (team == NULL)
		return NULL;

	// lock it
	team->Lock();

	// only return the team, when it isn't already dying
	if (team->state >= TEAM_STATE_SHUTDOWN) {
		team->Unlock();
		team->ReleaseReference();
		return NULL;
	}

	return team;
}
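

// Typical usage (cf. _getppid() below); the lock and the reference are
// dropped together when done:
//
//	Team* team = Team::GetAndLock(id);
//	if (team == NULL)
//		return B_BAD_TEAM_ID;
//	// ... work with the locked team ...
//	team->UnlockAndReleaseReference();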


/*!	Locks the team and its parent team (if any).
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked. If the
	team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
	only the team itself is locked.

	\param dontLockParentIfKernel If \c true, the team's parent team is only
		locked, if it is not the kernel team.
*/
void
Team::LockTeamAndParent(bool dontLockParentIfKernel)
{
	// The locking order is parent -> child. Since the parent can change as
	// long as we don't lock the team, we need to do a trial and error loop.
	Lock();

	while (true) {
		// If the team doesn't have a parent, we're done. Otherwise try to
		// lock the parent. This will succeed in most cases, simplifying
		// things.
		Team* parent = this->parent;
		if (parent == NULL || (dontLockParentIfKernel && parent == sKernelTeam)
			|| parent->TryLock()) {
			return;
		}

		// get a temporary reference to the parent, unlock this team, lock the
		// parent, and re-lock this team
		BReference<Team> parentReference(parent);

		Unlock();
		parent->Lock();
		Lock();

		// If the parent hasn't changed in the meantime, we're done.
		if (this->parent == parent)
			return;

		// The parent has changed -- unlock and retry.
		parent->Unlock();
	}
}


/*!	Unlocks the team and its parent team (if any).
*/
void
Team::UnlockTeamAndParent()
{
	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


/*!	Locks the team, its parent team (if any), and the team's process group.
	The caller must hold a reference to the team or otherwise make sure that
	it won't be deleted.
	If the team doesn't have a parent, only the team itself is locked.
*/
void
Team::LockTeamParentAndProcessGroup()
{
	LockTeamAndProcessGroup();

	// We hold the group's and the team's lock, but not the parent team's
	// lock. If we have a parent, try to lock it.
	if (this->parent == NULL || this->parent->TryLock())
		return;

	// No success -- unlock the team and let LockTeamAndParent() do the rest
	// of the job.
	Unlock();
	LockTeamAndParent(false);
}


/*!	Unlocks the team, its parent team (if any), and the team's process group.
*/
void
Team::UnlockTeamParentAndProcessGroup()
{
	group->Unlock();

	if (parent != NULL)
		parent->Unlock();

	Unlock();
}


void
Team::LockTeamAndProcessGroup()
{
	// The locking order is process group -> child. Since the process group
	// can change as long as we don't lock the team, we need to do a trial
	// and error loop.
	Lock();

	while (true) {
		// Try to lock the group. This will succeed in most cases,
		// simplifying things.
		ProcessGroup* group = this->group;
		if (group == NULL)
			return;

		if (group->TryLock())
			return;

		// get a temporary reference to the group, unlock this team, lock the
		// group, and re-lock this team
		BReference<ProcessGroup> groupReference(group);

		Unlock();
		group->Lock();
		Lock();

		// If the group hasn't changed in the meantime, we're done.
		if (this->group == group)
			return;

		// The group has changed -- unlock and retry.
		group->Unlock();
	}
}


void
Team::UnlockTeamAndProcessGroup()
{
	group->Unlock();
	Unlock();
}


void
Team::SetName(const char* name)
{
	if (const char* lastSlash = strrchr(name, '/'))
		name = lastSlash + 1;

	strlcpy(fName, name, B_OS_NAME_LENGTH);
}


void
Team::SetArgs(const char* args)
{
	strlcpy(fArgs, args, sizeof(fArgs));
}


void
Team::SetArgs(const char* path, const char* const* otherArgs, int otherArgCount)
{
	fArgs[0] = '\0';
	strlcpy(fArgs, path, sizeof(fArgs));
	for (int i = 0; i < otherArgCount; i++) {
		strlcat(fArgs, " ", sizeof(fArgs));
		strlcat(fArgs, otherArgs[i], sizeof(fArgs));
	}
}


void
Team::ResetSignalsOnExec()
{
	// We are supposed to keep pending signals. Signal actions shall be reset
	// partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
	// (for SIGCHLD it's implementation-defined). Others shall be reset to
	// SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
	// flags, but since there aren't any handlers, they make little sense, so
	// we clear them.

	for (uint32 i = 1; i <= MAX_SIGNAL_NUMBER; i++) {
		struct sigaction& action = SignalActionFor(i);
		if (action.sa_handler != SIG_IGN && action.sa_handler != SIG_DFL)
			action.sa_handler = SIG_DFL;

		action.sa_mask = 0;
		action.sa_flags = 0;
		action.sa_userdata = NULL;
	}
}


void
Team::InheritSignalActions(Team* parent)
{
	memcpy(fSignalActions, parent->fSignalActions, sizeof(fSignalActions));
}


/*!	Adds the given user timer to the team and, if user-defined, assigns it an
	ID.

	The caller must hold the team's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Team::AddUserTimer(UserTimer* timer)
{
	// don't allow addition of timers when already shutting the team down
	if (state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the team.

	The caller must hold the team's lock.

	\param timer The timer to be removed.
*/
void
Team::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the team.

	Timers belonging to the team's threads are not affected.
	The caller must hold the team's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are
		deleted, otherwise all timers are deleted.
*/
void
Team::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	UserDefinedTimersRemoved(count);
}


/*!	If not at the limit yet, increments the team's user-defined timer count.
	\return \c true, if the limit wasn't reached yet, \c false otherwise.
*/
bool
Team::CheckAddUserDefinedTimer()
{
	int32 oldCount = atomic_add(&fUserDefinedTimerCount, 1);
	if (oldCount >= MAX_USER_TIMERS_PER_TEAM) {
		atomic_add(&fUserDefinedTimerCount, -1);
		return false;
	}

	return true;
}
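

// Note the lock-free pattern above: the counter is incremented
// optimistically and rolled back if the limit turns out to be exceeded.
// This stays correct with concurrent callers, since every caller that
// pushed the counter past the limit undoes exactly its own increment.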


/*!	Subtracts the given count from the team's user-defined timer count.
	\param count The count to subtract.
*/
void
Team::UserDefinedTimersRemoved(int32 count)
{
	atomic_add(&fUserDefinedTimerCount, -count);
}


void
Team::DeactivateCPUTimeUserTimers()
{
	while (TeamTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();

	while (TeamUserTimeUserTimer* timer = fUserTimeUserTimers.Head())
		timer->Deactivate();
}


/*!	Returns the team's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the current thread is one of the
		team's threads, don't add the time since the last time \c last_time
		was updated. Should be used in "thread unscheduled" scheduler
		callbacks, since although the thread is still running at that time,
		its time has already been stopped.
	\return The team's current total CPU time.
*/
bigtime_t
Team::CPUTime(bool ignoreCurrentRun, Thread* lockedThread) const
{
	bigtime_t time = cpu_clock_offset + dead_threads_kernel_time
		+ dead_threads_user_time;

	Thread* currentThread = thread_get_current_thread();
	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		bool alreadyLocked = thread == lockedThread;
		SpinLocker threadTimeLocker(thread->time_lock, alreadyLocked);
		time += thread->kernel_time + thread->user_time;

		if (thread->last_time != 0) {
			if (!ignoreCurrentRun || thread != currentThread)
				time += now - thread->last_time;
		}

		if (alreadyLocked)
			threadTimeLocker.Detach();
	}

	return time;
}


/*!	Returns the team's current user CPU time.

	The caller must hold \c time_lock.

	\return The team's current user CPU time.
*/
bigtime_t
Team::UserCPUTime() const
{
	bigtime_t time = dead_threads_user_time;

	bigtime_t now = system_time();

	for (Thread* thread = thread_list; thread != NULL;
			thread = thread->team_next) {
		SpinLocker threadTimeLocker(thread->time_lock);
		time += thread->user_time;

		if (thread->last_time != 0 && !thread->in_kernel)
			time += now - thread->last_time;
	}

	return time;
}
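

// A minimal calling sketch (an assumption about the caller's context: no
// other spinlocks held, interrupts enabled): both accessors expect the
// team's \c time_lock to be held.
//
//	InterruptsSpinLocker timeLocker(team->time_lock);
//	bigtime_t total = team->CPUTime(false, NULL);
//	bigtime_t user = team->UserCPUTime();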


// #pragma mark - ProcessGroup


ProcessGroup::ProcessGroup(pid_t id)
	:
	id(id),
	teams(NULL),
	fSession(NULL),
	fInOrphanedCheckList(false)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Group:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessGroup::~ProcessGroup()
{
	TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32 "\n", id));

	// If the group is in the orphaned check list, remove it.
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (fInOrphanedCheckList)
		sOrphanedCheckProcessGroups.Remove(this);

	orphanedCheckLocker.Unlock();

	// remove group from the hash table and from the session
	if (fSession != NULL) {
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		sGroupHash.RemoveUnchecked(this);
		groupHashLocker.Unlock();

		fSession->ReleaseReference();
	}

	mutex_destroy(&fLock);
}


/*static*/ ProcessGroup*
ProcessGroup::Get(pid_t id)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	ProcessGroup* group = sGroupHash.Lookup(id);
	if (group != NULL)
		group->AcquireReference();
	return group;
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must not hold the process group hash lock.
*/
void
ProcessGroup::Publish(ProcessSession* session)
{
	InterruptsSpinLocker groupHashLocker(sGroupHashLock);
	PublishLocked(session);
}


/*!	Adds the group to the given session and makes it publicly accessible.
	The caller must hold the process group hash lock.
*/
void
ProcessGroup::PublishLocked(ProcessSession* session)
{
	ASSERT(sGroupHash.Lookup(this->id) == NULL);

	fSession = session;
	fSession->AcquireReference();

	sGroupHash.InsertUnchecked(this);
}


/*!	Checks whether the process group is orphaned.
	The caller must hold the group's lock.
	\return \c true, if the group is orphaned, \c false otherwise.
*/
bool
ProcessGroup::IsOrphaned() const
{
	// Orphaned Process Group: "A process group in which the parent of every
	// member is either itself a member of the group or is not a member of
	// the group's session." (Open Group Base Specs Issue 7)
	bool orphaned = true;

	Team* team = teams;
	while (orphaned && team != NULL) {
		team->LockTeamAndParent(false);

		Team* parent = team->parent;
		if (parent != NULL && parent->group_id != id
			&& parent->session_id == fSession->id) {
			orphaned = false;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return orphaned;
}
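

// Example: when a session leader (say, a login shell) dies, the process
// groups of its stopped background jobs typically become orphaned -- every
// remaining member's parent is then either in the same group or outside the
// session. POSIX requires such groups containing stopped processes to
// receive SIGHUP followed by SIGCONT; the check list above and
// ScheduleOrphanedCheck() below implement the bookkeeping for that.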


void
ProcessGroup::ScheduleOrphanedCheck()
{
	MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

	if (!fInOrphanedCheckList) {
		sOrphanedCheckProcessGroups.Add(this);
		fInOrphanedCheckList = true;
	}
}


void
ProcessGroup::UnsetOrphanedCheck()
{
	fInOrphanedCheckList = false;
}


// #pragma mark - ProcessSession


ProcessSession::ProcessSession(pid_t id)
	:
	id(id),
	controlling_tty(NULL),
	foreground_group(-1)
{
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Session:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
}


ProcessSession::~ProcessSession()
{
	mutex_destroy(&fLock);
}


// #pragma mark - KDL functions


static void
_dump_team_info(Team* team)
{
	kprintf("TEAM: %p\n", team);
	kprintf("id:               %" B_PRId32 " (%#" B_PRIx32 ")\n", team->id,
		team->id);
	kprintf("serial_number:    %" B_PRId64 "\n", team->serial_number);
	kprintf("name:             '%s'\n", team->Name());
	kprintf("args:             '%s'\n", team->Args());
	kprintf("hash_next:        %p\n", team->hash_next);
	kprintf("parent:           %p", team->parent);
	if (team->parent != NULL) {
		kprintf(" (id = %" B_PRId32 ")\n", team->parent->id);
	} else
		kprintf("\n");

	kprintf("children:         %p\n", team->children);
	kprintf("num_threads:      %d\n", team->num_threads);
	kprintf("state:            %d\n", team->state);
	kprintf("flags:            0x%" B_PRIx32 "\n", team->flags);
	kprintf("io_context:       %p\n", team->io_context);
	if (team->address_space)
		kprintf("address_space:    %p\n", team->address_space);
	kprintf("user data:        %p (area %" B_PRId32 ")\n",
		(void*)team->user_data, team->user_data_area);
	kprintf("free user thread: %p\n", team->free_user_threads);
	kprintf("main_thread:      %p\n", team->main_thread);
	kprintf("thread_list:      %p\n", team->thread_list);
	kprintf("group_id:         %" B_PRId32 "\n", team->group_id);
	kprintf("session_id:       %" B_PRId32 "\n", team->session_id);
}


static int
dump_team_info(int argc, char** argv)
{
	ulong arg;
	bool found = false;

	if (argc < 2) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && thread->team != NULL)
			_dump_team_info(thread->team);
		else
			kprintf("No current team!\n");
		return 0;
	}

	arg = strtoul(argv[1], NULL, 0);
	if (IS_KERNEL_ADDRESS(arg)) {
		// semi-hack
		_dump_team_info((Team*)arg);
		return 0;
	}

	// walk through the team list, trying to match name or id
	for (TeamTable::Iterator it = sTeamHash.GetIterator();
			Team* team = it.Next();) {
		if ((team->Name() && strcmp(argv[1], team->Name()) == 0)
			|| team->id == (team_id)arg) {
			_dump_team_info(team);
			found = true;
			break;
		}
	}

	if (!found) {
		kprintf("team \"%s\" (%" B_PRId32 ") doesn't exist!\n", argv[1],
			(team_id)arg);
	}
	return 0;
}
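

// Example invocations from the kernel debugger (KDL), assuming this function
// is registered as the usual "team" debugger command:
//
//	team                - dump the current team
//	team 0xdeadbeef     - dump the team at that kernel address or with that ID
//	team app_server     - dump the team named "app_server"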


static int
dump_teams(int argc, char** argv)
{
	kprintf("%-*s       id  %-*s         name\n", B_PRINTF_POINTER_WIDTH,
		"team", B_PRINTF_POINTER_WIDTH, "parent");

	for (TeamTable::Iterator it = sTeamHash.GetIterator();
			Team* team = it.Next();) {
		kprintf("%p%7" B_PRId32 "  %p  %s\n", team, team->id, team->parent,
			team->Name());
	}

	return 0;
}


// #pragma mark - Private functions


/*!	Get the parent of a given process.

	Used in the implementation of getppid (where a process can get its own
	parent, only) as well as in user_process_info where the information is
	available to anyone (allowing to display a tree of running processes).
*/
static pid_t
_getppid(pid_t id)
{
	if (id < 0) {
		errno = EINVAL;
		return -1;
	}

	if (id == 0) {
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);
		if (team->parent == NULL) {
			errno = EINVAL;
			return -1;
		}
		return team->parent->id;
	}

	Team* team = Team::GetAndLock(id);
	if (team == NULL) {
		errno = ESRCH;
		return -1;
	}

	pid_t parentID;

	if (team->parent == NULL) {
		errno = EINVAL;
		parentID = -1;
	} else
		parentID = team->parent->id;

	team->UnlockAndReleaseReference();

	return parentID;
}
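

// Sketch of the userland-facing behaviour (assuming the usual libroot
// mapping of getppid() and friends onto this helper):
//
//	pid_t parent = _getppid(0);       // parent of the calling team
//	pid_t other = _getppid(someId);   // parent of someId, or -1 + errno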


/*!	Inserts team \a team into the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be inserted into \a parent's child list.
*/
static void
insert_team_into_parent(Team* parent, Team* team)
{
	ASSERT(parent != NULL);

	team->siblings_next = parent->children;
	parent->children = team;
	team->parent = parent;
}


/*!	Removes team \a team from the child list of team \a parent.

	The caller must hold the lock of both \a parent and \a team.

	\param parent The parent team.
	\param team The team to be removed from \a parent's child list.
*/
static void
remove_team_from_parent(Team* parent, Team* team)
{
	Team* child;
	Team* last = NULL;

	for (child = parent->children; child != NULL;
			child = child->siblings_next) {
		if (child == team) {
			if (last == NULL)
				parent->children = child->siblings_next;
			else
				last->siblings_next = child->siblings_next;

			team->parent = NULL;
			break;
		}
		last = child;
	}
}


/*!	Returns whether the given team is a session leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_session_leader(Team* team)
{
	return team->session_id == team->id;
}


/*!	Returns whether the given team is a process group leader.
	The caller must hold the team's lock or its process group's lock.
*/
static bool
is_process_group_leader(Team* team)
{
	return team->group_id == team->id;
}


/*!	Inserts the given team into the given process group.
	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock.
*/
static void
insert_team_into_group(ProcessGroup* group, Team* team)
{
	team->group = group;
	team->group_id = group->id;
	team->session_id = group->Session()->id;

	team->group_next = group->teams;
	group->teams = team;
	group->AcquireReference();
}


/*!	Removes the given team from its process group.

	The caller must hold the process group's lock, the team's lock, and the
	team's parent's lock. Interrupts must be enabled.

	\param team The team that'll be removed from its process group.
*/
static void
remove_team_from_group(Team* team)
{
	ProcessGroup* group = team->group;
	Team* current;
	Team* last = NULL;

	// the team must be in a process group to let this function have any
	// effect
	if (group == NULL)
		return;

	for (current = group->teams; current != NULL;
			current = current->group_next) {
		if (current == team) {
			if (last == NULL)
				group->teams = current->group_next;
			else
				last->group_next = current->group_next;

			break;
		}
		last = current;
	}

	team->group = NULL;
	team->group_next = NULL;
	team->group_id = -1;

	group->ReleaseReference();
}


static status_t
create_team_user_data(Team* team, void* exactAddress = NULL)
{
	void* address;
	uint32 addressSpec;

	if (exactAddress != NULL) {
		address = exactAddress;
		addressSpec = B_EXACT_ADDRESS;
	} else {
		address = (void*)KERNEL_USER_DATA_BASE;
		addressSpec = B_RANDOMIZED_BASE_ADDRESS;
	}

	status_t result = vm_reserve_address_range(team->id, &address, addressSpec,
		kTeamUserDataReservedSize, RESERVED_AVOID_BASE);

	virtual_address_restrictions virtualRestrictions = {};
	if (result == B_OK || exactAddress != NULL) {
		if (exactAddress != NULL)
			virtualRestrictions.address = exactAddress;
		else
			virtualRestrictions.address = address;
		virtualRestrictions.address_specification = B_EXACT_ADDRESS;
	} else {
		virtualRestrictions.address = (void*)KERNEL_USER_DATA_BASE;
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
	}

	physical_address_restrictions physicalRestrictions = {};
	team->user_data_area = create_area_etc(team->id, "user area",
		kTeamUserDataInitialSize, B_FULL_LOCK,
		B_READ_AREA | B_WRITE_AREA | B_KERNEL_AREA, 0, 0,
		&virtualRestrictions, &physicalRestrictions, &address);
	if (team->user_data_area < 0)
		return team->user_data_area;

	team->user_data = (addr_t)address;
	team->used_user_data = 0;
	team->user_data_size = kTeamUserDataInitialSize;
	team->free_user_threads = NULL;

	return B_OK;
}


static void
delete_team_user_data(Team* team)
{
	if (team->user_data_area >= 0) {
		vm_delete_area(team->id, team->user_data_area, true);
		vm_unreserve_address_range(team->id, (void*)team->user_data,
			kTeamUserDataReservedSize);

		team->user_data = 0;
		team->used_user_data = 0;
		team->user_data_size = 0;
		team->user_data_area = -1;
		while (free_user_thread* entry = team->free_user_threads) {
			team->free_user_threads = entry->next;
			free(entry);
		}
	}
}


static status_t
copy_user_process_args(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, char**& _flatArgs)
{
	if (argCount < 0 || envCount < 0)
		return B_BAD_VALUE;

	if (flatArgsSize > MAX_PROCESS_ARGS_SIZE)
		return B_TOO_MANY_ARGS;
	if ((argCount + envCount + 2) * sizeof(char*) > flatArgsSize)
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userFlatArgs))
		return B_BAD_ADDRESS;

	// allocate kernel memory
	char** flatArgs = (char**)malloc(_ALIGN(flatArgsSize));
	if (flatArgs == NULL)
		return B_NO_MEMORY;

	if (user_memcpy(flatArgs, userFlatArgs, flatArgsSize) != B_OK) {
		free(flatArgs);
		return B_BAD_ADDRESS;
	}

	// check and relocate the array
	status_t error = B_OK;
	const char* stringBase = (char*)(flatArgs + argCount + envCount + 2);
	const char* stringEnd = (char*)flatArgs + flatArgsSize;
	for (int32 i = 0; i < argCount + envCount + 2; i++) {
		if (i == argCount || i == argCount + envCount + 1) {
			// check array null termination
			if (flatArgs[i] != NULL) {
				error = B_BAD_VALUE;
				break;
			}
		} else {
			// check string
			char* arg = (char*)flatArgs + (flatArgs[i] - (char*)userFlatArgs);
			size_t maxLen = stringEnd - arg;
			if (arg < stringBase || arg >= stringEnd
					|| strnlen(arg, maxLen) == maxLen) {
				error = B_BAD_VALUE;
				break;
			}

			flatArgs[i] = arg;
		}
	}

	if (error == B_OK)
		_flatArgs = flatArgs;
	else
		free(flatArgs);

	return error;
}
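

// Layout of the flattened buffer that copy_user_process_args() validates and
// relocates: a pointer table followed by the strings it points into.
//
//	flatArgs[0 .. argCount - 1]                   argument string pointers
//	flatArgs[argCount]                            NULL
//	flatArgs[argCount + 1 .. argCount + envCount] environment string pointers
//	flatArgs[argCount + envCount + 1]             NULL
//	(char*)(flatArgs + argCount + envCount + 2)   start of the string data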


static void
free_team_arg(struct team_arg* teamArg)
{
	if (teamArg != NULL) {
		free(teamArg->flat_args);
		free(teamArg->path);
		free(teamArg);
	}
}


static status_t
create_team_arg(struct team_arg** _teamArg, const char* path, char** flatArgs,
	size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask,
	port_id port, uint32 token)
{
	struct team_arg* teamArg = (struct team_arg*)malloc(sizeof(team_arg));
	if (teamArg == NULL)
		return B_NO_MEMORY;

	teamArg->path = strdup(path);
	if (teamArg->path == NULL) {
		free(teamArg);
		return B_NO_MEMORY;
	}

	// copy the args over
	teamArg->flat_args = flatArgs;
	teamArg->flat_args_size = flatArgsSize;
	teamArg->arg_count = argCount;
	teamArg->env_count = envCount;
	teamArg->flags = 0;
	teamArg->umask = umask;
	teamArg->error_port = port;
	teamArg->error_token = token;

	// determine the flags from the environment
	const char* const* env = flatArgs + argCount + 1;
	for (int32 i = 0; i < envCount; i++) {
		if (strcmp(env[i], "DISABLE_ASLR=1") == 0) {
			teamArg->flags |= TEAM_ARGS_FLAG_NO_ASLR;
			break;
		}
	}

	*_teamArg = teamArg;
	return B_OK;
}
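

// Consequently a team can be started without address space layout
// randomization by placing DISABLE_ASLR=1 in its environment (picked up via
// SetRandomizingEnabled() further down), e.g. from a shell:
//
//	DISABLE_ASLR=1 ./application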


static status_t
team_create_thread_start_internal(void* args)
{
	status_t err;
	Thread* thread;
	Team* team;
	struct team_arg* teamArgs = (struct team_arg*)args;
	const char* path;
	addr_t entry;
	char** userArgs;
	char** userEnv;
	struct user_space_program_args* programArgs;
	uint32 argCount, envCount;

	thread = thread_get_current_thread();
	team = thread->team;
	cache_node_launched(teamArgs->arg_count, teamArgs->flat_args);

	TRACE(("team_create_thread_start: entry thread %" B_PRId32 "\n",
		thread->id));

	// Main stack area layout is currently as follows (starting from 0):
	//
	// size                             | usage
	// ---------------------------------+--------------------------------
	// USER_MAIN_THREAD_STACK_SIZE      | actual stack
	// TLS_SIZE                         | TLS data
	// sizeof(user_space_program_args)  | argument structure for the runtime
	//                                  | loader
	// flat arguments size              | flat process arguments and
	//                                  | environment

	// TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it
	// to the heap
	// TODO: we could reserve the whole USER_STACK_REGION upfront...

	argCount = teamArgs->arg_count;
	envCount = teamArgs->env_count;

	programArgs = (struct user_space_program_args*)(thread->user_stack_base
		+ thread->user_stack_size + TLS_SIZE);

	userArgs = (char**)(programArgs + 1);
	userEnv = userArgs + argCount + 1;
	path = teamArgs->path;

	if (user_strlcpy(programArgs->program_path, path,
			sizeof(programArgs->program_path)) < B_OK
		|| user_memcpy(&programArgs->arg_count, &argCount, sizeof(int32))
			< B_OK
		|| user_memcpy(&programArgs->args, &userArgs, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->env_count, &envCount, sizeof(int32))
			< B_OK
		|| user_memcpy(&programArgs->env, &userEnv, sizeof(char**)) < B_OK
		|| user_memcpy(&programArgs->error_port, &teamArgs->error_port,
			sizeof(port_id)) < B_OK
		|| user_memcpy(&programArgs->error_token, &teamArgs->error_token,
			sizeof(uint32)) < B_OK
		|| user_memcpy(&programArgs->umask, &teamArgs->umask, sizeof(mode_t))
			< B_OK
		|| user_memcpy(&programArgs->disable_user_addons,
			&sDisableUserAddOns, sizeof(bool)) < B_OK
		|| user_memcpy(userArgs, teamArgs->flat_args,
			teamArgs->flat_args_size) < B_OK) {
		// the team deletion process will clean this mess
		free_team_arg(teamArgs);
		return B_BAD_ADDRESS;
	}

	TRACE(("team_create_thread_start: loading elf binary '%s'\n", path));

	// set team args and update state
	team->Lock();
	team->SetArgs(path, teamArgs->flat_args + 1, argCount - 1);
	team->state = TEAM_STATE_NORMAL;
	team->Unlock();

	free_team_arg(teamArgs);
		// the arguments are already on the user stack, we no longer need
		// them in this form

	// Clone commpage area
	area_id commPageArea = clone_commpage_area(team->id,
		&team->commpage_address);
	if (commPageArea < B_OK) {
		TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
			strerror(commPageArea)));
		return commPageArea;
	}

	// Register commpage image
	image_id commPageImage = get_commpage_image();
	extended_image_info imageInfo;
	err = get_image_info(commPageImage, &imageInfo.basic_info);
	if (err != B_OK) {
		TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
			strerror(err)));
		return err;
	}
	imageInfo.basic_info.text = team->commpage_address;
	imageInfo.text_delta = (ssize_t)(addr_t)team->commpage_address;
	imageInfo.symbol_table = NULL;
	imageInfo.symbol_hash = NULL;
	imageInfo.string_table = NULL;
	image_id image = register_image(team, &imageInfo, sizeof(imageInfo));
	if (image < 0) {
		TRACE(("team_create_thread_start: register_image() failed: %s\n",
			strerror(image)));
		return image;
	}

	// NOTE: Normally arch_thread_enter_userspace() never returns, that is
	// automatic variables with function scope will never be destroyed.
	{
		// find runtime_loader path
		KPath runtimeLoaderPath;
		err = __find_directory(B_SYSTEM_DIRECTORY, gBootDevice, false,
			runtimeLoaderPath.LockBuffer(), runtimeLoaderPath.BufferSize());
		if (err < B_OK) {
			TRACE(("team_create_thread_start: find_directory() failed: %s\n",
				strerror(err)));
			return err;
		}
		runtimeLoaderPath.UnlockBuffer();
		err = runtimeLoaderPath.Append("runtime_loader");

		if (err == B_OK) {
			err = elf_load_user_image(runtimeLoaderPath.Path(), team, 0,
				&entry);
		}
	}

	if (err < B_OK) {
		// Luckily, we don't have to clean up the mess we created - that's
		// done for us by the normal team deletion process
		TRACE(("team_create_thread_start: elf_load_user_image() failed: "
			"%s\n", strerror(err)));
		return err;
	}

	TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry));

	// enter userspace -- returns only in case of error
	return thread_enter_userspace_new_team(thread, (addr_t)entry,
		programArgs, team->commpage_address);
}


static status_t
team_create_thread_start(void* args)
{
	team_create_thread_start_internal(args);
	team_init_exit_info_on_error(thread_get_current_thread()->team);
	thread_exit();
		// does not return
	return B_OK;
}


static thread_id
load_image_internal(char**& _flatArgs, size_t flatArgsSize, int32 argCount,
	int32 envCount, int32 priority, team_id parentID, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	char** flatArgs = _flatArgs;
	thread_id thread;
	status_t status;
	struct team_arg* teamArgs;
	struct team_loading_info loadingInfo;
	ConditionVariableEntry loadingWaitEntry;
	io_context* parentIOContext = NULL;
	team_id teamID;
	bool teamLimitReached = false;

	if (flatArgs == NULL || argCount == 0)
		return B_BAD_VALUE;

	const char* path = flatArgs[0];

	TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
		"\n", path, flatArgs, argCount));

	// cut the path from the main thread name
	const char* threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;

	// create the main thread object
	Thread* mainThread;
	status = Thread::Create(threadName, mainThread);
	if (status != B_OK)
		return status;
	BReference<Thread> mainThreadReference(mainThread, true);

	// create team object
	Team* team = Team::Create(mainThread->id, path, false);
	if (team == NULL)
		return B_NO_MEMORY;
	BReference<Team> teamReference(team, true);

	BReference<Team> teamLoadingReference;
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		loadingInfo.condition.Init(team, "image load");
		loadingInfo.condition.Add(&loadingWaitEntry);
		loadingInfo.result = B_ERROR;
		team->loading_info = &loadingInfo;
		teamLoadingReference = teamReference;
	}

	// get the parent team
	Team* parent = Team::Get(parentID);
	if (parent == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> parentReference(parent, true);

	parent->LockTeamAndProcessGroup();
	team->Lock();

	// inherit the parent's user/group
	inherit_parent_user_and_group(team, parent);

	// get a reference to the parent's I/O context -- we need it to create
	// ours
	parentIOContext = parent->io_context;
	vfs_get_io_context(parentIOContext);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// check the executable's set-user/group-id permission
	update_set_id_user_and_group(team, path);

	status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize, argCount,
		envCount, (mode_t)-1, errorPort, errorToken);
	if (status != B_OK)
		goto err1;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentIOContext, true);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// We don't need the parent's I/O context any longer.
	vfs_put_io_context(parentIOContext);
	parentIOContext = NULL;

	// remove any fds that have the CLOEXEC flag set (emulating BeOS
	// behaviour)
	vfs_exec_io_context(team->io_context);

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status != B_OK)
		goto err2;

	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	// create the user data area
	status = create_team_user_data(team);
	if (status != B_OK)
		goto err4;

	// insert the team into its parent and the teams hash
	parent->LockTeamAndProcessGroup();
	team->Lock();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);

		sTeamHash.Insert(team);
		teamLimitReached = sUsedTeams >= sMaxTeams;
		if (!teamLimitReached)
			sUsedTeams++;
	}

	insert_team_into_parent(parent, team);
	insert_team_into_group(parent->group, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	// notify team listeners
	sNotificationService.Notify(TEAM_ADDED, team);

	if (teamLimitReached) {
		status = B_NO_MORE_TEAMS;
		goto err6;
	}

	// In case we start the main thread, we shouldn't access the team object
	// afterwards, so cache the team's ID.
	teamID = team->id;

	// Create a kernel thread, but under the context of the new team
	// The new thread will take over ownership of teamArgs.
	{
		ThreadCreationAttributes threadAttributes(team_create_thread_start,
			threadName, B_NORMAL_PRIORITY, teamArgs, teamID, mainThread);
		threadAttributes.additional_stack_size = sizeof(user_space_program_args)
			+ teamArgs->flat_args_size;
		thread = thread_create_thread(threadAttributes, false);
		if (thread < 0) {
			status = thread;
			goto err6;
		}
	}

	// The team has been created successfully, so we keep the reference. Or
	// more precisely: It's owned by the team's main thread, now.
	teamReference.Detach();

	// wait for the loader of the new team to finish its work
	if ((flags & B_WAIT_TILL_LOADED) != 0) {
		if (mainThread != NULL) {
			// resume the team's main thread
			thread_continue(mainThread);
		}

		// Now wait until loading is finished. We will be woken either by the
		// thread, when it finished or aborted loading, or when the team is
		// going to die (e.g. is killed). In either case the one notifying is
		// responsible for unsetting `loading_info` in the team structure.
		loadingWaitEntry.Wait();

		// We must synchronize with the thread that woke us up, to ensure
		// there are no remaining consumers of the team_loading_info.
		team->Lock();
		if (team->loading_info != NULL)
			panic("team loading wait complete, but loading_info != NULL");
		team->Unlock();
		teamLoadingReference.Unset();

		if (loadingInfo.result < B_OK)
			return loadingInfo.result;
	}

	// notify the debugger
	user_debug_team_created(teamID);

	return thread;

err6:
	// Remove the team structure from the process group, the parent team, and
	// the team hash table and delete the team structure.
	parent->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parent->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);

	delete_team_user_data(team);
err4:
	team->address_space->Put();
err2:
	free_team_arg(teamArgs);
err1:
	if (parentIOContext != NULL)
		vfs_put_io_context(parentIOContext);

	return status;
}
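

// This is the kernel backend of the BeOS-style load_image() userland API (a
// sketch of the mapping, an assumption about the libroot plumbing; the
// actual call travels through the load_image syscall):
//
//	const char* args[] = { "/bin/ls", "-l", NULL };
//	thread_id thread = load_image(2, args, (const char**)environ);
//	if (thread >= 0)
//		resume_thread(thread);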


/*!	Almost shuts down the current team and loads a new image into it.
	If successful, this function does not return and will take over ownership
	of the arguments provided.
	This function may only be called in a userland team (caused by one of the
	exec*() syscalls).
*/
static status_t
exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, mode_t umask)
{
	// NOTE: Since this function normally doesn't return, don't use automatic
	// variables that need destruction in the function scope.
	char** flatArgs = _flatArgs;
	Team* team = thread_get_current_thread()->team;
	struct team_arg* teamArgs;
	const char* threadName;
	thread_id nubThreadID = -1;

	TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32 ", envCount = %"
		B_PRId32 "): team %" B_PRId32 "\n", path, argCount, envCount,
		team->id));

	T(ExecTeam(path, argCount, flatArgs, envCount, flatArgs + argCount + 1));

	// switching the kernel at run time is probably not a good idea :)
	if (team == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// we currently need to be single threaded here
	// TODO: maybe we should just kill all other threads and
	//	make the current thread the team's main thread?
	Thread* currentThread = thread_get_current_thread();
	if (currentThread != team->main_thread)
		return B_NOT_ALLOWED;

	// The debug nub thread, a pure kernel thread, is allowed to survive.
	// We iterate through the thread list to make sure that there's no other
	// thread.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
		nubThreadID = team->debug_info.nub_thread;

	debugInfoLocker.Unlock();

	for (Thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (thread != team->main_thread && thread->id != nubThreadID)
			return B_NOT_ALLOWED;
	}

	team->DeleteUserTimers(true);
	team->ResetSignalsOnExec();

	teamLocker.Unlock();

	status_t status = create_team_arg(&teamArgs, path, flatArgs, flatArgsSize,
		argCount, envCount, umask, -1, 0);
	if (status != B_OK)
		return status;

	_flatArgs = NULL;
		// args are owned by the team_arg structure now

	// TODO: remove team resources if there are any left
	// thread_atkernel_exit() might not be called at all

	thread_reset_for_exec();

	user_debug_prepare_for_exec();

	delete_team_user_data(team);
	vm_delete_areas(team->address_space, false);
	xsi_sem_undo(team);
	delete_owned_ports(team);
	sem_delete_owned_sems(team);
	remove_images(team);
	vfs_exec_io_context(team->io_context);
	delete_user_mutex_context(team->user_mutex_context);
	team->user_mutex_context = NULL;
	delete_realtime_sem_context(team->realtime_sem_context);
	team->realtime_sem_context = NULL;

	// update ASLR
	team->address_space->SetRandomizingEnabled(
		(teamArgs->flags & TEAM_ARGS_FLAG_NO_ASLR) == 0);

	status = create_team_user_data(team);
	if (status != B_OK) {
		// creating the user data failed -- we're toast
		free_team_arg(teamArgs);
		exit_thread(status);
		return status;
	}

	user_debug_finish_after_exec();

	// rename the team

	team->Lock();
	team->SetName(path);
	team->Unlock();

	// cut the path from the team name and rename the main thread, too
	threadName = strrchr(path, '/');
	if (threadName != NULL)
		threadName++;
	else
		threadName = path;
	rename_thread(thread_get_current_thread_id(), threadName);

	atomic_or(&team->flags, TEAM_FLAG_EXEC_DONE);

	// Update user/group according to the executable's set-user/group-id
	// permission.
	update_set_id_user_and_group(team, path);

	user_debug_team_exec();

	// notify team listeners
	sNotificationService.Notify(TEAM_EXEC, team);

	// get a user thread for the thread
	user_thread* userThread = team_allocate_user_thread(team);
		// cannot fail (the allocation for the team would have failed already)
	ThreadLocker currentThreadLocker(currentThread);
	currentThread->user_thread = userThread;
	currentThreadLocker.Unlock();

	// create the user stack for the thread
	status = thread_create_user_stack(currentThread->team, currentThread, NULL,
		0, sizeof(user_space_program_args) + teamArgs->flat_args_size);
	if (status == B_OK) {
		// prepare the stack, load the runtime loader, and enter userspace
		team_create_thread_start(teamArgs);
			// never returns
	} else
		free_team_arg(teamArgs);

	// Sorry, we have to kill ourselves, there is no way out anymore
	// (without any areas left and all that).
	exit_thread(status);

	// We return a status here since the signal that is sent by the
	// call above is not immediately handled.
	return B_ERROR;
}


static thread_id
fork_team(void)
{
	Thread* parentThread = thread_get_current_thread();
	Team* parentTeam = parentThread->team;
	Team* team;
	arch_fork_arg* forkArgs;
	struct area_info info;
	thread_id threadID;
	status_t status;
	ssize_t areaCookie;
	bool teamLimitReached = false;

	TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id));

	if (parentTeam == team_get_kernel_team())
		return B_NOT_ALLOWED;

	// create a new team
	// TODO: this is very similar to load_image_internal() - maybe we can do
	// something about it :)

	// create the main thread object
	Thread* thread;
	status = Thread::Create(parentThread->name, thread);
	if (status != B_OK)
		return status;
	BReference<Thread> threadReference(thread, true);

	// create the team object
	team = Team::Create(thread->id, NULL, false);
	if (team == NULL)
		return B_NO_MEMORY;

	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	team->SetName(parentTeam->Name());
	team->SetArgs(parentTeam->Args());

	team->commpage_address = parentTeam->commpage_address;

	// Inherit the parent's user/group.
	inherit_parent_user_and_group(team, parentTeam);

	// inherit signal handlers
	team->InheritSignalActions(parentTeam);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	// inherit some team debug flags
	team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags)
		& B_TEAM_DEBUG_INHERITED_FLAGS;

	forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg));
	if (forkArgs == NULL) {
		status = B_NO_MEMORY;
		goto err1;
	}

	// create a new io_context for this team
	team->io_context = vfs_new_io_context(parentTeam->io_context, false);
	if (!team->io_context) {
		status = B_NO_MEMORY;
		goto err2;
	}

	// duplicate the realtime sem context
	if (parentTeam->realtime_sem_context) {
		team->realtime_sem_context = clone_realtime_sem_context(
			parentTeam->realtime_sem_context);
		if (team->realtime_sem_context == NULL) {
			status = B_NO_MEMORY;
			goto err2;
		}
	}

	// create an address space for this team
	status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false,
		&team->address_space);
	if (status < B_OK)
		goto err3;

	// copy all areas of the team
	// TODO: should be able to handle stack areas differently (ie. don't have
	// them copy-on-write)

	areaCookie = 0;
	while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) {
		if (info.area == parentTeam->user_data_area) {
			// don't clone the user area; just create a new one
			status = create_team_user_data(team, info.address);
			if (status != B_OK)
				break;

			thread->user_thread = team_allocate_user_thread(team);
		} else {
			void* address;
			area_id area = vm_copy_area(team->address_space->ID(), info.name,
				&address, B_CLONE_ADDRESS, info.area);
			if (area < B_OK) {
				status = area;
				break;
			}

			if (info.area == parentThread->user_stack_area)
				thread->user_stack_area = area;
		}
	}
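
	// Note: vm_copy_area() is expected to clone the areas copy-on-write
	// where possible (an assumption about its semantics; see vm.cpp for the
	// details), so parent and child share the physical pages until one side
	// writes to them.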
2098 return B_ERROR; 2099 } 2100 2101 2102 static thread_id 2103 fork_team(void) 2104 { 2105 Thread* parentThread = thread_get_current_thread(); 2106 Team* parentTeam = parentThread->team; 2107 Team* team; 2108 arch_fork_arg* forkArgs; 2109 struct area_info info; 2110 thread_id threadID; 2111 status_t status; 2112 ssize_t areaCookie; 2113 bool teamLimitReached = false; 2114 2115 TRACE(("fork_team(): team %" B_PRId32 "\n", parentTeam->id)); 2116 2117 if (parentTeam == team_get_kernel_team()) 2118 return B_NOT_ALLOWED; 2119 2120 // create a new team 2121 // TODO: this is very similar to load_image_internal() - maybe we can do 2122 // something about it :) 2123 2124 // create the main thread object 2125 Thread* thread; 2126 status = Thread::Create(parentThread->name, thread); 2127 if (status != B_OK) 2128 return status; 2129 BReference<Thread> threadReference(thread, true); 2130 2131 // create the team object 2132 team = Team::Create(thread->id, NULL, false); 2133 if (team == NULL) 2134 return B_NO_MEMORY; 2135 2136 parentTeam->LockTeamAndProcessGroup(); 2137 team->Lock(); 2138 2139 team->SetName(parentTeam->Name()); 2140 team->SetArgs(parentTeam->Args()); 2141 2142 team->commpage_address = parentTeam->commpage_address; 2143 2144 // Inherit the parent's user/group. 2145 inherit_parent_user_and_group(team, parentTeam); 2146 2147 // inherit signal handlers 2148 team->InheritSignalActions(parentTeam); 2149 2150 team->Unlock(); 2151 parentTeam->UnlockTeamAndProcessGroup(); 2152 2153 // inherit some team debug flags 2154 team->debug_info.flags |= atomic_get(&parentTeam->debug_info.flags) 2155 & B_TEAM_DEBUG_INHERITED_FLAGS; 2156 2157 forkArgs = (arch_fork_arg*)malloc(sizeof(arch_fork_arg)); 2158 if (forkArgs == NULL) { 2159 status = B_NO_MEMORY; 2160 goto err1; 2161 } 2162 2163 // create a new io_context for this team 2164 team->io_context = vfs_new_io_context(parentTeam->io_context, false); 2165 if (!team->io_context) { 2166 status = B_NO_MEMORY; 2167 goto err2; 2168 } 2169 2170 // duplicate the realtime sem context 2171 if (parentTeam->realtime_sem_context) { 2172 team->realtime_sem_context = clone_realtime_sem_context( 2173 parentTeam->realtime_sem_context); 2174 if (team->realtime_sem_context == NULL) { 2175 status = B_NO_MEMORY; 2176 goto err2; 2177 } 2178 } 2179 2180 // create an address space for this team 2181 status = VMAddressSpace::Create(team->id, USER_BASE, USER_SIZE, false, 2182 &team->address_space); 2183 if (status < B_OK) 2184 goto err3; 2185 2186 // copy all areas of the team 2187 // TODO: should be able to handle stack areas differently (ie. 
don't have 2188 // them copy-on-write) 2189 2190 areaCookie = 0; 2191 while (get_next_area_info(B_CURRENT_TEAM, &areaCookie, &info) == B_OK) { 2192 if (info.area == parentTeam->user_data_area) { 2193 // don't clone the user area; just create a new one 2194 status = create_team_user_data(team, info.address); 2195 if (status != B_OK) 2196 break; 2197 2198 thread->user_thread = team_allocate_user_thread(team); 2199 } else { 2200 void* address; 2201 area_id area = vm_copy_area(team->address_space->ID(), info.name, 2202 &address, B_CLONE_ADDRESS, info.area); 2203 if (area < B_OK) { 2204 status = area; 2205 break; 2206 } 2207 2208 if (info.area == parentThread->user_stack_area) 2209 thread->user_stack_area = area; 2210 } 2211 } 2212 2213 if (status < B_OK) 2214 goto err4; 2215 2216 if (thread->user_thread == NULL) { 2217 #if KDEBUG 2218 panic("user data area not found, parent area is %" B_PRId32, 2219 parentTeam->user_data_area); 2220 #endif 2221 status = B_ERROR; 2222 goto err4; 2223 } 2224 2225 thread->user_stack_base = parentThread->user_stack_base; 2226 thread->user_stack_size = parentThread->user_stack_size; 2227 thread->user_local_storage = parentThread->user_local_storage; 2228 thread->sig_block_mask = parentThread->sig_block_mask; 2229 thread->signal_stack_base = parentThread->signal_stack_base; 2230 thread->signal_stack_size = parentThread->signal_stack_size; 2231 thread->signal_stack_enabled = parentThread->signal_stack_enabled; 2232 2233 arch_store_fork_frame(forkArgs); 2234 2235 // copy image list 2236 if (copy_images(parentTeam->id, team) != B_OK) 2237 goto err5; 2238 2239 // insert the team into its parent and the teams hash 2240 parentTeam->LockTeamAndProcessGroup(); 2241 team->Lock(); 2242 2243 { 2244 InterruptsWriteSpinLocker teamsLocker(sTeamHashLock); 2245 2246 sTeamHash.Insert(team); 2247 teamLimitReached = sUsedTeams >= sMaxTeams; 2248 if (!teamLimitReached) 2249 sUsedTeams++; 2250 } 2251 2252 insert_team_into_parent(parentTeam, team); 2253 insert_team_into_group(parentTeam->group, team); 2254 2255 team->Unlock(); 2256 parentTeam->UnlockTeamAndProcessGroup(); 2257 2258 // notify team listeners 2259 sNotificationService.Notify(TEAM_ADDED, team); 2260 2261 if (teamLimitReached) { 2262 status = B_NO_MORE_TEAMS; 2263 goto err6; 2264 } 2265 2266 // create the main thread 2267 { 2268 ThreadCreationAttributes threadCreationAttributes(NULL, 2269 parentThread->name, parentThread->priority, NULL, team->id, thread); 2270 threadCreationAttributes.forkArgs = forkArgs; 2271 threadCreationAttributes.flags |= THREAD_CREATION_FLAG_DEFER_SIGNALS; 2272 threadID = thread_create_thread(threadCreationAttributes, false); 2273 if (threadID < 0) { 2274 status = threadID; 2275 goto err6; 2276 } 2277 } 2278 2279 // notify the debugger 2280 user_debug_team_created(team->id); 2281 2282 T(TeamForked(threadID)); 2283 2284 resume_thread(threadID); 2285 return threadID; 2286 2287 err6: 2288 // Remove the team structure from the process group, the parent team, and 2289 // the team hash table and delete the team structure. 
	parentTeam->LockTeamAndProcessGroup();
	team->Lock();

	remove_team_from_group(team);
	remove_team_from_parent(team->parent, team);

	team->Unlock();
	parentTeam->UnlockTeamAndProcessGroup();

	{
		InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
		sTeamHash.Remove(team);
		if (!teamLimitReached)
			sUsedTeams--;
	}

	sNotificationService.Notify(TEAM_REMOVED, team);
err5:
	remove_images(team);
err4:
	team->address_space->RemoveAndPut();
err3:
	delete_realtime_sem_context(team->realtime_sem_context);
err2:
	free(forkArgs);
err1:
	team->ReleaseReference();

	return status;
}


/*!	Returns whether the specified team \a parent has any children belonging
	to the process group with the specified ID \a groupID.
	The caller must hold \a parent's lock.
*/
static bool
has_children_in_group(Team* parent, pid_t groupID)
{
	for (Team* child = parent->children; child != NULL;
			child = child->siblings_next) {
		TeamLocker childLocker(child);
		if (child->group_id == groupID)
			return true;
	}

	return false;
}


/*!	Returns the first job control entry from \a children, which matches \a id.
	\a id can be:
	- \code > 0 \endcode: Matching an entry with that team ID.
	- \code == -1 \endcode: Matching any entry.
	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
	\c 0 is an invalid value for \a id.

	The caller must hold the lock of the team that \a children belongs to.

	\param children The job control entry list to check.
	\param id The match criterion.
	\return The first matching entry or \c NULL, if none matches.
*/
static job_control_entry*
get_job_control_entry(team_job_control_children& children, pid_t id)
{
	for (JobControlEntryList::Iterator it = children.entries.GetIterator();
			job_control_entry* entry = it.Next();) {

		if (id > 0) {
			if (entry->thread == id)
				return entry;
		} else if (id == -1) {
			return entry;
		} else {
			pid_t processGroup
				= (entry->team ? entry->team->group_id : entry->group_id);
			if (processGroup == -id)
				return entry;
		}
	}

	return NULL;
}


/*!	Returns the first job control entry from one of \a team's dead, continued,
	or stopped children which matches \a id.
	\a id can be:
	- \code > 0 \endcode: Matching an entry with that team ID.
	- \code == -1 \endcode: Matching any entry.
	- \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
	\c 0 is an invalid value for \a id.

	The caller must hold \a team's lock.

	\param team The team whose dead, stopped, and continued child lists shall
		be checked.
	\param id The match criterion.
	\param flags Specifies which children shall be considered. Dead children
		are considered when \a flags is ORed bitwise with \c WEXITED, stopped
		children are considered when \a flags is ORed bitwise with \c WUNTRACED
		or \c WSTOPPED, continued children when \a flags is ORed bitwise with
		\c WCONTINUED.
	\return The first matching entry or \c NULL, if none matches.
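
	A minimal usage sketch (illustrative only; the caller holds \a team's
	lock):
	\code
	// first unconsumed death entry of the child with team ID 123, if any
	job_control_entry* dead = get_job_control_entry(team, 123, WEXITED);
	// any stopped or continued child
	job_control_entry* other = get_job_control_entry(team, -1,
		WSTOPPED | WCONTINUED);
	\endcode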
2395 */ 2396 static job_control_entry* 2397 get_job_control_entry(Team* team, pid_t id, uint32 flags) 2398 { 2399 job_control_entry* entry = NULL; 2400 2401 if ((flags & WEXITED) != 0) 2402 entry = get_job_control_entry(team->dead_children, id); 2403 2404 if (entry == NULL && (flags & WCONTINUED) != 0) 2405 entry = get_job_control_entry(team->continued_children, id); 2406 2407 if (entry == NULL && (flags & (WUNTRACED | WSTOPPED)) != 0) 2408 entry = get_job_control_entry(team->stopped_children, id); 2409 2410 return entry; 2411 } 2412 2413 2414 job_control_entry::job_control_entry() 2415 : 2416 has_group_ref(false) 2417 { 2418 } 2419 2420 2421 job_control_entry::~job_control_entry() 2422 { 2423 if (has_group_ref) { 2424 InterruptsSpinLocker groupHashLocker(sGroupHashLock); 2425 2426 ProcessGroup* group = sGroupHash.Lookup(group_id); 2427 if (group == NULL) { 2428 panic("job_control_entry::~job_control_entry(): unknown group " 2429 "ID: %" B_PRId32, group_id); 2430 return; 2431 } 2432 2433 groupHashLocker.Unlock(); 2434 2435 group->ReleaseReference(); 2436 } 2437 } 2438 2439 2440 /*! Invoked when the owning team is dying, initializing the entry according to 2441 the dead state. 2442 2443 The caller must hold the owning team's lock and the scheduler lock. 2444 */ 2445 void 2446 job_control_entry::InitDeadState() 2447 { 2448 if (team != NULL) { 2449 ASSERT(team->exit.initialized); 2450 2451 group_id = team->group_id; 2452 team->group->AcquireReference(); 2453 has_group_ref = true; 2454 2455 thread = team->id; 2456 status = team->exit.status; 2457 reason = team->exit.reason; 2458 signal = team->exit.signal; 2459 signaling_user = team->exit.signaling_user; 2460 user_time = team->dead_threads_user_time 2461 + team->dead_children.user_time; 2462 kernel_time = team->dead_threads_kernel_time 2463 + team->dead_children.kernel_time; 2464 2465 team = NULL; 2466 } 2467 } 2468 2469 2470 job_control_entry& 2471 job_control_entry::operator=(const job_control_entry& other) 2472 { 2473 state = other.state; 2474 thread = other.thread; 2475 signal = other.signal; 2476 has_group_ref = false; 2477 signaling_user = other.signaling_user; 2478 team = other.team; 2479 group_id = other.group_id; 2480 status = other.status; 2481 reason = other.reason; 2482 user_time = other.user_time; 2483 kernel_time = other.kernel_time; 2484 2485 return *this; 2486 } 2487 2488 2489 /*! This is the kernel backend for waitid(). 2490 */ 2491 static thread_id 2492 wait_for_child(pid_t child, uint32 flags, siginfo_t& _info, 2493 team_usage_info& _usage_info) 2494 { 2495 Thread* thread = thread_get_current_thread(); 2496 Team* team = thread->team; 2497 struct job_control_entry foundEntry; 2498 struct job_control_entry* freeDeathEntry = NULL; 2499 status_t status = B_OK; 2500 2501 TRACE(("wait_for_child(child = %" B_PRId32 ", flags = %" B_PRId32 ")\n", 2502 child, flags)); 2503 2504 T(WaitForChild(child, flags)); 2505 2506 if ((flags & (WEXITED | WUNTRACED | WSTOPPED | WCONTINUED)) == 0) { 2507 T(WaitForChildDone(B_BAD_VALUE)); 2508 return B_BAD_VALUE; 2509 } 2510 2511 pid_t originalChild = child; 2512 2513 bool ignoreFoundEntries = false; 2514 bool ignoreFoundEntriesChecked = false; 2515 2516 while (true) { 2517 // lock the team 2518 TeamLocker teamLocker(team); 2519 2520 // A 0 child argument means to wait for all children in the process 2521 // group of the calling team. 2522 child = originalChild == 0 ? 
			-team->group_id : originalChild;

		// check whether any condition holds
		job_control_entry* entry = get_job_control_entry(team, child, flags);

		// If we don't have an entry yet, check whether there are any children
		// complying with the process group specification at all.
		if (entry == NULL) {
			bool childrenExist = false;
			if (child == -1) {
				childrenExist = team->children != NULL;
			} else if (child < -1) {
				childrenExist = has_children_in_group(team, -child);
			} else if (child != team->id) {
				if (Team* childTeam = Team::Get(child)) {
					BReference<Team> childTeamReference(childTeam, true);
					TeamLocker childTeamLocker(childTeam);
					childrenExist = childTeam->parent == team;
				}
			}

			if (!childrenExist) {
				// there is no child we could wait for
				status = ECHILD;
			} else {
				// the children we're waiting for are still running
				status = B_WOULD_BLOCK;
			}
		} else {
			// got something
			foundEntry = *entry;

			// unless WNOWAIT has been specified, "consume" the wait state
			if ((flags & WNOWAIT) == 0 || ignoreFoundEntries) {
				if (entry->state == JOB_CONTROL_STATE_DEAD) {
					// The child is dead. Reap its death entry.
					freeDeathEntry = entry;
					team->dead_children.entries.Remove(entry);
					team->dead_children.count--;
				} else {
					// The child is well. Reset its job control state.
					team_set_job_control_state(entry->team,
						JOB_CONTROL_STATE_NONE, NULL);
				}
			}
		}

		// If we haven't got anything yet, prepare for waiting for the
		// condition variable.
		ConditionVariableEntry deadWaitEntry;

		if (status == B_WOULD_BLOCK && (flags & WNOHANG) == 0)
			team->dead_children.condition_variable.Add(&deadWaitEntry);

		teamLocker.Unlock();

		// we got our entry and can return to our caller
		if (status == B_OK) {
			if (ignoreFoundEntries) {
				// ... unless we shall ignore found entries
				delete freeDeathEntry;
				freeDeathEntry = NULL;
				continue;
			}

			break;
		}

		if (status != B_WOULD_BLOCK || (flags & WNOHANG) != 0) {
			T(WaitForChildDone(status));
			return status;
		}

		status = deadWaitEntry.Wait(B_CAN_INTERRUPT);
		if (status == B_INTERRUPTED) {
			T(WaitForChildDone(status));
			return status;
		}

		// If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
		// all our children are dead and fail with ECHILD. We check the
		// condition at this point.
		if (!ignoreFoundEntriesChecked) {
			teamLocker.Lock();

			struct sigaction& handler = team->SignalActionFor(SIGCHLD);
			if ((handler.sa_flags & SA_NOCLDWAIT) != 0
				|| handler.sa_handler == SIG_IGN) {
				ignoreFoundEntries = true;
			}

			teamLocker.Unlock();

			ignoreFoundEntriesChecked = true;
		}
	}

	delete freeDeathEntry;

	// Once we get here, we have a valid death entry and have already been
	// unregistered from the team or group. Fill in the returned info.
	memset(&_info, 0, sizeof(_info));
	_info.si_signo = SIGCHLD;
	_info.si_pid = foundEntry.thread;
	_info.si_uid = foundEntry.signaling_user;
	// TODO: Fill in si_errno?
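
	// Translate the consumed job control state into the siginfo_t fields
	// the caller expects: si_code tells exit, stop, and continue apart,
	// while si_status carries the exit status or signal number.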

	switch (foundEntry.state) {
		case JOB_CONTROL_STATE_DEAD:
			_info.si_code = foundEntry.reason;
			_info.si_status = foundEntry.reason == CLD_EXITED
				? foundEntry.status : foundEntry.signal;
			_usage_info.user_time = foundEntry.user_time;
			_usage_info.kernel_time = foundEntry.kernel_time;
			break;
		case JOB_CONTROL_STATE_STOPPED:
			_info.si_code = CLD_STOPPED;
			_info.si_status = foundEntry.signal;
			break;
		case JOB_CONTROL_STATE_CONTINUED:
			_info.si_code = CLD_CONTINUED;
			_info.si_status = 0;
			break;
		case JOB_CONTROL_STATE_NONE:
			// can't happen
			break;
	}

	// If SIGCHLD is blocked, we shall clear pending SIGCHLDs if no other child
	// status is available.
	TeamLocker teamLocker(team);
	InterruptsSpinLocker signalLocker(team->signal_lock);
	SpinLocker threadCreationLocker(gThreadCreationLock);

	if (is_team_signal_blocked(team, SIGCHLD)) {
		if (get_job_control_entry(team, child, flags) == NULL)
			team->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD));
	}

	threadCreationLocker.Unlock();
	signalLocker.Unlock();
	teamLocker.Unlock();

	// When the team is dead, the main thread continues to live in the kernel
	// team for a very short time. To avoid surprises for the caller we wait
	// until the thread is really gone.
	if (foundEntry.state == JOB_CONTROL_STATE_DEAD)
		wait_for_thread(foundEntry.thread, NULL);

	T(WaitForChildDone(foundEntry));

	return foundEntry.thread;
}


/*!	Fills the team_info structure with information from the specified team.
	Interrupts must be enabled. The team must not be locked.
*/
static status_t
fill_team_info(Team* team, team_info* info, size_t size)
{
	if (size > sizeof(team_info))
		return B_BAD_VALUE;

	// TODO: Set more information for team_info
	memset(info, 0, size);

	info->team = team->id;
		// immutable
	info->image_count = count_images(team);
		// protected by sImageMutex

	TeamLocker teamLocker(team);
	InterruptsSpinLocker debugInfoLocker(team->debug_info.lock);

	info->thread_count = team->num_threads;
	//info->area_count =
	info->debugger_nub_thread = team->debug_info.nub_thread;
	info->debugger_nub_port = team->debug_info.nub_port;
	info->uid = team->effective_uid;
	info->gid = team->effective_gid;

	strlcpy(info->args, team->Args(), sizeof(info->args));
	info->argc = 1;

	if (size > offsetof(team_info, real_uid)) {
		info->real_uid = team->real_uid;
		info->real_gid = team->real_gid;
		info->group_id = team->group_id;
		info->session_id = team->session_id;

		if (team->parent != NULL)
			info->parent = team->parent->id;
		else
			info->parent = -1;

		strlcpy(info->name, team->Name(), sizeof(info->name));
		info->start_time = team->start_time;
	}

	return B_OK;
}


/*!	Returns whether the process group contains stopped processes.
	The caller must hold the process group's lock.
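	Used by orphaned_process_group_check() to decide whether a newly-orphaned
	group must be sent SIGHUP and SIGCONT.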
*/
static bool
process_group_has_stopped_processes(ProcessGroup* group)
{
	Team* team = group->teams;
	while (team != NULL) {
		// the parent team's lock guards the job control entry -- acquire it
		team->LockTeamAndParent(false);

		if (team->job_control_entry != NULL
			&& team->job_control_entry->state == JOB_CONTROL_STATE_STOPPED) {
			team->UnlockTeamAndParent();
			return true;
		}

		team->UnlockTeamAndParent();

		team = team->group_next;
	}

	return false;
}


/*!	Iterates through all process groups queued in team_remove_team() and
	signals those that are orphaned and have stopped processes.
	The caller must not hold any team or process group locks.
*/
static void
orphaned_process_group_check()
{
	// process as long as there are groups in the list
	while (true) {
		// remove the head from the list
		MutexLocker orphanedCheckLocker(sOrphanedCheckLock);

		ProcessGroup* group = sOrphanedCheckProcessGroups.RemoveHead();
		if (group == NULL)
			return;

		group->UnsetOrphanedCheck();
		BReference<ProcessGroup> groupReference(group);

		orphanedCheckLocker.Unlock();

		AutoLocker<ProcessGroup> groupLocker(group);

		// If the group is orphaned and contains stopped processes, we're
		// supposed to send SIGHUP + SIGCONT.
		if (group->IsOrphaned() && process_group_has_stopped_processes(group)) {
			Thread* currentThread = thread_get_current_thread();

			Signal signal(SIGHUP, SI_USER, B_OK, currentThread->team->id);
			send_signal_to_process_group_locked(group, signal, 0);

			signal.SetNumber(SIGCONT);
			send_signal_to_process_group_locked(group, signal, 0);
		}
	}
}


static status_t
common_get_team_usage_info(team_id id, int32 who, team_usage_info* info,
	uint32 flags)
{
	if (who != B_TEAM_USAGE_SELF && who != B_TEAM_USAGE_CHILDREN)
		return B_BAD_VALUE;

	// get the team
	Team* team = Team::GetAndLock(id);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);
	TeamLocker teamLocker(team, true);

	if ((flags & B_CHECK_PERMISSION) != 0) {
		uid_t uid = geteuid();
		if (uid != 0 && uid != team->effective_uid)
			return B_NOT_ALLOWED;
	}

	bigtime_t kernelTime = 0;
	bigtime_t userTime = 0;

	switch (who) {
		case B_TEAM_USAGE_SELF:
		{
			Thread* thread = team->thread_list;

			for (; thread != NULL; thread = thread->team_next) {
				InterruptsSpinLocker threadTimeLocker(thread->time_lock);
				kernelTime += thread->kernel_time;
				userTime += thread->user_time;
			}

			kernelTime += team->dead_threads_kernel_time;
			userTime += team->dead_threads_user_time;
			break;
		}

		case B_TEAM_USAGE_CHILDREN:
		{
			Team* child = team->children;
			for (; child != NULL; child = child->siblings_next) {
				TeamLocker childLocker(child);

				// sum up the live threads of the child, not of the caller
				Thread* thread = child->thread_list;

				for (; thread != NULL; thread = thread->team_next) {
					InterruptsSpinLocker threadTimeLocker(thread->time_lock);
					kernelTime += thread->kernel_time;
					userTime += thread->user_time;
				}

				kernelTime += child->dead_threads_kernel_time;
				userTime += child->dead_threads_user_time;
			}

			kernelTime += team->dead_children.kernel_time;
			userTime += team->dead_children.user_time;
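
			// (Times of children that have already been reaped were folded
			// into dead_children by team_remove_team().)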
			break;
		}
	}

	info->kernel_time = kernelTime;
	info->user_time = userTime;

	return B_OK;
}


// #pragma mark - Private kernel API


status_t
team_init(kernel_args* args)
{
	// create the team hash table
	new(&sTeamHash) TeamTable;
	if (sTeamHash.Init(64) != B_OK)
		panic("Failed to init team hash table!");

	new(&sGroupHash) ProcessGroupHashTable;
	if (sGroupHash.Init() != B_OK)
		panic("Failed to init process group hash table!");

	// create initial session and process groups

	ProcessSession* session = new(std::nothrow) ProcessSession(1);
	if (session == NULL)
		panic("Could not create initial session.\n");
	BReference<ProcessSession> sessionReference(session, true);

	ProcessGroup* group = new(std::nothrow) ProcessGroup(1);
	if (group == NULL)
		panic("Could not create initial process group.\n");
	BReference<ProcessGroup> groupReference(group, true);

	group->Publish(session);

	// create the kernel team
	sKernelTeam = Team::Create(1, "kernel_team", true);
	if (sKernelTeam == NULL)
		panic("could not create kernel team!\n");

	sKernelTeam->address_space = VMAddressSpace::Kernel();
	sKernelTeam->SetArgs(sKernelTeam->Name());
	sKernelTeam->state = TEAM_STATE_NORMAL;

	sKernelTeam->saved_set_uid = 0;
	sKernelTeam->real_uid = 0;
	sKernelTeam->effective_uid = 0;
	sKernelTeam->saved_set_gid = 0;
	sKernelTeam->real_gid = 0;
	sKernelTeam->effective_gid = 0;
	sKernelTeam->supplementary_groups = NULL;

	insert_team_into_group(group, sKernelTeam);

	sKernelTeam->io_context = vfs_new_io_context(NULL, false);
	if (sKernelTeam->io_context == NULL)
		panic("could not create io_context for kernel team!\n");

	if (vfs_resize_fd_table(sKernelTeam->io_context, 4096) != B_OK)
		dprintf("Failed to resize FD table for kernel team!\n");

	// stick it in the team hash
	sTeamHash.Insert(sKernelTeam);

	// check safe mode settings
	sDisableUserAddOns = get_safemode_boolean(B_SAFEMODE_DISABLE_USER_ADD_ONS,
		false);

	add_debugger_command_etc("team", &dump_team_info,
		"Dump info about a particular team",
		"[ <id> | <address> | <name> ]\n"
		"Prints information about the specified team. If no argument is given\n"
		"the current team is selected.\n"
		"  <id>       - The ID of the team.\n"
		"  <address>  - The address of the team structure.\n"
		"  <name>     - The team's name.\n", 0);
	add_debugger_command_etc("teams", &dump_teams, "List all teams",
		"\n"
		"Prints a list of all existing teams.\n", 0);

	new(&sNotificationService) TeamNotificationService();

	sNotificationService.Register();

	return B_OK;
}


int32
team_max_teams(void)
{
	return sMaxTeams;
}


int32
team_used_teams(void)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return sUsedTeams;
}


/*!	Returns a death entry of a child team specified by ID (if any).
	The caller must hold the team's lock.

	\param team The team whose dead children list to check.
	\param child The ID of the child whose death entry to look up. Must be > 0.
	\param _deleteEntry Return variable, indicating whether the caller needs to
		delete the returned entry.
	\return The death entry of the matching team, or \c NULL, if no death entry
		for the team was found.
*/
job_control_entry*
team_get_death_entry(Team* team, thread_id child, bool* _deleteEntry)
{
	if (child <= 0)
		return NULL;

	job_control_entry* entry = get_job_control_entry(team->dead_children,
		child);
	if (entry) {
		// remove the entry only if the caller is the parent of the found team
		if (team_get_current_team_id() == entry->thread) {
			team->dead_children.entries.Remove(entry);
			team->dead_children.count--;
			*_deleteEntry = true;
		} else {
			*_deleteEntry = false;
		}
	}

	return entry;
}


/*!	Quick check to see if we have a valid team ID. */
bool
team_is_valid(team_id id)
{
	if (id <= 0)
		return false;

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	return team_get_team_struct_locked(id) != NULL;
}


Team*
team_get_team_struct_locked(team_id id)
{
	return sTeamHash.Lookup(id);
}


void
team_set_controlling_tty(void* tty)
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// set the session's fields
	session->controlling_tty = tty;
	session->foreground_group = -1;
}


void*
team_get_controlling_tty()
{
	// lock the team, so its session won't change while we're playing with it
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// get the session's field
	return session->controlling_tty;
}


status_t
team_set_foreground_process_group(void* tty, pid_t processGroupID)
{
	// lock the team, so its session won't change while we're playing with it
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	TeamLocker teamLocker(team);

	// get and lock the session
	ProcessSession* session = team->group->Session();
	AutoLocker<ProcessSession> sessionLocker(session);

	// check given TTY -- must be the controlling tty of the calling process
	if (session->controlling_tty != tty)
		return ENOTTY;

	// check given process group -- must belong to our session
	{
		InterruptsSpinLocker groupHashLocker(sGroupHashLock);
		ProcessGroup* group = sGroupHash.Lookup(processGroupID);
		if (group == NULL || group->Session() != session)
			return B_BAD_VALUE;
	}

	// If we are a background group, we can do that unharmed only when we
	// ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
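	// (This mirrors the POSIX tcsetpgrp() rule: a background caller that
	// neither ignores nor blocks SIGTTOU is sent SIGTTOU instead, and the
	// call fails with B_INTERRUPTED below.)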
	if (session->foreground_group != -1
		&& session->foreground_group != team->group_id
		&& team->SignalActionFor(SIGTTOU).sa_handler != SIG_IGN
		&& (thread->sig_block_mask & SIGNAL_TO_MASK(SIGTTOU)) == 0) {
		InterruptsSpinLocker signalLocker(team->signal_lock);

		if (!is_team_signal_blocked(team, SIGTTOU)) {
			pid_t groupID = team->group_id;

			signalLocker.Unlock();
			sessionLocker.Unlock();
			teamLocker.Unlock();

			Signal signal(SIGTTOU, SI_USER, B_OK, team->id);
			send_signal_to_process_group(groupID, signal, 0);
			return B_INTERRUPTED;
		}
	}

	session->foreground_group = processGroupID;

	return B_OK;
}


uid_t
team_geteuid(team_id id)
{
	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);
	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return (uid_t)-1;
	return team->effective_uid;
}


/*!	Removes the specified team from the global team hash, from its process
	group, and from its parent.
	It also moves all of its children to the kernel team.

	The caller must hold the following locks:
	- \a team's process group's lock,
	- the kernel team's lock,
	- \a team's parent team's lock (might be the kernel team), and
	- \a team's lock.
*/
void
team_remove_team(Team* team, pid_t& _signalGroup)
{
	Team* parent = team->parent;

	// remember how long this team lasted
	parent->dead_children.kernel_time += team->dead_threads_kernel_time
		+ team->dead_children.kernel_time;
	parent->dead_children.user_time += team->dead_threads_user_time
		+ team->dead_children.user_time;

	// remove the team from the hash table
	InterruptsWriteSpinLocker teamsLocker(sTeamHashLock);
	sTeamHash.Remove(team);
	sUsedTeams--;
	teamsLocker.Unlock();

	// The team can no longer be accessed by ID. Navigation to it is still
	// possible from its process group and its parent and children, but that
	// will be rectified shortly.
	team->state = TEAM_STATE_DEATH;

	// If we're a controlling process (i.e. a session leader with controlling
	// terminal), there's a bit of signaling we have to do. We can't do any of
	// the signaling here due to the bunch of locks we're holding, but we need
	// to determine whom to signal.
3142 _signalGroup = -1; 3143 bool isSessionLeader = false; 3144 if (team->session_id == team->id 3145 && team->group->Session()->controlling_tty != NULL) { 3146 isSessionLeader = true; 3147 3148 ProcessSession* session = team->group->Session(); 3149 3150 AutoLocker<ProcessSession> sessionLocker(session); 3151 3152 session->controlling_tty = NULL; 3153 _signalGroup = session->foreground_group; 3154 } 3155 3156 // remove us from our process group 3157 remove_team_from_group(team); 3158 3159 // move the team's children to the kernel team 3160 while (Team* child = team->children) { 3161 // remove the child from the current team and add it to the kernel team 3162 TeamLocker childLocker(child); 3163 3164 remove_team_from_parent(team, child); 3165 insert_team_into_parent(sKernelTeam, child); 3166 3167 // move job control entries too 3168 sKernelTeam->stopped_children.entries.MoveFrom( 3169 &team->stopped_children.entries); 3170 sKernelTeam->continued_children.entries.MoveFrom( 3171 &team->continued_children.entries); 3172 3173 // If the team was a session leader with controlling terminal, 3174 // we need to send SIGHUP + SIGCONT to all newly-orphaned process 3175 // groups with stopped processes. Due to locking complications we can't 3176 // do that here, so we only check whether we were a reason for the 3177 // child's process group not being an orphan and, if so, schedule a 3178 // later check (cf. orphaned_process_group_check()). 3179 if (isSessionLeader) { 3180 ProcessGroup* childGroup = child->group; 3181 if (childGroup->Session()->id == team->session_id 3182 && childGroup->id != team->group_id) { 3183 childGroup->ScheduleOrphanedCheck(); 3184 } 3185 } 3186 3187 // Note, we don't move the dead children entries. Those will be deleted 3188 // when the team structure is deleted. 3189 } 3190 3191 // remove us from our parent 3192 remove_team_from_parent(parent, team); 3193 } 3194 3195 3196 /*! Kills all threads but the main thread of the team and shuts down user 3197 debugging for it. 3198 To be called on exit of the team's main thread. No locks must be held. 3199 3200 \param team The team in question. 3201 \return The port of the debugger for the team, -1 if none. To be passed to 3202 team_delete_team(). 3203 */ 3204 port_id 3205 team_shutdown_team(Team* team) 3206 { 3207 ASSERT(thread_get_current_thread() == team->main_thread); 3208 3209 TeamLocker teamLocker(team); 3210 3211 // Make sure debugging changes won't happen anymore. 3212 port_id debuggerPort = -1; 3213 while (true) { 3214 // If a debugger change is in progress for the team, we'll have to 3215 // wait until it is done. 3216 ConditionVariableEntry waitForDebuggerEntry; 3217 bool waitForDebugger = false; 3218 3219 InterruptsSpinLocker debugInfoLocker(team->debug_info.lock); 3220 3221 if (team->debug_info.debugger_changed_condition != NULL) { 3222 team->debug_info.debugger_changed_condition->Add( 3223 &waitForDebuggerEntry); 3224 waitForDebugger = true; 3225 } else if (team->debug_info.flags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) { 3226 // The team is being debugged. That will stop with the termination 3227 // of the nub thread. Since we set the team state to death, no one 3228 // can install a debugger anymore. We fetch the debugger's port to 3229 // send it a message at the bitter end. 
3230 debuggerPort = team->debug_info.debugger_port; 3231 } 3232 3233 debugInfoLocker.Unlock(); 3234 3235 if (!waitForDebugger) 3236 break; 3237 3238 // wait for the debugger change to be finished 3239 teamLocker.Unlock(); 3240 3241 waitForDebuggerEntry.Wait(); 3242 3243 teamLocker.Lock(); 3244 } 3245 3246 // Mark the team as shutting down. That will prevent new threads from being 3247 // created and debugger changes from taking place. 3248 team->state = TEAM_STATE_SHUTDOWN; 3249 3250 // delete all timers 3251 team->DeleteUserTimers(false); 3252 3253 // deactivate CPU time user timers for the team 3254 InterruptsSpinLocker timeLocker(team->time_lock); 3255 3256 if (team->HasActiveCPUTimeUserTimers()) 3257 team->DeactivateCPUTimeUserTimers(); 3258 3259 timeLocker.Unlock(); 3260 3261 // kill all threads but the main thread 3262 team_death_entry deathEntry; 3263 deathEntry.condition.Init(team, "team death"); 3264 3265 while (true) { 3266 team->death_entry = &deathEntry; 3267 deathEntry.remaining_threads = 0; 3268 3269 Thread* thread = team->thread_list; 3270 while (thread != NULL) { 3271 if (thread != team->main_thread) { 3272 Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id); 3273 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE); 3274 deathEntry.remaining_threads++; 3275 } 3276 3277 thread = thread->team_next; 3278 } 3279 3280 if (deathEntry.remaining_threads == 0) 3281 break; 3282 3283 // there are threads to wait for 3284 ConditionVariableEntry entry; 3285 deathEntry.condition.Add(&entry); 3286 3287 teamLocker.Unlock(); 3288 3289 entry.Wait(); 3290 3291 teamLocker.Lock(); 3292 } 3293 3294 team->death_entry = NULL; 3295 3296 return debuggerPort; 3297 } 3298 3299 3300 /*! Called on team exit to notify threads waiting on the team and free most 3301 resources associated with it. 3302 The caller shouldn't hold any locks. 3303 */ 3304 void 3305 team_delete_team(Team* team, port_id debuggerPort) 3306 { 3307 // Not quite in our job description, but work that has been left by 3308 // team_remove_team() and that can be done now that we're not holding any 3309 // locks. 3310 orphaned_process_group_check(); 3311 3312 team_id teamID = team->id; 3313 3314 ASSERT(team->num_threads == 0); 3315 3316 // If someone is waiting for this team to be loaded, but it dies 3317 // unexpectedly before being done, we need to notify the waiting 3318 // thread now. 

	TeamLocker teamLocker(team);

	if (team->loading_info != NULL) {
		// there's indeed someone waiting
		team->loading_info->result = B_ERROR;

		// wake up the waiting thread
		team->loading_info->condition.NotifyAll();
		team->loading_info = NULL;
	}

	// notify team watchers

	{
		// we're not reachable from anyone anymore at this point, so we
		// can safely access the list without any locking
		struct team_watcher* watcher;
		while ((watcher = (struct team_watcher*)list_remove_head_item(
				&team->watcher_list)) != NULL) {
			watcher->hook(teamID, watcher->data);
			free(watcher);
		}
	}

	teamLocker.Unlock();

	sNotificationService.Notify(TEAM_REMOVED, team);

	// free team resources

	delete_user_mutex_context(team->user_mutex_context);
	delete_realtime_sem_context(team->realtime_sem_context);
	xsi_sem_undo(team);
	remove_images(team);
	team->address_space->RemoveAndPut();

	team->ReleaseReference();

	// notify the debugger that the team is gone
	user_debug_team_deleted(teamID, debuggerPort);
}


Team*
team_get_kernel_team(void)
{
	return sKernelTeam;
}


team_id
team_get_kernel_team_id(void)
{
	if (!sKernelTeam)
		return 0;

	return sKernelTeam->id;
}


team_id
team_get_current_team_id(void)
{
	return thread_get_current_thread()->team->id;
}


status_t
team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
{
	if (id == sKernelTeam->id) {
		// we're the kernel team, so we don't have to go through all
		// the hassle (locking and hash lookup)
		*_addressSpace = VMAddressSpace::GetKernel();
		return B_OK;
	}

	InterruptsReadSpinLocker teamsLocker(sTeamHashLock);

	Team* team = team_get_team_struct_locked(id);
	if (team == NULL)
		return B_BAD_VALUE;

	team->address_space->Get();
	*_addressSpace = team->address_space;
	return B_OK;
}


/*!	Sets the team's job control state.
	The caller must hold the parent team's lock. Interrupts are allowed to be
	enabled or disabled.
	\a team The team whose job control state shall be set.
	\a newState The new state to be set.
	\a signal The signal the new state was caused by. Can be \c NULL, if none.
		Then the caller is responsible for filling in the following fields of
		the entry before releasing the parent team's lock, unless the new
		state is \c JOB_CONTROL_STATE_NONE:
		- \c signal: The number of the signal causing the state change.
		- \c signaling_user: The real UID of the user sending the signal.
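
	An illustrative call site, with the parent team's lock held ("senderTeam"
	is a hypothetical placeholder):
	\code
	Signal signal(SIGSTOP, SI_USER, B_OK, senderTeam->id);
	team_set_job_control_state(team, JOB_CONTROL_STATE_STOPPED, &signal);
	\endcode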
3420 */ 3421 void 3422 team_set_job_control_state(Team* team, job_control_state newState, 3423 Signal* signal) 3424 { 3425 if (team == NULL || team->job_control_entry == NULL) 3426 return; 3427 3428 // don't touch anything, if the state stays the same or the team is already 3429 // dead 3430 job_control_entry* entry = team->job_control_entry; 3431 if (entry->state == newState || entry->state == JOB_CONTROL_STATE_DEAD) 3432 return; 3433 3434 T(SetJobControlState(team->id, newState, signal)); 3435 3436 // remove from the old list 3437 switch (entry->state) { 3438 case JOB_CONTROL_STATE_NONE: 3439 // entry is in no list ATM 3440 break; 3441 case JOB_CONTROL_STATE_DEAD: 3442 // can't get here 3443 break; 3444 case JOB_CONTROL_STATE_STOPPED: 3445 team->parent->stopped_children.entries.Remove(entry); 3446 break; 3447 case JOB_CONTROL_STATE_CONTINUED: 3448 team->parent->continued_children.entries.Remove(entry); 3449 break; 3450 } 3451 3452 entry->state = newState; 3453 3454 if (signal != NULL) { 3455 entry->signal = signal->Number(); 3456 entry->signaling_user = signal->SendingUser(); 3457 } 3458 3459 // add to new list 3460 team_job_control_children* childList = NULL; 3461 switch (entry->state) { 3462 case JOB_CONTROL_STATE_NONE: 3463 // entry doesn't get into any list 3464 break; 3465 case JOB_CONTROL_STATE_DEAD: 3466 childList = &team->parent->dead_children; 3467 team->parent->dead_children.count++; 3468 break; 3469 case JOB_CONTROL_STATE_STOPPED: 3470 childList = &team->parent->stopped_children; 3471 break; 3472 case JOB_CONTROL_STATE_CONTINUED: 3473 childList = &team->parent->continued_children; 3474 break; 3475 } 3476 3477 if (childList != NULL) { 3478 childList->entries.Add(entry); 3479 team->parent->dead_children.condition_variable.NotifyAll(); 3480 } 3481 } 3482 3483 3484 /*! Inits the given team's exit information, if not yet initialized, to some 3485 generic "killed" status. 3486 The caller must not hold the team's lock. Interrupts must be enabled. 3487 3488 \param team The team whose exit info shall be initialized. 3489 */ 3490 void 3491 team_init_exit_info_on_error(Team* team) 3492 { 3493 TeamLocker teamLocker(team); 3494 3495 if (!team->exit.initialized) { 3496 team->exit.reason = CLD_KILLED; 3497 team->exit.signal = SIGKILL; 3498 team->exit.signaling_user = geteuid(); 3499 team->exit.status = 0; 3500 team->exit.initialized = true; 3501 } 3502 } 3503 3504 3505 /*! Adds a hook to the team that is called as soon as this team goes away. 3506 This call might get public in the future. 
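
	A minimal usage sketch (the hook and someTeamID below are hypothetical):
	\code
	static void
	my_team_gone_hook(team_id teamID, void* data)
	{
		dprintf("team %" B_PRId32 " is gone\n", teamID);
	}

	status_t error = start_watching_team(someTeamID, &my_team_gone_hook, NULL);
	\endcode
	A successful call should later be balanced by stop_watching_team() with
	the same arguments, unless the hook has already fired.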
3507 */ 3508 status_t 3509 start_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data) 3510 { 3511 if (hook == NULL || teamID < B_OK) 3512 return B_BAD_VALUE; 3513 3514 // create the watcher object 3515 team_watcher* watcher = (team_watcher*)malloc(sizeof(team_watcher)); 3516 if (watcher == NULL) 3517 return B_NO_MEMORY; 3518 3519 watcher->hook = hook; 3520 watcher->data = data; 3521 3522 // add watcher, if the team isn't already dying 3523 // get the team 3524 Team* team = Team::GetAndLock(teamID); 3525 if (team == NULL) { 3526 free(watcher); 3527 return B_BAD_TEAM_ID; 3528 } 3529 3530 list_add_item(&team->watcher_list, watcher); 3531 3532 team->UnlockAndReleaseReference(); 3533 3534 return B_OK; 3535 } 3536 3537 3538 status_t 3539 stop_watching_team(team_id teamID, void (*hook)(team_id, void*), void* data) 3540 { 3541 if (hook == NULL || teamID < 0) 3542 return B_BAD_VALUE; 3543 3544 // get team and remove watcher (if present) 3545 Team* team = Team::GetAndLock(teamID); 3546 if (team == NULL) 3547 return B_BAD_TEAM_ID; 3548 3549 // search for watcher 3550 team_watcher* watcher = NULL; 3551 while ((watcher = (team_watcher*)list_get_next_item( 3552 &team->watcher_list, watcher)) != NULL) { 3553 if (watcher->hook == hook && watcher->data == data) { 3554 // got it! 3555 list_remove_item(&team->watcher_list, watcher); 3556 break; 3557 } 3558 } 3559 3560 team->UnlockAndReleaseReference(); 3561 3562 if (watcher == NULL) 3563 return B_ENTRY_NOT_FOUND; 3564 3565 free(watcher); 3566 return B_OK; 3567 } 3568 3569 3570 /*! Allocates a user_thread structure from the team. 3571 The team lock must be held, unless the function is called for the team's 3572 main thread. Interrupts must be enabled. 3573 */ 3574 struct user_thread* 3575 team_allocate_user_thread(Team* team) 3576 { 3577 if (team->user_data == 0) 3578 return NULL; 3579 3580 // take an entry from the free list, if any 3581 if (struct free_user_thread* entry = team->free_user_threads) { 3582 user_thread* thread = entry->thread; 3583 team->free_user_threads = entry->next; 3584 free(entry); 3585 return thread; 3586 } 3587 3588 while (true) { 3589 // enough space left? 3590 size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE); 3591 if (team->user_data_size - team->used_user_data < needed) { 3592 // try to resize the area 3593 if (resize_area(team->user_data_area, 3594 team->user_data_size + B_PAGE_SIZE) != B_OK) { 3595 return NULL; 3596 } 3597 3598 // resized user area successfully -- try to allocate the user_thread 3599 // again 3600 team->user_data_size += B_PAGE_SIZE; 3601 continue; 3602 } 3603 3604 // allocate the user_thread 3605 user_thread* thread 3606 = (user_thread*)(team->user_data + team->used_user_data); 3607 team->used_user_data += needed; 3608 3609 return thread; 3610 } 3611 } 3612 3613 3614 /*! Frees the given user_thread structure. 3615 The team's lock must not be held. Interrupts must be enabled. 3616 \param team The team the user thread was allocated from. 3617 \param userThread The user thread to free. 
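
	Note that the structure is not returned to the heap: it is put on the
	team's free list and may be handed out again by
	team_allocate_user_thread().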
3618 */ 3619 void 3620 team_free_user_thread(Team* team, struct user_thread* userThread) 3621 { 3622 if (userThread == NULL) 3623 return; 3624 3625 // create a free list entry 3626 free_user_thread* entry 3627 = (free_user_thread*)malloc(sizeof(free_user_thread)); 3628 if (entry == NULL) { 3629 // we have to leak the user thread :-/ 3630 return; 3631 } 3632 3633 // add to free list 3634 TeamLocker teamLocker(team); 3635 3636 entry->thread = userThread; 3637 entry->next = team->free_user_threads; 3638 team->free_user_threads = entry; 3639 } 3640 3641 3642 // #pragma mark - Associated data interface 3643 3644 3645 AssociatedData::AssociatedData() 3646 : 3647 fOwner(NULL) 3648 { 3649 } 3650 3651 3652 AssociatedData::~AssociatedData() 3653 { 3654 } 3655 3656 3657 void 3658 AssociatedData::OwnerDeleted(AssociatedDataOwner* owner) 3659 { 3660 } 3661 3662 3663 AssociatedDataOwner::AssociatedDataOwner() 3664 { 3665 mutex_init(&fLock, "associated data owner"); 3666 } 3667 3668 3669 AssociatedDataOwner::~AssociatedDataOwner() 3670 { 3671 mutex_destroy(&fLock); 3672 } 3673 3674 3675 bool 3676 AssociatedDataOwner::AddData(AssociatedData* data) 3677 { 3678 MutexLocker locker(fLock); 3679 3680 if (data->Owner() != NULL) 3681 return false; 3682 3683 data->AcquireReference(); 3684 fList.Add(data); 3685 data->SetOwner(this); 3686 3687 return true; 3688 } 3689 3690 3691 bool 3692 AssociatedDataOwner::RemoveData(AssociatedData* data) 3693 { 3694 MutexLocker locker(fLock); 3695 3696 if (data->Owner() != this) 3697 return false; 3698 3699 data->SetOwner(NULL); 3700 fList.Remove(data); 3701 3702 locker.Unlock(); 3703 3704 data->ReleaseReference(); 3705 3706 return true; 3707 } 3708 3709 3710 void 3711 AssociatedDataOwner::PrepareForDeletion() 3712 { 3713 MutexLocker locker(fLock); 3714 3715 // move all data to a temporary list and unset the owner 3716 DataList list; 3717 list.MoveFrom(&fList); 3718 3719 for (DataList::Iterator it = list.GetIterator(); 3720 AssociatedData* data = it.Next();) { 3721 data->SetOwner(NULL); 3722 } 3723 3724 locker.Unlock(); 3725 3726 // call the notification hooks and release our references 3727 while (AssociatedData* data = list.RemoveHead()) { 3728 data->OwnerDeleted(this); 3729 data->ReleaseReference(); 3730 } 3731 } 3732 3733 3734 /*! Associates data with the current team. 3735 When the team is deleted, the data object is notified. 3736 The team acquires a reference to the object. 3737 3738 \param data The data object. 3739 \return \c true on success, \c false otherwise. Fails only when the supplied 3740 data object is already associated with another owner. 3741 */ 3742 bool 3743 team_associate_data(AssociatedData* data) 3744 { 3745 return thread_get_current_thread()->team->AddData(data); 3746 } 3747 3748 3749 /*! Dissociates data from the current team. 3750 Balances an earlier call to team_associate_data(). 3751 3752 \param data The data object. 3753 \return \c true on success, \c false otherwise. Fails only when the data 3754 object is not associated with the current team. 
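
	Illustrative pairing with team_associate_data() ("myData" stands for a
	hypothetical AssociatedData subclass instance):
	\code
	if (team_associate_data(myData)) {
		// ... the team now holds a reference to myData ...
		team_dissociate_data(myData);
	}
	\endcode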
3755 */ 3756 bool 3757 team_dissociate_data(AssociatedData* data) 3758 { 3759 return thread_get_current_thread()->team->RemoveData(data); 3760 } 3761 3762 3763 // #pragma mark - Public kernel API 3764 3765 3766 thread_id 3767 load_image(int32 argCount, const char** args, const char** env) 3768 { 3769 return load_image_etc(argCount, args, env, B_NORMAL_PRIORITY, 3770 B_CURRENT_TEAM, B_WAIT_TILL_LOADED); 3771 } 3772 3773 3774 thread_id 3775 load_image_etc(int32 argCount, const char* const* args, 3776 const char* const* env, int32 priority, team_id parentID, uint32 flags) 3777 { 3778 // we need to flatten the args and environment 3779 3780 if (args == NULL) 3781 return B_BAD_VALUE; 3782 3783 // determine total needed size 3784 int32 argSize = 0; 3785 for (int32 i = 0; i < argCount; i++) 3786 argSize += strlen(args[i]) + 1; 3787 3788 int32 envCount = 0; 3789 int32 envSize = 0; 3790 while (env != NULL && env[envCount] != NULL) 3791 envSize += strlen(env[envCount++]) + 1; 3792 3793 int32 size = (argCount + envCount + 2) * sizeof(char*) + argSize + envSize; 3794 if (size > MAX_PROCESS_ARGS_SIZE) 3795 return B_TOO_MANY_ARGS; 3796 3797 // allocate space 3798 char** flatArgs = (char**)malloc(size); 3799 if (flatArgs == NULL) 3800 return B_NO_MEMORY; 3801 3802 char** slot = flatArgs; 3803 char* stringSpace = (char*)(flatArgs + argCount + envCount + 2); 3804 3805 // copy arguments and environment 3806 for (int32 i = 0; i < argCount; i++) { 3807 int32 argSize = strlen(args[i]) + 1; 3808 memcpy(stringSpace, args[i], argSize); 3809 *slot++ = stringSpace; 3810 stringSpace += argSize; 3811 } 3812 3813 *slot++ = NULL; 3814 3815 for (int32 i = 0; i < envCount; i++) { 3816 int32 envSize = strlen(env[i]) + 1; 3817 memcpy(stringSpace, env[i], envSize); 3818 *slot++ = stringSpace; 3819 stringSpace += envSize; 3820 } 3821 3822 *slot++ = NULL; 3823 3824 thread_id thread = load_image_internal(flatArgs, size, argCount, envCount, 3825 B_NORMAL_PRIORITY, parentID, B_WAIT_TILL_LOADED, -1, 0); 3826 3827 free(flatArgs); 3828 // load_image_internal() unset our variable if it took over ownership 3829 3830 return thread; 3831 } 3832 3833 3834 status_t 3835 wait_for_team(team_id id, status_t* _returnCode) 3836 { 3837 // check whether the team exists 3838 InterruptsReadSpinLocker teamsLocker(sTeamHashLock); 3839 3840 Team* team = team_get_team_struct_locked(id); 3841 if (team == NULL) 3842 return B_BAD_TEAM_ID; 3843 3844 id = team->id; 3845 3846 teamsLocker.Unlock(); 3847 3848 // wait for the main thread (it has the same ID as the team) 3849 return wait_for_thread(id, _returnCode); 3850 } 3851 3852 3853 status_t 3854 kill_team(team_id id) 3855 { 3856 InterruptsReadSpinLocker teamsLocker(sTeamHashLock); 3857 3858 Team* team = team_get_team_struct_locked(id); 3859 if (team == NULL) 3860 return B_BAD_TEAM_ID; 3861 3862 id = team->id; 3863 3864 teamsLocker.Unlock(); 3865 3866 if (team == sKernelTeam) 3867 return B_NOT_ALLOWED; 3868 3869 // Just kill the team's main thread (it has same ID as the team). The 3870 // cleanup code there will take care of the team. 
3871 return kill_thread(id); 3872 } 3873 3874 3875 status_t 3876 _get_team_info(team_id id, team_info* info, size_t size) 3877 { 3878 // get the team 3879 Team* team = Team::Get(id); 3880 if (team == NULL) 3881 return B_BAD_TEAM_ID; 3882 BReference<Team> teamReference(team, true); 3883 3884 // fill in the info 3885 return fill_team_info(team, info, size); 3886 } 3887 3888 3889 status_t 3890 _get_next_team_info(int32* cookie, team_info* info, size_t size) 3891 { 3892 int32 slot = *cookie; 3893 if (slot < 1) 3894 slot = 1; 3895 3896 InterruptsReadSpinLocker locker(sTeamHashLock); 3897 3898 team_id lastTeamID = peek_next_thread_id(); 3899 // TODO: This is broken, since the id can wrap around! 3900 3901 // get next valid team 3902 Team* team = NULL; 3903 while (slot < lastTeamID && !(team = team_get_team_struct_locked(slot))) 3904 slot++; 3905 3906 if (team == NULL) 3907 return B_BAD_TEAM_ID; 3908 3909 // get a reference to the team and unlock 3910 BReference<Team> teamReference(team); 3911 locker.Unlock(); 3912 3913 // fill in the info 3914 *cookie = ++slot; 3915 return fill_team_info(team, info, size); 3916 } 3917 3918 3919 status_t 3920 _get_team_usage_info(team_id id, int32 who, team_usage_info* info, size_t size) 3921 { 3922 if (size != sizeof(team_usage_info)) 3923 return B_BAD_VALUE; 3924 3925 return common_get_team_usage_info(id, who, info, 0); 3926 } 3927 3928 3929 pid_t 3930 getpid(void) 3931 { 3932 return thread_get_current_thread()->team->id; 3933 } 3934 3935 3936 pid_t 3937 getppid() 3938 { 3939 return _getppid(0); 3940 } 3941 3942 3943 pid_t 3944 getpgid(pid_t id) 3945 { 3946 if (id < 0) { 3947 errno = EINVAL; 3948 return -1; 3949 } 3950 3951 if (id == 0) { 3952 // get process group of the calling process 3953 Team* team = thread_get_current_thread()->team; 3954 TeamLocker teamLocker(team); 3955 return team->group_id; 3956 } 3957 3958 // get the team 3959 Team* team = Team::GetAndLock(id); 3960 if (team == NULL) { 3961 errno = ESRCH; 3962 return -1; 3963 } 3964 3965 // get the team's process group ID 3966 pid_t groupID = team->group_id; 3967 3968 team->UnlockAndReleaseReference(); 3969 3970 return groupID; 3971 } 3972 3973 3974 pid_t 3975 getsid(pid_t id) 3976 { 3977 if (id < 0) { 3978 errno = EINVAL; 3979 return -1; 3980 } 3981 3982 if (id == 0) { 3983 // get session of the calling process 3984 Team* team = thread_get_current_thread()->team; 3985 TeamLocker teamLocker(team); 3986 return team->session_id; 3987 } 3988 3989 // get the team 3990 Team* team = Team::GetAndLock(id); 3991 if (team == NULL) { 3992 errno = ESRCH; 3993 return -1; 3994 } 3995 3996 // get the team's session ID 3997 pid_t sessionID = team->session_id; 3998 3999 team->UnlockAndReleaseReference(); 4000 4001 return sessionID; 4002 } 4003 4004 4005 // #pragma mark - User syscalls 4006 4007 4008 status_t 4009 _user_exec(const char* userPath, const char* const* userFlatArgs, 4010 size_t flatArgsSize, int32 argCount, int32 envCount, mode_t umask) 4011 { 4012 // NOTE: Since this function normally doesn't return, don't use automatic 4013 // variables that need destruction in the function scope. 
4014 char path[B_PATH_NAME_LENGTH]; 4015 4016 if (!IS_USER_ADDRESS(userPath) || !IS_USER_ADDRESS(userFlatArgs) 4017 || user_strlcpy(path, userPath, sizeof(path)) < B_OK) 4018 return B_BAD_ADDRESS; 4019 4020 // copy and relocate the flat arguments 4021 char** flatArgs; 4022 status_t error = copy_user_process_args(userFlatArgs, flatArgsSize, 4023 argCount, envCount, flatArgs); 4024 4025 if (error == B_OK) { 4026 error = exec_team(path, flatArgs, _ALIGN(flatArgsSize), argCount, 4027 envCount, umask); 4028 // this one only returns in case of error 4029 } 4030 4031 free(flatArgs); 4032 return error; 4033 } 4034 4035 4036 thread_id 4037 _user_fork(void) 4038 { 4039 return fork_team(); 4040 } 4041 4042 4043 pid_t 4044 _user_wait_for_child(thread_id child, uint32 flags, siginfo_t* userInfo, 4045 team_usage_info* usageInfo) 4046 { 4047 if (userInfo != NULL && !IS_USER_ADDRESS(userInfo)) 4048 return B_BAD_ADDRESS; 4049 if (usageInfo != NULL && !IS_USER_ADDRESS(usageInfo)) 4050 return B_BAD_ADDRESS; 4051 4052 siginfo_t info; 4053 team_usage_info usage_info; 4054 pid_t foundChild = wait_for_child(child, flags, info, usage_info); 4055 if (foundChild < 0) 4056 return syscall_restart_handle_post(foundChild); 4057 4058 // copy info back to userland 4059 if (userInfo != NULL && user_memcpy(userInfo, &info, sizeof(info)) != B_OK) 4060 return B_BAD_ADDRESS; 4061 // copy usage_info back to userland 4062 if (usageInfo != NULL && user_memcpy(usageInfo, &usage_info, 4063 sizeof(usage_info)) != B_OK) { 4064 return B_BAD_ADDRESS; 4065 } 4066 4067 return foundChild; 4068 } 4069 4070 4071 pid_t 4072 _user_process_info(pid_t process, int32 which) 4073 { 4074 pid_t result; 4075 switch (which) { 4076 case SESSION_ID: 4077 result = getsid(process); 4078 break; 4079 case GROUP_ID: 4080 result = getpgid(process); 4081 break; 4082 case PARENT_ID: 4083 result = _getppid(process); 4084 break; 4085 default: 4086 return B_BAD_VALUE; 4087 } 4088 4089 return result >= 0 ? result : errno; 4090 } 4091 4092 4093 pid_t 4094 _user_setpgid(pid_t processID, pid_t groupID) 4095 { 4096 // setpgid() can be called either by the parent of the target process or 4097 // by the process itself to do one of two things: 4098 // * Create a new process group with the target process' ID and the target 4099 // process as group leader. 4100 // * Set the target process' process group to an already existing one in the 4101 // same session. 4102 4103 if (groupID < 0) 4104 return B_BAD_VALUE; 4105 4106 Team* currentTeam = thread_get_current_thread()->team; 4107 if (processID == 0) 4108 processID = currentTeam->id; 4109 4110 // if the group ID is not specified, use the target process' ID 4111 if (groupID == 0) 4112 groupID = processID; 4113 4114 // We loop when running into the following race condition: We create a new 4115 // process group, because there isn't one with that ID yet, but later when 4116 // trying to publish it, we find that someone else created and published 4117 // a group with that ID in the meantime. In that case we just restart the 4118 // whole action. 4119 while (true) { 4120 // Look up the process group by ID. If it doesn't exist yet and we are 4121 // allowed to create a new one, do that. 
		ProcessGroup* group = ProcessGroup::Get(groupID);
		bool newGroup = false;
		if (group == NULL) {
			if (groupID != processID)
				return B_NOT_ALLOWED;

			group = new(std::nothrow) ProcessGroup(groupID);
			if (group == NULL)
				return B_NO_MEMORY;

			newGroup = true;
		}
		BReference<ProcessGroup> groupReference(group, true);

		// get the target team
		Team* team = Team::Get(processID);
		if (team == NULL)
			return ESRCH;
		BReference<Team> teamReference(team, true);

		// lock the new process group and the team's current process group
		while (true) {
			// lock the team's current process group
			team->LockProcessGroup();

			ProcessGroup* oldGroup = team->group;
			if (oldGroup == NULL) {
				// This can only happen if the team is exiting.
				ASSERT(team->state >= TEAM_STATE_SHUTDOWN);
				return ESRCH;
			}

			if (oldGroup == group) {
				// it's the same as the target group, so just bail out
				oldGroup->Unlock();
				return group->id;
			}

			oldGroup->AcquireReference();

			// lock the target process group, if locking order allows it
			if (newGroup || group->id > oldGroup->id) {
				group->Lock();
				break;
			}

			// try to lock
			if (group->TryLock())
				break;

			// no dice -- unlock the team's current process group and relock
			// in the correct order
			oldGroup->Unlock();

			group->Lock();
			oldGroup->Lock();

			// check whether things are still the same
			TeamLocker teamLocker(team);
			if (team->group == oldGroup)
				break;

			// something changed -- unlock everything and retry
			teamLocker.Unlock();
			oldGroup->Unlock();
			group->Unlock();
			oldGroup->ReleaseReference();
		}

		// we now have references and locks of both new and old process group
		BReference<ProcessGroup> oldGroupReference(team->group, true);
		AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
		AutoLocker<ProcessGroup> groupLocker(group, true);

		// also lock the target team and its parent
		team->LockTeamAndParent(false);
		TeamLocker parentLocker(team->parent, true);
		TeamLocker teamLocker(team, true);

		// perform the checks
		if (team == currentTeam) {
			// we set our own group

			// we must not change our process group ID if we're a session
			// leader
			if (is_session_leader(currentTeam))
				return B_NOT_ALLOWED;
		} else {
			// Calling team != target team. The target team must be a child
			// of the calling team and in the same session. (If that's the
			// case it isn't a session leader either.)
			if (team->parent != currentTeam
				|| team->session_id != currentTeam->session_id) {
				return B_NOT_ALLOWED;
			}

			// The call is also supposed to fail on a child, when the child
			// has already executed exec*() [EACCES].
			if ((team->flags & TEAM_FLAG_EXEC_DONE) != 0)
				return EACCES;
		}

		// If we created a new process group, publish it now.
		if (newGroup) {
			InterruptsSpinLocker groupHashLocker(sGroupHashLock);
			if (sGroupHash.Lookup(groupID)) {
				// A group with the group ID appeared since we first checked.
				// Back to square one.
				continue;
			}

			group->PublishLocked(team->group->Session());
		} else if (group->Session()->id != team->session_id) {
			// The existing target process group belongs to a different
			// session. That's not allowed.
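			// (POSIX specifies the EPERM error for this case.)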
			return B_NOT_ALLOWED;
		}

		// Everything is ready -- set the group.
		remove_team_from_group(team);
		insert_team_into_group(group, team);

		// Changing the process group might have changed the situation for a
		// parent waiting in wait_for_child(). Hence we notify it.
		team->parent->dead_children.condition_variable.NotifyAll();

		return group->id;
	}
}


pid_t
_user_setsid(void)
{
	Team* team = thread_get_current_thread()->team;

	// create a new process group and session
	ProcessGroup* group = new(std::nothrow) ProcessGroup(team->id);
	if (group == NULL)
		return B_NO_MEMORY;
	BReference<ProcessGroup> groupReference(group, true);
	AutoLocker<ProcessGroup> groupLocker(group);

	ProcessSession* session = new(std::nothrow) ProcessSession(group->id);
	if (session == NULL)
		return B_NO_MEMORY;
	BReference<ProcessSession> sessionReference(session, true);

	// lock the team's current process group, parent, and the team itself
	team->LockTeamParentAndProcessGroup();
	BReference<ProcessGroup> oldGroupReference(team->group);
	AutoLocker<ProcessGroup> oldGroupLocker(team->group, true);
	TeamLocker parentLocker(team->parent, true);
	TeamLocker teamLocker(team, true);

	// the team must not already be a process group leader
	if (is_process_group_leader(team))
		return B_NOT_ALLOWED;

	// remove the team from the old and add it to the new process group
	remove_team_from_group(team);
	group->Publish(session);
	insert_team_into_group(group, team);

	// Changing the process group might have changed the situation for a
	// parent waiting in wait_for_child(). Hence we notify it.
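	// (The wakeup is conservative: a waiting wait_for_child() is expected to
	// re-check its wait condition and simply go back to sleep if nothing
	// relevant has changed.)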
	team->parent->dead_children.condition_variable.NotifyAll();

	return group->id;
}


status_t
_user_wait_for_team(team_id id, status_t* _userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (_userReturnCode != NULL && !IS_USER_ADDRESS(_userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_team(id, &returnCode);
	if (status >= B_OK && _userReturnCode != NULL) {
		if (user_memcpy(_userReturnCode, &returnCode, sizeof(returnCode))
				!= B_OK)
			return B_BAD_ADDRESS;
		return B_OK;
	}

	return syscall_restart_handle_post(status);
}


thread_id
_user_load_image(const char* const* userFlatArgs, size_t flatArgsSize,
	int32 argCount, int32 envCount, int32 priority, uint32 flags,
	port_id errorPort, uint32 errorToken)
{
	TRACE(("_user_load_image: argc = %" B_PRId32 "\n", argCount));

	if (argCount < 1)
		return B_BAD_VALUE;

	// copy and relocate the flat arguments
	char** flatArgs;
	status_t error = copy_user_process_args(userFlatArgs, flatArgsSize,
		argCount, envCount, flatArgs);
	if (error != B_OK)
		return error;

	thread_id thread = load_image_internal(flatArgs, _ALIGN(flatArgsSize),
		argCount, envCount, priority, B_CURRENT_TEAM, flags, errorPort,
		errorToken);

	free(flatArgs);
		// load_image_internal() unset our variable if it took over ownership

	return thread;
}


void
_user_exit_team(status_t returnValue)
{
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;

	// set this thread's exit status
	thread->exit.status = returnValue;

	// set the team exit status
	TeamLocker teamLocker(team);

	if (!team->exit.initialized) {
		team->exit.reason = CLD_EXITED;
		team->exit.signal = 0;
		team->exit.signaling_user = 0;
		team->exit.status = returnValue;
		team->exit.initialized = true;
	}

	teamLocker.Unlock();

	// Stop the thread, if the team is being debugged and that has been
	// requested.
	if ((atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_PREVENT_EXIT) != 0)
		user_debug_stop_thread();

	// Send this thread a SIGKILL. This makes sure the thread will not return
	// to userland. The signal handling code forwards the signal to the main
	// thread (if that's not already this one), which will take the team down.
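	// (SIGKILL can be neither blocked nor handled, so this reliably forces
	// the thread onto the kernel's exit path.)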
	Signal signal(SIGKILL, SI_USER, B_OK, team->id);
	send_signal_to_thread(thread, signal, 0);
}


status_t
_user_kill_team(team_id team)
{
	return kill_team(team);
}


status_t
_user_get_team_info(team_id id, team_info* userInfo, size_t size)
{
	status_t status;
	team_info info;

	if (size > sizeof(team_info))
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_team_info(id, &info, size);
	if (status == B_OK) {
		if (user_memcpy(userInfo, &info, size) < B_OK)
			return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_next_team_info(int32* userCookie, team_info* userInfo, size_t size)
{
	status_t status;
	team_info info;
	int32 cookie;

	if (size > sizeof(team_info))
		return B_BAD_VALUE;

	if (!IS_USER_ADDRESS(userCookie)
		|| !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_team_info(&cookie, &info, size);
	if (status != B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, size) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


team_id
_user_get_current_team(void)
{
	return team_get_current_team_id();
}


status_t
_user_get_team_usage_info(team_id team, int32 who, team_usage_info* userInfo,
	size_t size)
{
	if (size != sizeof(team_usage_info))
		return B_BAD_VALUE;

	team_usage_info info;
	status_t status = common_get_team_usage_info(team, who, &info,
		B_CHECK_PERMISSION);

	if (userInfo == NULL || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(userInfo, &info, size) != B_OK) {
		return B_BAD_ADDRESS;
	}

	return status;
}


status_t
_user_get_extended_team_info(team_id teamID, uint32 flags, void* buffer,
	size_t size, size_t* _sizeNeeded)
{
	// check parameters
	if ((buffer != NULL && !IS_USER_ADDRESS(buffer))
		|| (buffer == NULL && size > 0)
		|| _sizeNeeded == NULL || !IS_USER_ADDRESS(_sizeNeeded)) {
		return B_BAD_ADDRESS;
	}

	KMessage info;

	if ((flags & B_TEAM_INFO_BASIC) != 0) {
		// allocate memory for a copy of the needed team data
		struct ExtendedTeamData {
			team_id	id;
			pid_t	group_id;
			pid_t	session_id;
			uid_t	real_uid;
			gid_t	real_gid;
			uid_t	effective_uid;
			gid_t	effective_gid;
			char	name[B_OS_NAME_LENGTH];
		} teamClone;

		io_context* ioContext;
		{
			// get the team structure
			Team* team = Team::GetAndLock(teamID);
			if (team == NULL)
				return B_BAD_TEAM_ID;
			BReference<Team> teamReference(team, true);
			TeamLocker teamLocker(team, true);

			// copy the data
			teamClone.id = team->id;
			strlcpy(teamClone.name, team->Name(), sizeof(teamClone.name));
			teamClone.group_id = team->group_id;
			teamClone.session_id = team->session_id;
			teamClone.real_uid = team->real_uid;
			teamClone.real_gid = team->real_gid;
			teamClone.effective_uid = team->effective_uid;
			teamClone.effective_gid = team->effective_gid;

			// also fetch a reference to the I/O context
			ioContext = team->io_context;
			vfs_get_io_context(ioContext);
		}
		CObjectDeleter<io_context, void, vfs_put_io_context>
			ioContextPutter(ioContext);

		// add the basic data to the info message
		if (info.AddInt32("id", teamClone.id) != B_OK
			|| info.AddString("name", teamClone.name) != B_OK
			|| info.AddInt32("process group", teamClone.group_id) != B_OK
			|| info.AddInt32("session", teamClone.session_id) != B_OK
			|| info.AddInt32("uid", teamClone.real_uid) != B_OK
			|| info.AddInt32("gid", teamClone.real_gid) != B_OK
			|| info.AddInt32("euid", teamClone.effective_uid) != B_OK
			|| info.AddInt32("egid", teamClone.effective_gid) != B_OK) {
			return B_NO_MEMORY;
		}

		// get the current working directory from the I/O context
		dev_t cwdDevice;
		ino_t cwdDirectory;
		{
			MutexLocker ioContextLocker(ioContext->io_mutex);
			vfs_vnode_to_node_ref(ioContext->cwd, &cwdDevice, &cwdDirectory);
		}

		if (info.AddInt32("cwd device", cwdDevice) != B_OK
			|| info.AddInt64("cwd directory", cwdDirectory) != B_OK) {
			return B_NO_MEMORY;
		}
	}

	// TODO: Support the other flags!

	// copy the needed size and, if it fits, the message back to userland
	size_t sizeNeeded = info.ContentSize();
	if (user_memcpy(_sizeNeeded, &sizeNeeded, sizeof(sizeNeeded)) != B_OK)
		return B_BAD_ADDRESS;

	if (sizeNeeded > size)
		return B_BUFFER_OVERFLOW;

	if (user_memcpy(buffer, info.Buffer(), sizeNeeded) != B_OK)
		return B_BAD_ADDRESS;

	return B_OK;
}
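

// For illustration only: a hypothetical userland caller of the syscall above
// would typically use the two-step size-probing pattern that the
// B_BUFFER_OVERFLOW return value suggests. The _kern_get_extended_team_info()
// stub name is assumed here, as is the caller's error handling:
//
//	size_t sizeNeeded = 0;
//	status_t status = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//		NULL, 0, &sizeNeeded);
//	if (status == B_BUFFER_OVERFLOW) {
//		void* buffer = malloc(sizeNeeded);
//		if (buffer == NULL)
//			return B_NO_MEMORY;
//		status = _kern_get_extended_team_info(teamID, B_TEAM_INFO_BASIC,
//			buffer, sizeNeeded, &sizeNeeded);
//		// on B_OK, "buffer" holds the flattened KMessage built above
//		free(buffer);
//	}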