/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Threading routines */


#include <thread.h>

#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

#include <algorithm>

#include <OS.h>

#include <util/AutoLock.h>

#include <arch/debug.h>
#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <cpu.h>
#include <int.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <real_time_clock.h>
#include <slab/Slab.h>
#include <smp.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <tls.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <wait_for_objects.h>

#include "TeamThreadTables.h"


//#define TRACE_THREAD
#ifdef TRACE_THREAD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define THREAD_MAX_MESSAGE_SIZE 65536


// #pragma mark - ThreadHashTable


typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;


// thread list
static Thread sIdleThreads[SMP_MAX_CPUS];
static ThreadHashTable sThreadHash;
static rw_spinlock sThreadHashLock = B_RW_SPINLOCK_INITIALIZER;
static thread_id sNextThreadID = 2;
	// ID 1 is allocated for the kernel by Team::Team() behind our back

// some arbitrarily chosen limits -- should probably depend on the available
// memory
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;

spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;


struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
	Thread* thread;
	team_id teamID;

	UndertakerEntry(Thread* thread, team_id teamID)
		:
		thread(thread),
		teamID(teamID)
	{
	}
};


struct ThreadEntryArguments {
	status_t (*kernelFunction)(void* argument);
	void* argument;
	bool enterUserland;
};

struct UserThreadEntryArguments : ThreadEntryArguments {
	addr_t userlandEntry;
	void* userlandArgument1;
	void* userlandArgument2;
	pthread_t pthread;
	arch_fork_arg* forkArgs;
	uint32 flags;
};


class ThreadNotificationService : public DefaultNotificationService {
public:
	ThreadNotificationService()
		: DefaultNotificationService("threads")
	{
	}

	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
		Thread* thread = NULL)
	{
		char eventBuffer[180];
		KMessage event;
		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
		event.AddInt32("event", eventCode);
		event.AddInt32("team", teamID);
		event.AddInt32("thread", threadID);
		if (thread != NULL)
			event.AddPointer("threadStruct", thread);

		DefaultNotificationService::Notify(event, eventCode);
	}

	void Notify(uint32 eventCode, Thread* thread)
	{
		return Notify(eventCode, thread->team->id, thread->id, thread);
	}
};


static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
static spinlock sUndertakerLock = B_SPINLOCK_INITIALIZER;
static ConditionVariable sUndertakerCondition;
static ThreadNotificationService sNotificationService;


// object cache to allocate thread structures from
static object_cache* sThreadCache;


// #pragma mark - Thread


/*! Constructs a thread.

	\param name The thread's name.
	\param threadID The ID to be assigned to the new thread. If
		\code < 0 \endcode a fresh one is allocated.
	\param cpu The CPU the thread shall be assigned.
*/
Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
	:
	flags(0),
	serial_number(-1),
	hash_next(NULL),
	team_next(NULL),
	priority(-1),
	io_priority(-1),
	cpu(cpu),
	previous_cpu(NULL),
	pinned_to_cpu(0),
	sig_block_mask(0),
	sigsuspend_original_unblocked_mask(0),
	user_signal_context(NULL),
	signal_stack_base(0),
	signal_stack_size(0),
	signal_stack_enabled(false),
	in_kernel(true),
	has_yielded(false),
	user_thread(NULL),
	fault_handler(0),
	page_faults_allowed(1),
	team(NULL),
	select_infos(NULL),
	kernel_stack_area(-1),
	kernel_stack_base(0),
	user_stack_area(-1),
	user_stack_base(0),
	user_local_storage(0),
	kernel_errno(0),
	user_time(0),
	kernel_time(0),
	last_time(0),
	cpu_clock_offset(0),
	post_interrupt_callback(NULL),
	post_interrupt_data(NULL)
{
	id = threadID >= 0 ? threadID : allocate_thread_id();
	visible = false;

	// init locks
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&scheduler_lock);
	B_INITIALIZE_RW_SPINLOCK(&team_lock);

	// init name
	if (name != NULL)
		strlcpy(this->name, name, B_OS_NAME_LENGTH);
	else
		strcpy(this->name, "unnamed thread");

	exit.status = 0;

	list_init(&exit.waiters);

	exit.sem = -1;
	msg.write_sem = -1;
	msg.read_sem = -1;

	// add to thread table -- yet invisible
	InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Insert(this);
}


Thread::~Thread()
{
	// Delete resources that should actually be deleted by the thread itself
	// when it exits, but that might still exist if the thread was never run.

	if (user_stack_area >= 0)
		delete_area(user_stack_area);

	DeleteUserTimers(false);

	// delete the resources that may remain in either case

	if (kernel_stack_area >= 0)
		delete_area(kernel_stack_area);

	fPendingSignals.Clear();

	if (exit.sem >= 0)
		delete_sem(exit.sem);
	if (msg.write_sem >= 0)
		delete_sem(msg.write_sem);
	if (msg.read_sem >= 0)
		delete_sem(msg.read_sem);

	scheduler_on_thread_destroy(this);

	mutex_destroy(&fLock);

	// remove from thread table
	InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Remove(this);
}


/*static*/ status_t
Thread::Create(const char* name, Thread*& _thread)
{
	Thread* thread = new Thread(name, -1, NULL);
	if (thread == NULL)
		return B_NO_MEMORY;

	status_t error = thread->Init(false);
	if (error != B_OK) {
		delete thread;
		return error;
	}

	_thread = thread;
	return B_OK;
}


/*static*/ Thread*
Thread::Get(thread_id id)
{
	InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread != NULL)
		thread->AcquireReference();
	return thread;
}


/*static*/ Thread*
Thread::GetAndLock(thread_id id)
{
	// look it up and acquire a reference
	InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread == NULL)
		return NULL;

	thread->AcquireReference();
	threadHashLocker.Unlock();

	// lock and check whether it is still in the hash table
	thread->Lock();
	threadHashLocker.Lock();

	if (sThreadHash.Lookup(id) == thread)
		return thread;

	threadHashLocker.Unlock();

	// nope, the thread is no longer in the hash table
	thread->UnlockAndReleaseReference();

	return NULL;
}


/*static*/ Thread*
Thread::GetDebug(thread_id id)
{
	return sThreadHash.Lookup(id, false);
}


/*static*/ bool
Thread::IsAlive(thread_id id)
{
	InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
	return sThreadHash.Lookup(id) != NULL;
}


void*
Thread::operator new(size_t size)
{
	return object_cache_alloc(sThreadCache, 0);
}


void*
Thread::operator new(size_t, void* pointer)
{
	return pointer;
}


void
Thread::operator delete(void* pointer, size_t size)
{
	object_cache_free(sThreadCache, pointer, 0);
}


status_t
Thread::Init(bool idleThread)
{
	status_t error = scheduler_on_thread_create(this, idleThread);
	if (error != B_OK)
		return error;

	char temp[64];
	snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id);
	exit.sem = create_sem(0, temp);
	if (exit.sem < 0)
		return exit.sem;

	snprintf(temp, sizeof(temp), "%s send", name);
	msg.write_sem = create_sem(1, temp);
	if (msg.write_sem < 0)
		return msg.write_sem;

	snprintf(temp, sizeof(temp), "%s receive", name);
	msg.read_sem = create_sem(0, temp);
	if (msg.read_sem < 0)
		return msg.read_sem;

	error = arch_thread_init_thread_struct(this);
	if (error != B_OK)
		return error;

	return B_OK;
}
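

// Usage sketch (illustrative only -- this exact call site is hypothetical,
// but Get(), GetAndLock() and UnlockAndReleaseReference() are the methods
// defined in this file): looking up a thread by ID and releasing the
// acquired reference again afterwards.
//
//	Thread* thread = Thread::GetAndLock(id);
//	if (thread == NULL)
//		return B_BAD_THREAD_ID;
//	// ... inspect or modify the thread while it is locked ...
//	thread->UnlockAndReleaseReference();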


/*! Checks whether the thread is still in the thread hash table.
*/
bool
Thread::IsAlive() const
{
	InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);

	return sThreadHash.Lookup(id) != NULL;
}


void
Thread::ResetSignalsOnExec()
{
	// We are supposed to keep the pending signals and the signal mask. Only
	// the signal stack, if set, shall be unset.

	sigsuspend_original_unblocked_mask = 0;
	user_signal_context = NULL;
	signal_stack_base = 0;
	signal_stack_size = 0;
	signal_stack_enabled = false;
}


/*! Adds the given user timer to the thread and, if user-defined, assigns it
	an ID.

	The caller must hold the thread's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Thread::AddUserTimer(UserTimer* timer)
{
	// If the timer is user-defined, check the timer limit and increment the
	// user-defined count.
	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*! Removes the given user timer from the thread.

	The caller must hold the thread's lock.

	\param timer The timer to be removed.
*/
void
Thread::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		team->UserDefinedTimersRemoved(1);
}


/*! Deletes all (or all user-defined) user timers of the thread.

	The caller must hold the thread's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are
		deleted, otherwise all timers are deleted.
*/
void
Thread::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	if (count > 0)
		team->UserDefinedTimersRemoved(count);
}


void
Thread::DeactivateCPUTimeUserTimers()
{
	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();
}


// #pragma mark - ThreadListIterator


ThreadListIterator::ThreadListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	sThreadHash.InsertIteratorEntry(&fEntry);
}


ThreadListIterator::~ThreadListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	sThreadHash.RemoveIteratorEntry(&fEntry);
}


Thread*
ThreadListIterator::Next()
{
	// get the next thread -- if there is one, acquire a reference for it
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	Thread* thread = sThreadHash.NextElement(&fEntry);
	if (thread != NULL)
		thread->AcquireReference();

	return thread;
}
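

// Iteration sketch (illustrative; the loop body is hypothetical and the
// release call assumes the usual BReferenceable interface): Next() returns
// the next thread with a reference already acquired, so every returned
// element has to be released by the caller.
//
//	ThreadListIterator iterator;
//	while (Thread* thread = iterator.Next()) {
//		// ... look at the thread ...
//		thread->ReleaseReference();
//	}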


// #pragma mark - ThreadCreationAttributes


ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
	const char* name, int32 priority, void* arg, team_id team,
	Thread* thread)
{
	this->entry = NULL;
	this->name = name;
	this->priority = priority;
	this->args1 = NULL;
	this->args2 = NULL;
	this->stack_address = NULL;
	this->stack_size = 0;
	this->guard_size = 0;
	this->pthread = NULL;
	this->flags = 0;
	this->team = team >= 0 ? team : team_get_kernel_team()->id;
	this->thread = thread;
	this->signal_mask = 0;
	this->additional_stack_size = 0;
	this->kernelEntry = function;
	this->kernelArgument = arg;
	this->forkArgs = NULL;
}


/*! Initializes the structure from a userland structure.
	\param userAttributes The userland structure (must be a userland address).
	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
		which will be used for the \c name field, if the userland structure
		has a name. The buffer must remain valid as long as this structure is
		in use afterwards (or until it is reinitialized).
	\return \c B_OK, if the initialization went fine, another error code
		otherwise.
*/
status_t
ThreadCreationAttributes::InitFromUserAttributes(
	const thread_creation_attributes* userAttributes, char* nameBuffer)
{
	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
				sizeof(thread_creation_attributes)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	if (stack_size != 0
		&& (stack_size < MIN_USER_STACK_SIZE
			|| stack_size > MAX_USER_STACK_SIZE)) {
		return B_BAD_VALUE;
	}

	if (entry == NULL || !IS_USER_ADDRESS(entry)
		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
		|| (name != NULL && (!IS_USER_ADDRESS(name)
			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
		return B_BAD_ADDRESS;
	}

	name = name != NULL ? nameBuffer : "user thread";

	// kernel only attributes (not in thread_creation_attributes):
	Thread* currentThread = thread_get_current_thread();
	team = currentThread->team->id;
	thread = NULL;
	signal_mask = currentThread->sig_block_mask;
		// inherit the current thread's signal mask
	additional_stack_size = 0;
	kernelEntry = NULL;
	kernelArgument = NULL;
	forkArgs = NULL;

	return B_OK;
}


// #pragma mark - private functions


/*! Inserts a thread into a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
insert_thread_into_team(Team *team, Thread *thread)
{
	thread->team_next = team->thread_list;
	team->thread_list = thread;
	team->num_threads++;

	if (team->num_threads == 1) {
		// this was the first thread
		team->main_thread = thread;
	}
	thread->team = team;
}


/*! Removes a thread from a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
remove_thread_from_team(Team *team, Thread *thread)
{
	Thread *temp, *last = NULL;

	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
		if (temp == thread) {
			if (last == NULL)
				team->thread_list = temp->team_next;
			else
				last->team_next = temp->team_next;

			team->num_threads--;
			break;
		}
		last = temp;
	}
}


static status_t
enter_userspace(Thread* thread, UserThreadEntryArguments* args)
{
	status_t error = arch_thread_init_tls(thread);
	if (error != B_OK) {
		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
			")\n", thread->name, thread->id);
		free(args->forkArgs);
		return error;
	}

	user_debug_update_new_thread_flags(thread);

	// init the thread's user_thread
	user_thread* userThread = thread->user_thread;
	set_ac();
	userThread->pthread = args->pthread;
	userThread->flags = 0;
	userThread->wait_status = B_OK;
	userThread->defer_signals
		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
	userThread->pending_signals = 0;
	clear_ac();

	if (args->forkArgs != NULL) {
		// This is a fork()ed thread. Copy the fork args onto the stack and
		// free them.
		arch_fork_arg archArgs = *args->forkArgs;
		free(args->forkArgs);

		arch_restore_fork_frame(&archArgs);
			// this one won't return here
		return B_ERROR;
	}

	// Jump to the entry point in user space. Only returns if something fails.
	return arch_thread_enter_userspace(thread, args->userlandEntry,
		args->userlandArgument1, args->userlandArgument2);
}


status_t
thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2)
{
	UserThreadEntryArguments entryArgs;
	entryArgs.kernelFunction = NULL;
	entryArgs.argument = NULL;
	entryArgs.enterUserland = true;
	entryArgs.userlandEntry = (addr_t)entryFunction;
	entryArgs.userlandArgument1 = argument1;
	entryArgs.userlandArgument2 = argument2;
	entryArgs.pthread = NULL;
	entryArgs.forkArgs = NULL;
	entryArgs.flags = 0;

	return enter_userspace(thread, &entryArgs);
}


static void
common_thread_entry(void* _args)
{
	Thread* thread = thread_get_current_thread();

	// The thread is new and has been scheduled the first time.

	scheduler_new_thread_entry(thread);

	// unlock the scheduler lock and enable interrupts
	release_spinlock(&thread->scheduler_lock);
	enable_interrupts();

	// call the kernel function, if any
	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
	if (args->kernelFunction != NULL)
		args->kernelFunction(args->argument);

	// If requested, enter userland now.
	if (args->enterUserland) {
		enter_userspace(thread, (UserThreadEntryArguments*)args);
			// only returns on error

		// If that's the team's main thread, init the team exit info.
		if (thread == thread->team->main_thread)
			team_init_exit_info_on_error(thread->team);
	}

	// we're done
	thread_exit();
}


/*! Prepares the given thread's kernel stack for executing its entry function.

	The data pointed to by \a data of size \a dataSize are copied to the
	thread's kernel stack. A pointer to the copy's data is passed to the entry
	function. The entry function is common_thread_entry().

	\param thread The thread.
	\param data Pointer to data to be copied to the thread's stack and passed
		to the entry function.
	\param dataSize The size of \a data.
*/
static void
init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
{
	uint8* stack = (uint8*)thread->kernel_stack_base;
	uint8* stackTop = (uint8*)thread->kernel_stack_top;

	// clear (or rather invalidate) the kernel stack contents, if compiled
	// with debugging
#if KDEBUG > 0
#	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
		KERNEL_STACK_SIZE);
#	else
	memset(stack, 0xcc, KERNEL_STACK_SIZE);
#	endif
#endif

	// copy the data onto the stack, with 16-byte alignment to be on the safe
	// side
	void* clonedData;
#ifdef STACK_GROWS_DOWNWARDS
	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
	stackTop = (uint8*)clonedData;
#else
	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
#endif

	memcpy(clonedData, data, dataSize);

	arch_thread_init_kthread_stack(thread, stack, stackTop,
		&common_thread_entry, clonedData);
}
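

// Worked example for the 16-byte alignment above, with hypothetical numbers:
// if kernel_stack_top is 0x8000e000 and dataSize is 72, then
// ROUNDDOWN(0x8000e000 - 72, 16) = ROUNDDOWN(0x8000dfb8, 16) = 0x8000dfb0.
// The copied arguments therefore start at a 16-byte aligned address, and the
// thread's kernel stack grows downward from that address.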


static status_t
create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
	size_t stackSize, size_t additionalSize, size_t guardSize,
	char* nameBuffer)
{
	area_id stackArea = -1;
	uint8* stackBase = (uint8*)_stackBase;

	if (stackBase != NULL) {
		// A stack has been specified. It must be large enough to hold the
		// TLS space at least. Guard pages are ignored for existing stacks.
		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
		if (stackSize < MIN_USER_STACK_SIZE)
			return B_BAD_VALUE;

		stackSize -= TLS_SIZE;
	} else {
		// No user-defined stack -- allocate one. For non-main threads the
		// stack will be between USER_STACK_REGION and the main thread stack
		// area. For a main thread the position is fixed.

		guardSize = PAGE_ALIGN(guardSize);

		if (stackSize == 0) {
			// Use the default size (a different one for a main thread).
			stackSize = thread->id == team->id
				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
		} else {
			// Verify that the given stack size is large enough.
			if (stackSize < MIN_USER_STACK_SIZE)
				return B_BAD_VALUE;

			stackSize = PAGE_ALIGN(stackSize);
		}

		size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE
			+ additionalSize);

		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
			thread->name, thread->id);

		stackBase = (uint8*)USER_STACK_REGION;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
		virtualRestrictions.address = (void*)stackBase;

		physical_address_restrictions physicalRestrictions = {};

		stackArea = create_area_etc(team->id, nameBuffer,
			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
			0, guardSize, &virtualRestrictions, &physicalRestrictions,
			(void**)&stackBase);
		if (stackArea < 0)
			return stackArea;
	}

	// set the stack
	ThreadLocker threadLocker(thread);
#ifdef STACK_GROWS_DOWNWARDS
	thread->user_stack_base = (addr_t)stackBase + guardSize;
#else
	thread->user_stack_base = (addr_t)stackBase;
#endif
	thread->user_stack_size = stackSize;
	thread->user_stack_area = stackArea;

	return B_OK;
}


status_t
thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize)
{
	char nameBuffer[B_OS_NAME_LENGTH];
	return create_thread_user_stack(team, thread, stackBase, stackSize,
		additionalSize, USER_STACK_GUARD_SIZE, nameBuffer);
}


/*! Creates a new thread.

	\param attributes The thread creation attributes, specifying the team in
		which to create the thread, as well as a whole bunch of other
		arguments.
	\param kernel \c true, if a kernel-only thread shall be created, \c false,
		if the thread shall also be able to run in userland.
	\return The ID of the newly created thread (>= 0) or an error code on
		failure.
*/
thread_id
thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
{
	status_t status = B_OK;

	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
		attributes.thread, kernel ? "kernel" : "user"));

	// get the team
	Team* team = Team::Get(attributes.team);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// If a thread object is given, acquire a reference to it, otherwise
	// create a new thread object with the given attributes.
	Thread* thread = attributes.thread;
	if (thread != NULL) {
		thread->AcquireReference();
	} else {
		status = Thread::Create(attributes.name, thread);
		if (status != B_OK)
			return status;
	}
	BReference<Thread> threadReference(thread, true);

	thread->team = team;
		// set already, so, if something goes wrong, the team pointer is
		// available for deinitialization
	thread->priority = attributes.priority == -1
		? B_NORMAL_PRIORITY : attributes.priority;
	thread->priority = std::max(thread->priority,
		(int32)THREAD_MIN_SET_PRIORITY);
	thread->priority = std::min(thread->priority,
		(int32)THREAD_MAX_SET_PRIORITY);
	thread->state = B_THREAD_SUSPENDED;

	thread->sig_block_mask = attributes.signal_mask;

	// init debug structure
	init_thread_debug_info(&thread->debug_info);

	// create the kernel stack
	char stackName[B_OS_NAME_LENGTH];
	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack",
		thread->name, thread->id);
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};

	thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
			| B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&thread->kernel_stack_base);

	if (thread->kernel_stack_area < 0) {
		// we're not yet part of a team, so we can just bail out
		status = thread->kernel_stack_area;

		dprintf("create_thread: error creating kernel stack: %s!\n",
			strerror(status));

		return status;
	}

	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	if (kernel) {
		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		ThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = false;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
	} else {
		// create the userland stack, if the thread doesn't have one yet
		if (thread->user_stack_base == 0) {
			status = create_thread_user_stack(team, thread,
				attributes.stack_address, attributes.stack_size,
				attributes.additional_stack_size, attributes.guard_size,
				stackName);
			if (status != B_OK)
				return status;
		}

		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		UserThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = true;
		entryArgs.userlandEntry = (addr_t)attributes.entry;
		entryArgs.userlandArgument1 = attributes.args1;
		entryArgs.userlandArgument2 = attributes.args2;
		entryArgs.pthread = attributes.pthread;
		entryArgs.forkArgs = attributes.forkArgs;
		entryArgs.flags = attributes.flags;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));

		// create the pre-defined thread timers
		status = user_timer_create_thread_timers(team, thread);
		if (status != B_OK)
			return status;
	}

	// lock the team and see whether it is still alive
	TeamLocker teamLocker(team);
	if (team->state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	bool debugNewThread = false;
	if (!kernel) {
		// allocate the user_thread structure, if not already allocated
		if (thread->user_thread == NULL) {
			thread->user_thread = team_allocate_user_thread(team);
			if (thread->user_thread == NULL)
				return B_NO_MEMORY;
		}

		// If the new thread belongs to the same team as the current thread,
		// it may inherit some of the thread debug flags.
		Thread* currentThread = thread_get_current_thread();
		if (currentThread != NULL && currentThread->team == team) {
			// inherit all user flags...
			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
				& B_THREAD_DEBUG_USER_FLAG_MASK;

			// ... save the syscall tracing flags, unless explicitly specified
			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
					| B_THREAD_DEBUG_POST_SYSCALL);
			}

			thread->debug_info.flags = debugFlags;

			// stop the new thread, if desired
			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
		}
	}

	// We're going to make the thread live, now. The thread itself will take
	// over a reference to its Thread object. We'll acquire another reference
	// for our own use (and threadReference remains armed).

	ThreadLocker threadLocker(thread);

	InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
	WriteSpinLocker threadHashLocker(sThreadHashLock);

	// check the thread limit
	if (sUsedThreads >= sMaxThreads) {
		// Clean up the user_thread structure. It's a bit unfortunate that the
		// Thread destructor cannot do that, so we have to do that explicitly.
		threadHashLocker.Unlock();
		threadCreationLocker.Unlock();

		user_thread* userThread = thread->user_thread;
		thread->user_thread = NULL;

		threadLocker.Unlock();
		teamLocker.Unlock();

		if (userThread != NULL)
			team_free_user_thread(team, userThread);

		return B_NO_MORE_THREADS;
	}

	// make thread visible in global hash/list
	thread->visible = true;
	sUsedThreads++;

	scheduler_on_thread_init(thread);

	thread->AcquireReference();

	// Debug the new thread, if the parent thread required that (see above),
	// or the respective global team debug flag is set. But only if a
	// debugger is installed for the team.
	if (!kernel) {
		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
		if (debugNewThread
			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
		}
	}

	{
		SpinLocker signalLocker(team->signal_lock);
		SpinLocker timeLocker(team->time_lock);

		// insert thread into team
		insert_thread_into_team(team, thread);
	}

	threadHashLocker.Unlock();
	threadCreationLocker.Unlock();
	threadLocker.Unlock();
	teamLocker.Unlock();

	// notify listeners
	sNotificationService.Notify(THREAD_ADDED, thread);

	return thread->id;
}


static status_t
undertaker(void* /*args*/)
{
	while (true) {
		// wait for a thread to bury
		InterruptsSpinLocker locker(sUndertakerLock);

		while (sUndertakerEntries.IsEmpty()) {
			ConditionVariableEntry conditionEntry;
			sUndertakerCondition.Add(&conditionEntry);
			locker.Unlock();

			conditionEntry.Wait();

			locker.Lock();
		}

		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
		locker.Unlock();

		UndertakerEntry entry = *_entry;
			// we need a copy, since the original entry is on the thread's
			// stack

		// we've got an entry
		Thread* thread = entry.thread;

		// make sure the thread isn't running anymore
		InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
		ASSERT(thread->state == THREAD_STATE_FREE_ON_RESCHED);
		schedulerLocker.Unlock();

		// remove this thread from the kernel team -- this makes it
		// inaccessible
		Team* kernelTeam = team_get_kernel_team();
		TeamLocker kernelTeamLocker(kernelTeam);
		thread->Lock();

		InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
		SpinLocker signalLocker(kernelTeam->signal_lock);
		SpinLocker timeLocker(kernelTeam->time_lock);

		remove_thread_from_team(kernelTeam, thread);

		timeLocker.Unlock();
		signalLocker.Unlock();
		threadCreationLocker.Unlock();

		kernelTeamLocker.Unlock();

		// free the thread structure
		thread->UnlockAndReleaseReference();
	}

	// can never get here
	return B_OK;
}


/*! Returns the semaphore the thread is currently waiting on.

	The return value is purely informative.
	The caller must hold the scheduler lock.

	\param thread The thread.
	\return The ID of the semaphore the thread is currently waiting on or
		\c -1, if it isn't waiting on a semaphore.
*/
static sem_id
get_thread_wait_sem(Thread* thread)
{
	if (thread->state == B_THREAD_WAITING
		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
		return (sem_id)(addr_t)thread->wait.object;
	}
	return -1;
}


/*! Fills the thread_info structure with information from the specified
	thread.
	The caller must hold the thread's lock and the scheduler lock.
*/
static void
fill_thread_info(Thread *thread, thread_info *info, size_t size)
{
	info->thread = thread->id;
	info->team = thread->team->id;

	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);

	info->sem = -1;

	if (thread->state == B_THREAD_WAITING) {
		info->state = B_THREAD_WAITING;

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SNOOZE:
				info->state = B_THREAD_ASLEEP;
				break;

			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					info->state = B_THREAD_RECEIVING;
				else
					info->sem = sem;
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			default:
				break;
		}
	} else
		info->state = (thread_state)thread->state;

	info->priority = thread->priority;
	info->stack_base = (void *)thread->user_stack_base;
	info->stack_end = (void *)(thread->user_stack_base
		+ thread->user_stack_size);

	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
	info->user_time = thread->user_time;
	info->kernel_time = thread->kernel_time;
}


static status_t
send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
	int32 flags)
{
	// get the thread
	Thread *target = Thread::Get(id);
	if (target == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> targetReference(target, true);

	// get the write semaphore
	ThreadLocker targetLocker(target);
	sem_id cachedSem = target->msg.write_sem;
	targetLocker.Unlock();

	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
		return B_NO_MEMORY;

	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
	if (status == B_INTERRUPTED) {
		// we got interrupted by a signal
		return status;
	}
	if (status != B_OK) {
		// Any other acquisition problems may be due to thread deletion
		return B_BAD_THREAD_ID;
	}

	void* data;
	if (bufferSize > 0) {
		data = malloc(bufferSize);
		if (data == NULL)
			return B_NO_MEMORY;
		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
			free(data);
			return B_BAD_DATA;
		}
	} else
		data = NULL;

	targetLocker.Lock();

	// The target thread could have been deleted at this point.
	if (!target->IsAlive()) {
		targetLocker.Unlock();
		free(data);
		return B_BAD_THREAD_ID;
	}

	// Save the message information
	target->msg.sender = thread_get_current_thread()->id;
	target->msg.code = code;
	target->msg.size = bufferSize;
	target->msg.buffer = data;
	cachedSem = target->msg.read_sem;

	targetLocker.Unlock();

	release_sem(cachedSem);
	return B_OK;
}
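

// Sketch of the messaging handshake implemented by send_data_etc() above and
// receive_data_etc() below (descriptive only, not additional code):
//
//	sender                                 receiver
//	  acquire_sem(target->msg.write_sem)     acquire_sem(msg.read_sem) (blocks)
//	  store code/buffer in target->msg       ...
//	  release_sem(target->msg.read_sem)  --> copy msg.buffer to the caller
//	                                         release_sem(msg.write_sem)
//
// write_sem is created with a count of 1 and read_sem with 0 (see
// Thread::Init()), so at most one message can be in flight per target thread.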


static int32
receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
	int32 flags)
{
	Thread *thread = thread_get_current_thread();
	size_t size;
	int32 code;

	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
	if (status != B_OK) {
		// Actually, we're not supposed to return error codes, but since the
		// only reason this can fail is that we were killed, it's probably
		// okay to do so (but also meaningless).
		return status;
	}

	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
		size = min_c(bufferSize, thread->msg.size);
		status = user_memcpy(buffer, thread->msg.buffer, size);
		if (status != B_OK) {
			free(thread->msg.buffer);
			release_sem(thread->msg.write_sem);
			return status;
		}
	}

	*_sender = thread->msg.sender;
	code = thread->msg.code;

	free(thread->msg.buffer);
	release_sem(thread->msg.write_sem);

	return code;
}


static status_t
common_getrlimit(int resource, struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_AS:
			rlp->rlim_cur = __HAIKU_ADDR_MAX;
			rlp->rlim_max = __HAIKU_ADDR_MAX;
			return B_OK;

		case RLIMIT_CORE:
			rlp->rlim_cur = 0;
			rlp->rlim_max = 0;
			return B_OK;

		case RLIMIT_DATA:
			rlp->rlim_cur = RLIM_INFINITY;
			rlp->rlim_max = RLIM_INFINITY;
			return B_OK;

		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_getrlimit(resource, rlp);

		case RLIMIT_STACK:
		{
			rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE;
			rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE;
			return B_OK;
		}

		default:
			return EINVAL;
	}

	return B_OK;
}


static status_t
common_setrlimit(int resource, const struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_CORE:
			// We don't support core files, so allow setting to 0/0 only.
			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
				return EINVAL;
			return B_OK;

		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_setrlimit(resource, rlp);

		default:
			return EINVAL;
	}

	return B_OK;
}


static status_t
common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
	bigtime_t* _remainingTime)
{
#if KDEBUG
	if (!are_interrupts_enabled()) {
		panic("common_snooze_etc(): called with interrupts disabled, timeout "
			"%" B_PRIdBIGTIME, timeout);
	}
#endif

	switch (clockID) {
		case CLOCK_REALTIME:
			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
			// through
			flags |= B_TIMEOUT_REAL_TIME_BASE;
		case CLOCK_MONOTONIC:
		{
			// Store the start time, for the case that we get interrupted and
			// need to return the remaining time. For absolute timeouts we can
			// still get the time later, if needed.
			bigtime_t startTime
				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
					? system_time() : 0;

			Thread* thread = thread_get_current_thread();

			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
				NULL);
			status_t status = thread_block_with_timeout(flags, timeout);

			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
				return B_OK;

			// If interrupted, compute the remaining time, if requested.
			if (status == B_INTERRUPTED && _remainingTime != NULL) {
				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
					*_remainingTime = std::max(
						startTime + timeout - system_time(), (bigtime_t)0);
				} else {
					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
						? real_time_clock_usecs() : system_time();
					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
				}
			}

			return status;
		}

		case CLOCK_THREAD_CPUTIME_ID:
			// Waiting for ourselves to do something isn't particularly
			// productive.
			return B_BAD_VALUE;

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
			// We don't have to support those, but we are allowed to. Could be
			// done by creating a UserTimer on the fly with a custom UserEvent
			// that would just wake us up.
			return ENOTSUP;
	}
}


// #pragma mark - debugger calls


static int
make_thread_unreal(int argc, char **argv)
{
	int32 id = -1;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 1)
		id = strtoul(argv[1], NULL, 0);

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (id != -1 && thread->id != id)
			continue;

		if (thread->priority > B_DISPLAY_PRIORITY) {
			scheduler_set_thread_priority(thread, B_NORMAL_PRIORITY);
			kprintf("thread %" B_PRId32 " made unreal\n", thread->id);
		}
	}

	return 0;
}


static int
set_thread_prio(int argc, char **argv)
{
	int32 id;
	int32 prio;

	if (argc > 3 || argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	prio = strtoul(argv[1], NULL, 0);
	if (prio > THREAD_MAX_SET_PRIORITY)
		prio = THREAD_MAX_SET_PRIORITY;
	if (prio < THREAD_MIN_SET_PRIORITY)
		prio = THREAD_MIN_SET_PRIORITY;

	if (argc > 2)
		id = strtoul(argv[2], NULL, 0);
	else
		id = thread_get_current_thread()->id;

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;
		scheduler_set_thread_priority(thread, prio);
		kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
make_thread_suspended(int argc, char **argv)
{
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		Signal signal(SIGSTOP, SI_USER, B_OK, team_get_kernel_team()->id);
		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);

		kprintf("thread %" B_PRId32 " suspended\n", id);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
make_thread_resumed(int argc, char **argv)
{
	int32 id;

	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// force user to enter a thread id, as using
	// the current thread is usually not intended
	id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		if (thread->state == B_THREAD_SUSPENDED
			|| thread->state == B_THREAD_ASLEEP
			|| thread->state == B_THREAD_WAITING) {
			scheduler_enqueue_in_run_queue(thread);
			kprintf("thread %" B_PRId32 " resumed\n", thread->id);
		} else
			kprintf("thread %" B_PRId32 " is already running\n", thread->id);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
drop_into_debugger(int argc, char **argv)
{
	status_t err;
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	err = _user_debug_thread(id);
		// TODO: This is a non-trivial syscall doing some locking, so this is
		// really nasty and may go seriously wrong.
	if (err)
		kprintf("drop failed\n");
	else
		kprintf("thread %" B_PRId32 " dropped into user debugger\n", id);

	return 0;
}


/*! Returns a user-readable string for a thread state.
	Only for use in the kernel debugger.
*/
static const char *
state_to_text(Thread *thread, int32 state)
{
	switch (state) {
		case B_THREAD_READY:
			return "ready";

		case B_THREAD_RUNNING:
			return "running";

		case B_THREAD_WAITING:
		{
			if (thread != NULL) {
				switch (thread->wait.type) {
					case THREAD_BLOCK_TYPE_SNOOZE:
						return "zzz";

					case THREAD_BLOCK_TYPE_SEMAPHORE:
					{
						sem_id sem = (sem_id)(addr_t)thread->wait.object;
						if (sem == thread->msg.read_sem)
							return "receive";
						break;
					}
				}
			}

			return "waiting";
		}

		case B_THREAD_SUSPENDED:
			return "suspended";

		case THREAD_STATE_FREE_ON_RESCHED:
			return "death";

		default:
			return "UNKNOWN";
	}
}


static void
print_thread_list_table_head()
{
	kprintf("%-*s id state wait for %-*s cpu pri %-*s team "
		"name\n",
		B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object",
		B_PRINTF_POINTER_WIDTH, "stack");
}


static void
_dump_thread_info(Thread *thread, bool shortInfo)
{
	if (shortInfo) {
		kprintf("%p %6" B_PRId32 " %-10s", thread, thread->id,
			state_to_text(thread, thread->state));

		// does it block on a semaphore or a condition variable?
		if (thread->state == B_THREAD_WAITING) {
			switch (thread->wait.type) {
				case THREAD_BLOCK_TYPE_SEMAPHORE:
				{
					sem_id sem = (sem_id)(addr_t)thread->wait.object;
					if (sem == thread->msg.read_sem)
						kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					else {
						kprintf("sem %-*" B_PRId32,
							B_PRINTF_POINTER_WIDTH + 5, sem);
					}
					break;
				}

				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					kprintf("cvar %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_SNOOZE:
					kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					break;

				case THREAD_BLOCK_TYPE_SIGNAL:
					kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, "");
					break;

				case THREAD_BLOCK_TYPE_MUTEX:
					kprintf("mutex %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_RW_LOCK:
					kprintf("rwlock %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_USER:
					kprintf("user%*s", B_PRINTF_POINTER_WIDTH + 11, "");
					break;

				case THREAD_BLOCK_TYPE_OTHER:
					kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, "");
					break;

				default:
					kprintf("??? %p ", thread->wait.object);
					break;
			}
		} else
			kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, "");

		// on which CPU does it run?
		if (thread->cpu)
			kprintf("%2d", thread->cpu->cpu_num);
		else
			kprintf(" -");

		kprintf("%4" B_PRId32 " %p%5" B_PRId32 " %s\n", thread->priority,
			(void *)thread->kernel_stack_base, thread->team->id, thread->name);

		return;
	}

	// print the long info

	struct thread_death_entry *death = NULL;

	kprintf("THREAD: %p\n", thread);
	kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id,
		thread->id);
	kprintf("serial_number: %" B_PRId64 "\n", thread->serial_number);
	kprintf("name: \"%s\"\n", thread->name);
	kprintf("hash_next: %p\nteam_next: %p\n",
		thread->hash_next, thread->team_next);
	kprintf("priority: %" B_PRId32 " (I/O: %" B_PRId32 ")\n",
		thread->priority, thread->io_priority);
	kprintf("state: %s\n", state_to_text(thread, thread->state));
	kprintf("cpu: %p ", thread->cpu);
	if (thread->cpu)
		kprintf("(%d)\n", thread->cpu->cpu_num);
	else
		kprintf("\n");
	kprintf("sig_pending: %#" B_PRIx64 " (blocked: %#" B_PRIx64
		", before sigsuspend(): %#" B_PRIx64 ")\n",
		(int64)thread->ThreadPendingSignals(),
		(int64)thread->sig_block_mask,
		(int64)thread->sigsuspend_original_unblocked_mask);
	kprintf("in_kernel: %d\n", thread->in_kernel);

	if (thread->state == B_THREAD_WAITING) {
		kprintf("waiting for: ");

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					kprintf("data\n");
				else
					kprintf("semaphore %" B_PRId32 "\n", sem);
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
				kprintf("condition variable %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_SNOOZE:
				kprintf("snooze()\n");
				break;

			case THREAD_BLOCK_TYPE_SIGNAL:
				kprintf("signal\n");
				break;

			case THREAD_BLOCK_TYPE_MUTEX:
				kprintf("mutex %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_RW_LOCK:
				kprintf("rwlock %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_USER:
				kprintf("user\n");
				break;

			case THREAD_BLOCK_TYPE_OTHER:
				kprintf("other (%s)\n", (char*)thread->wait.object);
				break;

			default:
				kprintf("unknown (%p)\n", thread->wait.object);
				break;
		}
	}

	kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
	kprintf("team: %p, \"%s\"\n", thread->team,
		thread->team->Name());
	kprintf(" exit.sem: %" B_PRId32 "\n", thread->exit.sem);
	kprintf(" exit.status: %#" B_PRIx32 " (%s)\n", thread->exit.status,
		strerror(thread->exit.status));
	kprintf(" exit.waiters:\n");
	while ((death = (struct thread_death_entry*)list_get_next_item(
			&thread->exit.waiters, death)) != NULL) {
		kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread);
	}

	kprintf("kernel_stack_area: %" B_PRId32 "\n", thread->kernel_stack_area);
	kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base);
	kprintf("user_stack_area: %" B_PRId32 "\n", thread->user_stack_area);
	kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base);
	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
	kprintf("user_thread: %p\n", (void *)thread->user_thread);
	kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno,
		strerror(thread->kernel_errno));
	kprintf("kernel_time: %" B_PRId64 "\n", thread->kernel_time);
	kprintf("user_time: %" B_PRId64 "\n", thread->user_time);
	kprintf("flags: 0x%" B_PRIx32 "\n", thread->flags);
	kprintf("architecture dependent section:\n");
	arch_thread_dump_info(&thread->arch_info);
	kprintf("scheduler data:\n");
	scheduler_dump_thread_data(thread);
}


static int
dump_thread_info(int argc, char **argv)
{
	bool shortInfo = false;
	int argi = 1;
	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
		shortInfo = true;
		print_thread_list_table_head();
		argi++;
	}

	if (argi == argc) {
		_dump_thread_info(thread_get_current_thread(), shortInfo);
		return 0;
	}

	for (; argi < argc; argi++) {
		const char *name = argv[argi];
		ulong arg = strtoul(name, NULL, 0);

		if (IS_KERNEL_ADDRESS(arg)) {
			// semi-hack
			_dump_thread_info((Thread *)arg, shortInfo);
			continue;
		}

		// walk through the thread list, trying to match name or id
		bool found = false;
		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
				Thread* thread = it.Next();) {
			if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) {
				_dump_thread_info(thread, shortInfo);
				found = true;
				break;
			}
		}

		if (!found)
			kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg);
	}

	return 0;
}


static int
dump_thread_list(int argc, char **argv)
{
	bool realTimeOnly = false;
	bool calling = false;
	const char *callSymbol = NULL;
	addr_t callStart = 0;
	addr_t callEnd = 0;
	int32 requiredState = 0;
	team_id team = -1;
	sem_id sem = -1;

	if (!strcmp(argv[0], "realtime"))
		realTimeOnly = true;
	else if (!strcmp(argv[0], "ready"))
		requiredState = B_THREAD_READY;
	else if (!strcmp(argv[0], "running"))
		requiredState = B_THREAD_RUNNING;
	else if (!strcmp(argv[0], "waiting")) {
		requiredState = B_THREAD_WAITING;

		if (argc > 1) {
			sem = strtoul(argv[1], NULL, 0);
			if (sem == 0)
				kprintf("ignoring invalid semaphore argument.\n");
		}
	} else if (!strcmp(argv[0], "calling")) {
		if (argc < 2) {
			kprintf("Need to give a symbol name or start and end arguments.\n");
			return 0;
		} else if (argc == 3) {
			callStart = parse_expression(argv[1]);
			callEnd = parse_expression(argv[2]);
		} else
			callSymbol = argv[1];

		calling = true;
	} else if (argc > 1) {
		team = strtoul(argv[1], NULL, 0);
		if (team == 0)
			kprintf("ignoring invalid team argument.\n");
	}

	print_thread_list_table_head();

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		// filter out threads not matching the search criteria
		if ((requiredState && thread->state != requiredState)
			|| (calling && !arch_debug_contains_call(thread, callSymbol,
					callStart, callEnd))
			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
			|| (team > 0 && thread->team->id != team)
			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
			continue;

		_dump_thread_info(thread, true);
	}
	return 0;
}
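

// Illustrative invocations of the thread-list debugger command variants
// handled by dump_thread_list() above. The concrete IDs and the symbol name
// are hypothetical, and the command names themselves are registered
// elsewhere; they are shown here only to clarify the argument parsing:
//
//	threads 155            -- list only the threads of team 155
//	waiting 0x1234         -- list threads waiting on semaphore 0x1234
//	calling mutex_lock     -- list threads whose call chain contains the symbol
//	calling 0x80001000 0x80002000
//	                       -- same, but with an explicit address range
//	realtime               -- list only real-time priority threads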


// #pragma mark - private kernel API


void
thread_exit(void)
{
	cpu_status state;
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	Team* kernelTeam = team_get_kernel_team();
	status_t status;
	struct thread_debug_info debugInfo;
	team_id teamID = team->id;

	TRACE(("thread %" B_PRId32 " exiting w/return code %#" B_PRIx32 "\n",
		thread->id, thread->exit.status));

	if (!are_interrupts_enabled())
		panic("thread_exit() called with interrupts disabled!\n");

	// boost our priority to get this over with
	scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY);

	if (team != kernelTeam) {
		// Delete all user timers associated with the thread.
		ThreadLocker threadLocker(thread);
		thread->DeleteUserTimers(false);

		// detach the thread's user thread
		user_thread* userThread = thread->user_thread;
		thread->user_thread = NULL;

		threadLocker.Unlock();

		// Delete the thread's user thread, if it's not the main thread. If it
		// is, we can save the work, since it will be deleted with the team's
		// address space.
		if (thread != team->main_thread)
			team_free_user_thread(team, userThread);
	}

	// remember the user stack area -- we will delete it below
	area_id userStackArea = -1;
	if (team->address_space != NULL && thread->user_stack_area >= 0) {
		userStackArea = thread->user_stack_area;
		thread->user_stack_area = -1;
	}

	struct job_control_entry *death = NULL;
	struct thread_death_entry* threadDeathEntry = NULL;
	bool deleteTeam = false;
	port_id debuggerPort = -1;

	if (team != kernelTeam) {
		user_debug_thread_exiting(thread);

		if (team->main_thread == thread) {
			// The main thread is exiting. Shut down the whole team.
			deleteTeam = true;

			// kill off all other threads and the user debugger facilities
			debuggerPort = team_shutdown_team(team);

			// acquire necessary locks, which are: process group lock, kernel
			// team lock, parent team lock, and the team lock
			team->LockProcessGroup();
			kernelTeam->Lock();
			team->LockTeamAndParent(true);
		} else {
			threadDeathEntry
				= (thread_death_entry*)malloc(sizeof(thread_death_entry));

			// acquire necessary locks, which are: kernel team lock and the
			// team lock
			kernelTeam->Lock();
			team->Lock();
		}

		ThreadLocker threadLocker(thread);

		state = disable_interrupts();

		// swap address spaces, to make sure we're running on the kernel's
		// pgdir
		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());

		WriteSpinLocker teamLocker(thread->team_lock);
		SpinLocker threadCreationLocker(gThreadCreationLock);
			// removing the thread and putting its death entry to the parent
			// team needs to be an atomic operation

		// remember how long this thread lasted
		bigtime_t now = system_time();

		InterruptsSpinLocker signalLocker(kernelTeam->signal_lock);
		SpinLocker teamTimeLocker(kernelTeam->time_lock);
		SpinLocker threadTimeLocker(thread->time_lock);

		thread->kernel_time += now - thread->last_time;
		thread->last_time = now;

		team->dead_threads_kernel_time += thread->kernel_time;
		team->dead_threads_user_time += thread->user_time;

		// stop/update thread/team CPU time user timers
		if (thread->HasActiveCPUTimeUserTimers()
			|| team->HasActiveCPUTimeUserTimers()) {
			user_timer_stop_cpu_timers(thread, NULL);
		}

		// deactivate CPU time user timers for the thread
		if (thread->HasActiveCPUTimeUserTimers())
			thread->DeactivateCPUTimeUserTimers();

		threadTimeLocker.Unlock();

		// put the thread into the kernel team until it dies
		remove_thread_from_team(team, thread);
		insert_thread_into_team(kernelTeam, thread);

		teamTimeLocker.Unlock();
		signalLocker.Unlock();

		teamLocker.Unlock();

		if (team->death_entry != NULL) {
			if (--team->death_entry->remaining_threads == 0)
				team->death_entry->condition.NotifyOne();
		}

		if (deleteTeam) {
			Team* parent = team->parent;

			// Set the team job control state to "dead" and detach the job
			// control entry from our team struct.
			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL);
			death = team->job_control_entry;
			team->job_control_entry = NULL;

			if (death != NULL) {
				death->InitDeadState();

				// team_set_job_control_state() already moved our entry
				// into the parent's list. We just check the soft limit of
				// death entries.
				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
					death = parent->dead_children.entries.RemoveHead();
					parent->dead_children.count--;
				} else
					death = NULL;
			}

			threadCreationLocker.Unlock();
			restore_interrupts(state);

			threadLocker.Unlock();

			// Get a temporary reference to the team's process group
			// -- team_remove_team() removes the team from the group, which
			// might destroy it otherwise and we wouldn't be able to unlock
			// it.
2100 ProcessGroup* group = team->group; 2101 group->AcquireReference(); 2102 2103 pid_t foregroundGroupToSignal; 2104 team_remove_team(team, foregroundGroupToSignal); 2105 2106 // unlock everything but the parent team 2107 team->Unlock(); 2108 if (parent != kernelTeam) 2109 kernelTeam->Unlock(); 2110 group->Unlock(); 2111 group->ReleaseReference(); 2112 2113 // Send SIGCHLD to the parent as long as we still have its lock. 2114 // This makes job control state change + signalling atomic. 2115 Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id); 2116 if (team->exit.reason == CLD_EXITED) { 2117 childSignal.SetStatus(team->exit.status); 2118 } else { 2119 childSignal.SetStatus(team->exit.signal); 2120 childSignal.SetSendingUser(team->exit.signaling_user); 2121 } 2122 send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE); 2123 2124 // also unlock the parent 2125 parent->Unlock(); 2126 2127 // If the team was a session leader with controlling TTY, we have 2128 // to send SIGHUP to the foreground process group. 2129 if (foregroundGroupToSignal >= 0) { 2130 Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id); 2131 send_signal_to_process_group(foregroundGroupToSignal, 2132 groupSignal, B_DO_NOT_RESCHEDULE); 2133 } 2134 } else { 2135 // The thread is not the main thread. We store a thread death entry 2136 // for it, unless someone is already waiting for it. 2137 if (threadDeathEntry != NULL 2138 && list_is_empty(&thread->exit.waiters)) { 2139 threadDeathEntry->thread = thread->id; 2140 threadDeathEntry->status = thread->exit.status; 2141 2142 // add entry -- remove an old one, if we hit the limit 2143 list_add_item(&team->dead_threads, threadDeathEntry); 2144 team->dead_threads_count++; 2145 threadDeathEntry = NULL; 2146 2147 if (team->dead_threads_count > MAX_DEAD_THREADS) { 2148 threadDeathEntry 2149 = (thread_death_entry*)list_remove_head_item( 2150 &team->dead_threads); 2151 team->dead_threads_count--; 2152 } 2153 } 2154 2155 threadCreationLocker.Unlock(); 2156 restore_interrupts(state); 2157 2158 threadLocker.Unlock(); 2159 team->Unlock(); 2160 kernelTeam->Unlock(); 2161 } 2162 2163 TRACE(("thread_exit: thread %" B_PRId32 " now a kernel thread!\n", 2164 thread->id)); 2165 } 2166 2167 free(threadDeathEntry); 2168 2169 // delete the team if we're its main thread 2170 if (deleteTeam) { 2171 team_delete_team(team, debuggerPort); 2172 2173 // we need to delete any death entry that made it to here 2174 delete death; 2175 } 2176 2177 ThreadLocker threadLocker(thread); 2178 2179 state = disable_interrupts(); 2180 SpinLocker threadCreationLocker(gThreadCreationLock); 2181 2182 // mark invisible in global hash/list, so it's no longer accessible 2183 WriteSpinLocker threadHashLocker(sThreadHashLock); 2184 thread->visible = false; 2185 sUsedThreads--; 2186 threadHashLocker.Unlock(); 2187 2188 // Stop debugging for this thread 2189 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2190 debugInfo = thread->debug_info; 2191 clear_thread_debug_info(&thread->debug_info, true); 2192 threadDebugInfoLocker.Unlock(); 2193 2194 // Remove the select infos. We notify them a little later. 
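	// The infos are only detached here; notifying them and surrendering the
	// sync references happens further below, once the spinlocks have been
	// released and interrupts are enabled again, since that work should not
	// be done in this context.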
2195 select_info* selectInfos = thread->select_infos; 2196 thread->select_infos = NULL; 2197 2198 threadCreationLocker.Unlock(); 2199 restore_interrupts(state); 2200 2201 threadLocker.Unlock(); 2202 2203 destroy_thread_debug_info(&debugInfo); 2204 2205 // notify select infos 2206 select_info* info = selectInfos; 2207 while (info != NULL) { 2208 select_sync* sync = info->sync; 2209 2210 notify_select_events(info, B_EVENT_INVALID); 2211 info = info->next; 2212 put_select_sync(sync); 2213 } 2214 2215 // notify listeners 2216 sNotificationService.Notify(THREAD_REMOVED, thread); 2217 2218 // shutdown the thread messaging 2219 2220 status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0); 2221 if (status == B_WOULD_BLOCK) { 2222 // there is data waiting for us, so let us eat it 2223 thread_id sender; 2224 2225 delete_sem(thread->msg.write_sem); 2226 // first, let's remove all possibly waiting writers 2227 receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT); 2228 } else { 2229 // we probably own the semaphore here, and we're the last to do so 2230 delete_sem(thread->msg.write_sem); 2231 } 2232 // now we can safely remove the msg.read_sem 2233 delete_sem(thread->msg.read_sem); 2234 2235 // fill all death entries and delete the sem that others will use to wait 2236 // for us 2237 { 2238 sem_id cachedExitSem = thread->exit.sem; 2239 2240 ThreadLocker threadLocker(thread); 2241 2242 // make sure no one will grab this semaphore again 2243 thread->exit.sem = -1; 2244 2245 // fill all death entries 2246 thread_death_entry* entry = NULL; 2247 while ((entry = (thread_death_entry*)list_get_next_item( 2248 &thread->exit.waiters, entry)) != NULL) { 2249 entry->status = thread->exit.status; 2250 } 2251 2252 threadLocker.Unlock(); 2253 2254 delete_sem(cachedExitSem); 2255 } 2256 2257 // delete the user stack, if this was a user thread 2258 if (!deleteTeam && userStackArea >= 0) { 2259 // We postponed deleting the user stack until now, since this way all 2260 // notifications for the thread's death are out already and all other 2261 // threads waiting for this thread's death and some object on its stack 2262 // will wake up before we (try to) delete the stack area. Of most 2263 // relevance is probably the case where this is the main thread and 2264 // other threads use objects on its stack -- so we want them terminated 2265 // first. 2266 // When the team is deleted, all areas are deleted anyway, so we don't 2267 // need to do that explicitly in that case. 2268 vm_delete_area(teamID, userStackArea, true); 2269 } 2270 2271 // notify the debugger 2272 if (teamID != kernelTeam->id) 2273 user_debug_thread_deleted(teamID, thread->id); 2274 2275 // enqueue in the undertaker list and reschedule for the last time 2276 UndertakerEntry undertakerEntry(thread, teamID); 2277 2278 disable_interrupts(); 2279 2280 SpinLocker schedulerLocker(thread->scheduler_lock); 2281 2282 SpinLocker undertakerLocker(sUndertakerLock); 2283 sUndertakerEntries.Add(&undertakerEntry); 2284 sUndertakerCondition.NotifyOne(); 2285 undertakerLocker.Unlock(); 2286 2287 scheduler_reschedule(THREAD_STATE_FREE_ON_RESCHED); 2288 2289 panic("never can get here\n"); 2290 } 2291 2292 2293 /*! Called in the interrupt handler code when a thread enters 2294 the kernel for any reason. 2295 Only tracks time for now. 2296 Interrupts are disabled. 
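	\a now is the current system time, as the architecture's kernel entry
	code would pass it, e.g. thread_at_kernel_entry(system_time()).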
*/
2298 void
2299 thread_at_kernel_entry(bigtime_t now)
2300 {
2301 Thread *thread = thread_get_current_thread();
2302
2303 TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id));
2304
2305 // track user time
2306 SpinLocker threadTimeLocker(thread->time_lock);
2307 thread->user_time += now - thread->last_time;
2308 thread->last_time = now;
2309 thread->in_kernel = true;
2310 threadTimeLocker.Unlock();
2311 }
2312
2313
2314 /*! Called whenever a thread exits kernel space to user space.
2315 Tracks time, handles signals, ...
2316 Interrupts must be enabled. When the function returns, interrupts will be
2317 disabled.
2318 The function may not return. This e.g. happens when the thread has received
2319 a deadly signal.
2320 */
2321 void
2322 thread_at_kernel_exit(void)
2323 {
2324 Thread *thread = thread_get_current_thread();
2325
2326 TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id));
2327
2328 handle_signals(thread);
2329
2330 disable_interrupts();
2331
2332 // track kernel time
2333 bigtime_t now = system_time();
2334 SpinLocker threadTimeLocker(thread->time_lock);
2335 thread->in_kernel = false;
2336 thread->kernel_time += now - thread->last_time;
2337 thread->last_time = now;
2338 }
2339
2340
2341 /*! The quick version of thread_at_kernel_exit(), in case no signals are
2342 pending and no debugging shall be done.
2343 Interrupts must be disabled.
2344 */
2345 void
2346 thread_at_kernel_exit_no_signals(void)
2347 {
2348 Thread *thread = thread_get_current_thread();
2349
2350 TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n",
2351 thread->id));
2352
2353 // track kernel time
2354 bigtime_t now = system_time();
2355 SpinLocker threadTimeLocker(thread->time_lock);
2356 thread->in_kernel = false;
2357 thread->kernel_time += now - thread->last_time;
2358 thread->last_time = now;
2359 }
2360
2361
2362 void
2363 thread_reset_for_exec(void)
2364 {
2365 Thread* thread = thread_get_current_thread();
2366
2367 ThreadLocker threadLocker(thread);
2368
2369 // delete user-defined timers
2370 thread->DeleteUserTimers(true);
2371
2372 // cancel pre-defined timer
2373 if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
2374 timer->Cancel();
2375
2376 // reset user_thread and user stack
2377 thread->user_thread = NULL;
2378 thread->user_stack_area = -1;
2379 thread->user_stack_base = 0;
2380 thread->user_stack_size = 0;
2381
2382 // reset signals
2383 thread->ResetSignalsOnExec();
2384
2385 // reset thread CPU time clock
2386 InterruptsSpinLocker timeLocker(thread->time_lock);
2387 thread->cpu_clock_offset = -thread->CPUTime(false);
2388 }
2389
2390
2391 thread_id
2392 allocate_thread_id()
2393 {
2394 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
2395
2396 // find the next unused ID
2397 thread_id id;
2398 do {
2399 id = sNextThreadID++;
2400
2401 // deal with integer overflow
2402 if (sNextThreadID < 0)
2403 sNextThreadID = 2;
2404
2405 // check whether the ID is already in use
2406 } while (sThreadHash.Lookup(id, false) != NULL);
2407
2408 return id;
2409 }
2410
2411
2412 thread_id
2413 peek_next_thread_id()
2414 {
2415 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
2416 return sNextThreadID;
2417 }
2418
2419
2420 /*! Yields the CPU to other threads.
2421 The thread will continue to run if there is no other thread in the ready
2422 state; if it has a higher priority than the other ready threads, it still
2423 has a good chance to keep running.
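	A typical (illustrative) use is a busy retry loop that gives other
	threads a chance to run between attempts:

		while (try_to_make_progress() != B_OK)
			thread_yield();

	where try_to_make_progress() stands for an arbitrary caller-specific
	operation, not a function defined here.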
2424 */ 2425 void 2426 thread_yield(void) 2427 { 2428 Thread *thread = thread_get_current_thread(); 2429 if (thread == NULL) 2430 return; 2431 2432 InterruptsSpinLocker _(thread->scheduler_lock); 2433 2434 thread->has_yielded = true; 2435 scheduler_reschedule(B_THREAD_READY); 2436 } 2437 2438 2439 void 2440 thread_map(void (*function)(Thread* thread, void* data), void* data) 2441 { 2442 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock); 2443 2444 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 2445 Thread* thread = it.Next();) { 2446 function(thread, data); 2447 } 2448 } 2449 2450 2451 /*! Kernel private thread creation function. 2452 */ 2453 thread_id 2454 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority, 2455 void *arg, team_id team) 2456 { 2457 return thread_create_thread( 2458 ThreadCreationAttributes(function, name, priority, arg, team), 2459 true); 2460 } 2461 2462 2463 status_t 2464 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout, 2465 status_t *_returnCode) 2466 { 2467 if (id < 0) 2468 return B_BAD_THREAD_ID; 2469 2470 // get the thread, queue our death entry, and fetch the semaphore we have to 2471 // wait on 2472 sem_id exitSem = B_BAD_THREAD_ID; 2473 struct thread_death_entry death; 2474 2475 Thread* thread = Thread::GetAndLock(id); 2476 if (thread != NULL) { 2477 // remember the semaphore we have to wait on and place our death entry 2478 exitSem = thread->exit.sem; 2479 if (exitSem >= 0) 2480 list_add_link_to_head(&thread->exit.waiters, &death); 2481 2482 thread->UnlockAndReleaseReference(); 2483 2484 if (exitSem < 0) 2485 return B_BAD_THREAD_ID; 2486 } else { 2487 // we couldn't find this thread -- maybe it's already gone, and we'll 2488 // find its death entry in our team 2489 Team* team = thread_get_current_thread()->team; 2490 TeamLocker teamLocker(team); 2491 2492 // check the child death entries first (i.e. main threads of child 2493 // teams) 2494 bool deleteEntry; 2495 job_control_entry* freeDeath 2496 = team_get_death_entry(team, id, &deleteEntry); 2497 if (freeDeath != NULL) { 2498 death.status = freeDeath->status; 2499 if (deleteEntry) 2500 delete freeDeath; 2501 } else { 2502 // check the thread death entries of the team (non-main threads) 2503 thread_death_entry* threadDeathEntry = NULL; 2504 while ((threadDeathEntry = (thread_death_entry*)list_get_next_item( 2505 &team->dead_threads, threadDeathEntry)) != NULL) { 2506 if (threadDeathEntry->thread == id) { 2507 list_remove_item(&team->dead_threads, threadDeathEntry); 2508 team->dead_threads_count--; 2509 death.status = threadDeathEntry->status; 2510 free(threadDeathEntry); 2511 break; 2512 } 2513 } 2514 2515 if (threadDeathEntry == NULL) 2516 return B_BAD_THREAD_ID; 2517 } 2518 2519 // we found the thread's death entry in our team 2520 if (_returnCode) 2521 *_returnCode = death.status; 2522 2523 return B_OK; 2524 } 2525 2526 // we need to wait for the death of the thread 2527 2528 resume_thread(id); 2529 // make sure we don't wait forever on a suspended thread 2530 2531 status_t status = acquire_sem_etc(exitSem, 1, flags, timeout); 2532 2533 if (status == B_OK) { 2534 // this should never happen as the thread deletes the semaphore on exit 2535 panic("could acquire exit_sem for thread %" B_PRId32 "\n", id); 2536 } else if (status == B_BAD_SEM_ID) { 2537 // this is the way the thread normally exits 2538 status = B_OK; 2539 } else { 2540 // We were probably interrupted or the timeout occurred; we need to 2541 // remove our death entry now. 
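		// The death entry lives on our stack and was linked into the
		// thread's exit.waiters list above, so it must not remain in that
		// list once we return from this function.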
2542 thread = Thread::GetAndLock(id); 2543 if (thread != NULL) { 2544 list_remove_link(&death); 2545 thread->UnlockAndReleaseReference(); 2546 } else { 2547 // The thread is already gone, so we need to wait uninterruptibly 2548 // for its exit semaphore to make sure our death entry stays valid. 2549 // It won't take long, since the thread is apparently already in the 2550 // middle of the cleanup. 2551 acquire_sem(exitSem); 2552 status = B_OK; 2553 } 2554 } 2555 2556 if (status == B_OK && _returnCode != NULL) 2557 *_returnCode = death.status; 2558 2559 return status; 2560 } 2561 2562 2563 status_t 2564 select_thread(int32 id, struct select_info* info, bool kernel) 2565 { 2566 // get and lock the thread 2567 Thread* thread = Thread::GetAndLock(id); 2568 if (thread == NULL) 2569 return B_BAD_THREAD_ID; 2570 BReference<Thread> threadReference(thread, true); 2571 ThreadLocker threadLocker(thread, true); 2572 2573 // We support only B_EVENT_INVALID at the moment. 2574 info->selected_events &= B_EVENT_INVALID; 2575 2576 // add info to list 2577 if (info->selected_events != 0) { 2578 info->next = thread->select_infos; 2579 thread->select_infos = info; 2580 2581 // we need a sync reference 2582 atomic_add(&info->sync->ref_count, 1); 2583 } 2584 2585 return B_OK; 2586 } 2587 2588 2589 status_t 2590 deselect_thread(int32 id, struct select_info* info, bool kernel) 2591 { 2592 // get and lock the thread 2593 Thread* thread = Thread::GetAndLock(id); 2594 if (thread == NULL) 2595 return B_BAD_THREAD_ID; 2596 BReference<Thread> threadReference(thread, true); 2597 ThreadLocker threadLocker(thread, true); 2598 2599 // remove info from list 2600 select_info** infoLocation = &thread->select_infos; 2601 while (*infoLocation != NULL && *infoLocation != info) 2602 infoLocation = &(*infoLocation)->next; 2603 2604 if (*infoLocation != info) 2605 return B_OK; 2606 2607 *infoLocation = info->next; 2608 2609 threadLocker.Unlock(); 2610 2611 // surrender sync reference 2612 put_select_sync(info->sync); 2613 2614 return B_OK; 2615 } 2616 2617 2618 int32 2619 thread_max_threads(void) 2620 { 2621 return sMaxThreads; 2622 } 2623 2624 2625 int32 2626 thread_used_threads(void) 2627 { 2628 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 2629 return sUsedThreads; 2630 } 2631 2632 2633 /*! Returns a user-readable string for a thread state. 2634 Only for use in the kernel debugger. 
2635 */ 2636 const char* 2637 thread_state_to_text(Thread* thread, int32 state) 2638 { 2639 return state_to_text(thread, state); 2640 } 2641 2642 2643 int32 2644 thread_get_io_priority(thread_id id) 2645 { 2646 Thread* thread = Thread::GetAndLock(id); 2647 if (thread == NULL) 2648 return B_BAD_THREAD_ID; 2649 BReference<Thread> threadReference(thread, true); 2650 ThreadLocker threadLocker(thread, true); 2651 2652 int32 priority = thread->io_priority; 2653 if (priority < 0) { 2654 // negative I/O priority means using the (CPU) priority 2655 priority = thread->priority; 2656 } 2657 2658 return priority; 2659 } 2660 2661 2662 void 2663 thread_set_io_priority(int32 priority) 2664 { 2665 Thread* thread = thread_get_current_thread(); 2666 ThreadLocker threadLocker(thread); 2667 2668 thread->io_priority = priority; 2669 } 2670 2671 2672 status_t 2673 thread_init(kernel_args *args) 2674 { 2675 TRACE(("thread_init: entry\n")); 2676 2677 // create the thread hash table 2678 new(&sThreadHash) ThreadHashTable(); 2679 if (sThreadHash.Init(128) != B_OK) 2680 panic("thread_init(): failed to init thread hash table!"); 2681 2682 // create the thread structure object cache 2683 sThreadCache = create_object_cache("threads", sizeof(Thread), 64, NULL, 2684 NULL, NULL); 2685 // Note: The x86 port requires 64 byte alignment of thread structures. 2686 if (sThreadCache == NULL) 2687 panic("thread_init(): failed to allocate thread object cache!"); 2688 2689 if (arch_thread_init(args) < B_OK) 2690 panic("arch_thread_init() failed!\n"); 2691 2692 // skip all thread IDs including B_SYSTEM_TEAM, which is reserved 2693 sNextThreadID = B_SYSTEM_TEAM + 1; 2694 2695 // create an idle thread for each cpu 2696 for (uint32 i = 0; i < args->num_cpus; i++) { 2697 Thread *thread; 2698 area_info info; 2699 char name[64]; 2700 2701 sprintf(name, "idle thread %" B_PRIu32, i + 1); 2702 thread = new(&sIdleThreads[i]) Thread(name, 2703 i == 0 ? 
team_get_kernel_team_id() : -1, &gCPU[i]); 2704 if (thread == NULL || thread->Init(true) != B_OK) { 2705 panic("error creating idle thread struct\n"); 2706 return B_NO_MEMORY; 2707 } 2708 2709 gCPU[i].running_thread = thread; 2710 2711 thread->team = team_get_kernel_team(); 2712 thread->priority = B_IDLE_PRIORITY; 2713 thread->state = B_THREAD_RUNNING; 2714 sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1); 2715 thread->kernel_stack_area = find_area(name); 2716 2717 if (get_area_info(thread->kernel_stack_area, &info) != B_OK) 2718 panic("error finding idle kstack area\n"); 2719 2720 thread->kernel_stack_base = (addr_t)info.address; 2721 thread->kernel_stack_top = thread->kernel_stack_base + info.size; 2722 2723 thread->visible = true; 2724 insert_thread_into_team(thread->team, thread); 2725 2726 scheduler_on_thread_init(thread); 2727 } 2728 sUsedThreads = args->num_cpus; 2729 2730 // init the notification service 2731 new(&sNotificationService) ThreadNotificationService(); 2732 2733 sNotificationService.Register(); 2734 2735 // start the undertaker thread 2736 new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>(); 2737 sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries"); 2738 2739 thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker", 2740 B_DISPLAY_PRIORITY, NULL); 2741 if (undertakerThread < 0) 2742 panic("Failed to create undertaker thread!"); 2743 resume_thread(undertakerThread); 2744 2745 // set up some debugger commands 2746 add_debugger_command_etc("threads", &dump_thread_list, "List all threads", 2747 "[ <team> ]\n" 2748 "Prints a list of all existing threads, or, if a team ID is given,\n" 2749 "all threads of the specified team.\n" 2750 " <team> - The ID of the team whose threads shall be listed.\n", 0); 2751 add_debugger_command_etc("ready", &dump_thread_list, 2752 "List all ready threads", 2753 "\n" 2754 "Prints a list of all threads in ready state.\n", 0); 2755 add_debugger_command_etc("running", &dump_thread_list, 2756 "List all running threads", 2757 "\n" 2758 "Prints a list of all threads in running state.\n", 0); 2759 add_debugger_command_etc("waiting", &dump_thread_list, 2760 "List all waiting threads (optionally for a specific semaphore)", 2761 "[ <sem> ]\n" 2762 "Prints a list of all threads in waiting state. If a semaphore is\n" 2763 "specified, only the threads waiting on that semaphore are listed.\n" 2764 " <sem> - ID of the semaphore.\n", 0); 2765 add_debugger_command_etc("realtime", &dump_thread_list, 2766 "List all realtime threads", 2767 "\n" 2768 "Prints a list of all threads with realtime priority.\n", 0); 2769 add_debugger_command_etc("thread", &dump_thread_info, 2770 "Dump info about a particular thread", 2771 "[ -s ] ( <id> | <address> | <name> )*\n" 2772 "Prints information about the specified thread. 
If no argument is\n" 2773 "given the current thread is selected.\n" 2774 " -s - Print info in compact table form (like \"threads\").\n" 2775 " <id> - The ID of the thread.\n" 2776 " <address> - The address of the thread structure.\n" 2777 " <name> - The thread's name.\n", 0); 2778 add_debugger_command_etc("calling", &dump_thread_list, 2779 "Show all threads that have a specific address in their call chain", 2780 "{ <symbol-pattern> | <start> <end> }\n", 0); 2781 add_debugger_command_etc("unreal", &make_thread_unreal, 2782 "Set realtime priority threads to normal priority", 2783 "[ <id> ]\n" 2784 "Sets the priority of all realtime threads or, if given, the one\n" 2785 "with the specified ID to \"normal\" priority.\n" 2786 " <id> - The ID of the thread.\n", 0); 2787 add_debugger_command_etc("suspend", &make_thread_suspended, 2788 "Suspend a thread", 2789 "[ <id> ]\n" 2790 "Suspends the thread with the given ID. If no ID argument is given\n" 2791 "the current thread is selected.\n" 2792 " <id> - The ID of the thread.\n", 0); 2793 add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread", 2794 "<id>\n" 2795 "Resumes the specified thread, if it is currently suspended.\n" 2796 " <id> - The ID of the thread.\n", 0); 2797 add_debugger_command_etc("drop", &drop_into_debugger, 2798 "Drop a thread into the userland debugger", 2799 "<id>\n" 2800 "Drops the specified (userland) thread into the userland debugger\n" 2801 "after leaving the kernel debugger.\n" 2802 " <id> - The ID of the thread.\n", 0); 2803 add_debugger_command_etc("priority", &set_thread_prio, 2804 "Set a thread's priority", 2805 "<priority> [ <id> ]\n" 2806 "Sets the priority of the thread with the specified ID to the given\n" 2807 "priority. If no thread ID is given, the current thread is selected.\n" 2808 " <priority> - The thread's new priority (0 - 120)\n" 2809 " <id> - The ID of the thread.\n", 0); 2810 2811 return B_OK; 2812 } 2813 2814 2815 status_t 2816 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum) 2817 { 2818 // set up the cpu pointer in the not yet initialized per-cpu idle thread 2819 // so that get_current_cpu and friends will work, which is crucial for 2820 // a lot of low level routines 2821 sIdleThreads[cpuNum].cpu = &gCPU[cpuNum]; 2822 arch_thread_set_current_thread(&sIdleThreads[cpuNum]); 2823 return B_OK; 2824 } 2825 2826 2827 // #pragma mark - thread blocking API 2828 2829 2830 static status_t 2831 thread_block_timeout(timer* timer) 2832 { 2833 Thread* thread = (Thread*)timer->user_data; 2834 thread_unblock(thread, B_TIMED_OUT); 2835 2836 return B_HANDLED_INTERRUPT; 2837 } 2838 2839 2840 /*! Blocks the current thread. 2841 2842 The thread is blocked until someone else unblock it. Must be called after a 2843 call to thread_prepare_to_block(). If the thread has already been unblocked 2844 after the previous call to thread_prepare_to_block(), this function will 2845 return immediately. Cf. the documentation of thread_prepare_to_block() for 2846 more details. 2847 2848 The caller must hold the scheduler lock. 2849 2850 \param thread The current thread. 2851 \return The error code passed to the unblocking function. thread_interrupt() 2852 uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was 2853 successful while another error code indicates a failure (what that means 2854 depends on the client code). 
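	A rough (illustrative) sketch of the waiting side of the protocol, with
	the wait object, block type and interruption policy chosen by the caller:

		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "some wait object");
		// publish the waiter to the would-be unblocker, release caller
		// held locks ...
		status_t error = thread_block();

	Most callers use the thread_block()/thread_block_with_timeout() wrappers
	below rather than calling this function directly.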
2855 */
2856 static inline status_t
2857 thread_block_locked(Thread* thread)
2858 {
2859 if (thread->wait.status == 1) {
2860 // check for signals, if interruptible
2861 if (thread_is_interrupted(thread, thread->wait.flags)) {
2862 thread->wait.status = B_INTERRUPTED;
2863 } else
2864 scheduler_reschedule(B_THREAD_WAITING);
2865 }
2866
2867 return thread->wait.status;
2868 }
2869
2870
2871 /*! Blocks the current thread.
2872
2873 The function acquires the scheduler lock and calls thread_block_locked().
2874 See there for more information.
2875 */
2876 status_t
2877 thread_block()
2878 {
2879 InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
2880 return thread_block_locked(thread_get_current_thread());
2881 }
2882
2883
2884 /*! Blocks the current thread with a timeout.
2885
2886 The current thread is blocked until someone else unblocks it or the specified
2887 timeout occurs. Must be called after a call to thread_prepare_to_block(). If
2888 the thread has already been unblocked after the previous call to
2889 thread_prepare_to_block(), this function will return immediately. See
2890 thread_prepare_to_block() for more details.
2891
2892 The caller must not hold the scheduler lock.
2893
2894 \param timeoutFlags The standard timeout flags:
2895 - \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
2896 - \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
2897 the timeout shall occur.
2898 - \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
2899 is specified, too. Specifies that \a timeout is a real time, not a
2900 system time.
2901 If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are
2902 specified, an infinite timeout is implied and the function behaves like
2903 thread_block_locked().
2904 \return The error code passed to the unblocking function. thread_interrupt()
2905 uses \c B_INTERRUPTED. If the timeout occurred, \c B_TIMED_OUT is
2906 returned. By convention \c B_OK means that the wait was successful while
2907 another error code indicates a failure (what that means depends on the
2908 client code).
2909 */
2910 status_t
2911 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2912 {
2913 Thread* thread = thread_get_current_thread();
2914
2915 InterruptsSpinLocker locker(thread->scheduler_lock);
2916
2917 if (thread->wait.status != 1)
2918 return thread->wait.status;
2919
2920 bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
2921 && timeout != B_INFINITE_TIMEOUT;
2922
2923 if (useTimer) {
2924 // Timer flags: absolute/relative.
2925 uint32 timerFlags;
2926 if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2927 timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2928 } else {
2929 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2930 if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2931 timerFlags |= B_TIMER_REAL_TIME_BASE;
2932 }
2933
2934 // install the timer
2935 thread->wait.unblock_timer.user_data = thread;
2936 add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2937 timerFlags);
2938 }
2939
2940 // block
2941 status_t error = thread_block_locked(thread);
2942
2943 locker.Unlock();
2944
2945 // cancel timer, if it didn't fire
2946 if (error != B_TIMED_OUT && useTimer)
2947 cancel_timer(&thread->wait.unblock_timer);
2948
2949 return error;
2950 }
2951
2952
2953 /*! Unblocks a thread.
2954
2955 Acquires the scheduler lock and calls thread_unblock_locked().
2956 See there for more information.
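	This is the waking-side counterpart of the blocking protocol sketched
	above, e.g. (illustratively) thread_unblock(waitingThread, B_OK), where
	waitingThread is a thread that has gone through thread_prepare_to_block().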
2957 */ 2958 void 2959 thread_unblock(Thread* thread, status_t status) 2960 { 2961 InterruptsSpinLocker locker(thread->scheduler_lock); 2962 thread_unblock_locked(thread, status); 2963 } 2964 2965 2966 /*! Unblocks a userland-blocked thread. 2967 The caller must not hold any locks. 2968 */ 2969 static status_t 2970 user_unblock_thread(thread_id threadID, status_t status) 2971 { 2972 // get the thread 2973 Thread* thread = Thread::GetAndLock(threadID); 2974 if (thread == NULL) 2975 return B_BAD_THREAD_ID; 2976 BReference<Thread> threadReference(thread, true); 2977 ThreadLocker threadLocker(thread, true); 2978 2979 if (thread->user_thread == NULL) 2980 return B_NOT_ALLOWED; 2981 2982 InterruptsSpinLocker locker(thread->scheduler_lock); 2983 2984 set_ac(); 2985 if (thread->user_thread->wait_status > 0) { 2986 thread->user_thread->wait_status = status; 2987 clear_ac(); 2988 2989 // Even if the user_thread->wait_status was > 0, it may be the 2990 // case that this thread is actually blocked on something else. 2991 if (thread->wait.status > 0 2992 && thread->wait.type == THREAD_BLOCK_TYPE_USER) { 2993 thread_unblock_locked(thread, status); 2994 } 2995 } else 2996 clear_ac(); 2997 2998 return B_OK; 2999 } 3000 3001 3002 static bool 3003 thread_check_permissions(const Thread* currentThread, const Thread* thread, 3004 bool kernel) 3005 { 3006 if (kernel) 3007 return true; 3008 3009 if (thread->team->id == team_get_kernel_team_id()) 3010 return false; 3011 3012 if (thread->team == currentThread->team 3013 || currentThread->team->effective_uid == 0 3014 || thread->team->real_uid == currentThread->team->real_uid) 3015 return true; 3016 3017 return false; 3018 } 3019 3020 3021 static status_t 3022 thread_send_signal(thread_id id, uint32 number, int32 signalCode, 3023 int32 errorCode, bool kernel) 3024 { 3025 if (id <= 0) 3026 return B_BAD_VALUE; 3027 3028 Thread* currentThread = thread_get_current_thread(); 3029 Thread* thread = Thread::Get(id); 3030 if (thread == NULL) 3031 return B_BAD_THREAD_ID; 3032 BReference<Thread> threadReference(thread, true); 3033 3034 // check whether sending the signal is allowed 3035 if (!thread_check_permissions(currentThread, thread, kernel)) 3036 return B_NOT_ALLOWED; 3037 3038 Signal signal(number, signalCode, errorCode, currentThread->team->id); 3039 return send_signal_to_thread(thread, signal, 0); 3040 } 3041 3042 3043 // #pragma mark - public kernel API 3044 3045 3046 void 3047 exit_thread(status_t returnValue) 3048 { 3049 Thread *thread = thread_get_current_thread(); 3050 Team* team = thread->team; 3051 3052 thread->exit.status = returnValue; 3053 3054 // if called from a kernel thread, we don't deliver the signal, 3055 // we just exit directly to keep the user space behaviour of 3056 // this function 3057 if (team != team_get_kernel_team()) { 3058 // If this is its main thread, set the team's exit status. 
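		// The status is only recorded if no exit information has been set
		// yet (exit.initialized), so a status that was already established,
		// for instance by a fatal signal, is not overwritten here.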
3059 if (thread == team->main_thread) { 3060 TeamLocker teamLocker(team); 3061 3062 if (!team->exit.initialized) { 3063 team->exit.reason = CLD_EXITED; 3064 team->exit.signal = 0; 3065 team->exit.signaling_user = 0; 3066 team->exit.status = returnValue; 3067 team->exit.initialized = true; 3068 } 3069 3070 teamLocker.Unlock(); 3071 } 3072 3073 Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id); 3074 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE); 3075 } else 3076 thread_exit(); 3077 } 3078 3079 3080 static status_t 3081 thread_kill_thread(thread_id id, bool kernel) 3082 { 3083 return thread_send_signal(id, SIGKILLTHR, SI_USER, B_OK, kernel); 3084 } 3085 3086 3087 status_t 3088 kill_thread(thread_id id) 3089 { 3090 return thread_kill_thread(id, true); 3091 } 3092 3093 3094 status_t 3095 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize) 3096 { 3097 return send_data_etc(thread, code, buffer, bufferSize, 0); 3098 } 3099 3100 3101 int32 3102 receive_data(thread_id *sender, void *buffer, size_t bufferSize) 3103 { 3104 return receive_data_etc(sender, buffer, bufferSize, 0); 3105 } 3106 3107 3108 static bool 3109 thread_has_data(thread_id id, bool kernel) 3110 { 3111 Thread* currentThread = thread_get_current_thread(); 3112 Thread* thread; 3113 BReference<Thread> threadReference; 3114 if (id == currentThread->id) { 3115 thread = currentThread; 3116 } else { 3117 thread = Thread::Get(id); 3118 if (thread == NULL) 3119 return false; 3120 3121 threadReference.SetTo(thread, true); 3122 } 3123 3124 if (!kernel && thread->team != currentThread->team) 3125 return false; 3126 3127 int32 count; 3128 if (get_sem_count(thread->msg.read_sem, &count) != B_OK) 3129 return false; 3130 3131 return count == 0 ? false : true; 3132 } 3133 3134 3135 bool 3136 has_data(thread_id thread) 3137 { 3138 return thread_has_data(thread, true); 3139 } 3140 3141 3142 status_t 3143 _get_thread_info(thread_id id, thread_info *info, size_t size) 3144 { 3145 if (info == NULL || size != sizeof(thread_info) || id < B_OK) 3146 return B_BAD_VALUE; 3147 3148 // get the thread 3149 Thread* thread = Thread::GetAndLock(id); 3150 if (thread == NULL) 3151 return B_BAD_THREAD_ID; 3152 BReference<Thread> threadReference(thread, true); 3153 ThreadLocker threadLocker(thread, true); 3154 3155 // fill the info -- also requires the scheduler lock to be held 3156 InterruptsSpinLocker locker(thread->scheduler_lock); 3157 3158 fill_thread_info(thread, info, size); 3159 3160 return B_OK; 3161 } 3162 3163 3164 status_t 3165 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info, 3166 size_t size) 3167 { 3168 if (info == NULL || size != sizeof(thread_info) || teamID < 0) 3169 return B_BAD_VALUE; 3170 3171 int32 lastID = *_cookie; 3172 3173 // get the team 3174 Team* team = Team::GetAndLock(teamID); 3175 if (team == NULL) 3176 return B_BAD_VALUE; 3177 BReference<Team> teamReference(team, true); 3178 TeamLocker teamLocker(team, true); 3179 3180 Thread* thread = NULL; 3181 3182 if (lastID == 0) { 3183 // We start with the main thread 3184 thread = team->main_thread; 3185 } else { 3186 // Find the one thread with an ID greater than ours (as long as the IDs 3187 // don't wrap they are always sorted from highest to lowest). 3188 // TODO: That is broken not only when the IDs wrap, but also for the 3189 // kernel team, to which threads are added when they are dying. 
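		// Since the list is ordered from highest to lowest ID, the last
		// match found before the IDs drop to <= lastID is the smallest ID
		// that is still greater than lastID, i.e. the next thread in
		// ascending ID order.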
3190 for (Thread* next = team->thread_list; next != NULL; 3191 next = next->team_next) { 3192 if (next->id <= lastID) 3193 break; 3194 3195 thread = next; 3196 } 3197 } 3198 3199 if (thread == NULL) 3200 return B_BAD_VALUE; 3201 3202 lastID = thread->id; 3203 *_cookie = lastID; 3204 3205 ThreadLocker threadLocker(thread); 3206 InterruptsSpinLocker locker(thread->scheduler_lock); 3207 3208 fill_thread_info(thread, info, size); 3209 3210 return B_OK; 3211 } 3212 3213 3214 thread_id 3215 find_thread(const char* name) 3216 { 3217 if (name == NULL) 3218 return thread_get_current_thread_id(); 3219 3220 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 3221 3222 // Scanning the whole hash with the thread hash lock held isn't exactly 3223 // cheap, but since this function is probably used very rarely, and we 3224 // only need a read lock, it's probably acceptable. 3225 3226 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 3227 Thread* thread = it.Next();) { 3228 if (!thread->visible) 3229 continue; 3230 3231 if (strcmp(thread->name, name) == 0) 3232 return thread->id; 3233 } 3234 3235 return B_NAME_NOT_FOUND; 3236 } 3237 3238 3239 status_t 3240 rename_thread(thread_id id, const char* name) 3241 { 3242 if (name == NULL) 3243 return B_BAD_VALUE; 3244 3245 // get the thread 3246 Thread* thread = Thread::GetAndLock(id); 3247 if (thread == NULL) 3248 return B_BAD_THREAD_ID; 3249 BReference<Thread> threadReference(thread, true); 3250 ThreadLocker threadLocker(thread, true); 3251 3252 // check whether the operation is allowed 3253 if (thread->team != thread_get_current_thread()->team) 3254 return B_NOT_ALLOWED; 3255 3256 strlcpy(thread->name, name, B_OS_NAME_LENGTH); 3257 3258 team_id teamID = thread->team->id; 3259 3260 threadLocker.Unlock(); 3261 3262 // notify listeners 3263 sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id); 3264 // don't pass the thread structure, as it's unsafe, if it isn't ours 3265 3266 return B_OK; 3267 } 3268 3269 3270 static status_t 3271 thread_set_thread_priority(thread_id id, int32 priority, bool kernel) 3272 { 3273 // make sure the passed in priority is within bounds 3274 if (priority > THREAD_MAX_SET_PRIORITY) 3275 priority = THREAD_MAX_SET_PRIORITY; 3276 if (priority < THREAD_MIN_SET_PRIORITY) 3277 priority = THREAD_MIN_SET_PRIORITY; 3278 3279 // get the thread 3280 Thread* thread = Thread::GetAndLock(id); 3281 if (thread == NULL) 3282 return B_BAD_THREAD_ID; 3283 BReference<Thread> threadReference(thread, true); 3284 ThreadLocker threadLocker(thread, true); 3285 3286 // check whether the change is allowed 3287 if (thread_is_idle_thread(thread) || !thread_check_permissions( 3288 thread_get_current_thread(), thread, kernel)) 3289 return B_NOT_ALLOWED; 3290 3291 return scheduler_set_thread_priority(thread, priority); 3292 } 3293 3294 3295 status_t 3296 set_thread_priority(thread_id id, int32 priority) 3297 { 3298 return thread_set_thread_priority(id, priority, true); 3299 } 3300 3301 3302 status_t 3303 snooze_etc(bigtime_t timeout, int timebase, uint32 flags) 3304 { 3305 return common_snooze_etc(timeout, timebase, flags, NULL); 3306 } 3307 3308 3309 /*! snooze() for internal kernel use only; doesn't interrupt on signals. */ 3310 status_t 3311 snooze(bigtime_t timeout) 3312 { 3313 return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT); 3314 } 3315 3316 3317 /*! snooze_until() for internal kernel use only; doesn't interrupt on 3318 signals. 
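	For example, to sleep until one second from now on the system time base,
	a caller might (illustratively) use:

		snooze_until(system_time() + 1000000, B_SYSTEM_TIMEBASE);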
3319 */ 3320 status_t 3321 snooze_until(bigtime_t timeout, int timebase) 3322 { 3323 return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT); 3324 } 3325 3326 3327 status_t 3328 wait_for_thread(thread_id thread, status_t *_returnCode) 3329 { 3330 return wait_for_thread_etc(thread, 0, 0, _returnCode); 3331 } 3332 3333 3334 static status_t 3335 thread_suspend_thread(thread_id id, bool kernel) 3336 { 3337 return thread_send_signal(id, SIGSTOP, SI_USER, B_OK, kernel); 3338 } 3339 3340 3341 status_t 3342 suspend_thread(thread_id id) 3343 { 3344 return thread_suspend_thread(id, true); 3345 } 3346 3347 3348 static status_t 3349 thread_resume_thread(thread_id id, bool kernel) 3350 { 3351 // Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains 3352 // compatibility to BeOS which documents the combination of suspend_thread() 3353 // and resume_thread() to interrupt threads waiting on semaphores. 3354 return thread_send_signal(id, SIGNAL_CONTINUE_THREAD, SI_USER, B_OK, kernel); 3355 } 3356 3357 3358 status_t 3359 resume_thread(thread_id id) 3360 { 3361 return thread_resume_thread(id, true); 3362 } 3363 3364 3365 thread_id 3366 spawn_kernel_thread(thread_func function, const char *name, int32 priority, 3367 void *arg) 3368 { 3369 return thread_create_thread( 3370 ThreadCreationAttributes(function, name, priority, arg), 3371 true); 3372 } 3373 3374 3375 int 3376 getrlimit(int resource, struct rlimit * rlp) 3377 { 3378 status_t error = common_getrlimit(resource, rlp); 3379 if (error != B_OK) { 3380 errno = error; 3381 return -1; 3382 } 3383 3384 return 0; 3385 } 3386 3387 3388 int 3389 setrlimit(int resource, const struct rlimit * rlp) 3390 { 3391 status_t error = common_setrlimit(resource, rlp); 3392 if (error != B_OK) { 3393 errno = error; 3394 return -1; 3395 } 3396 3397 return 0; 3398 } 3399 3400 3401 // #pragma mark - syscalls 3402 3403 3404 void 3405 _user_exit_thread(status_t returnValue) 3406 { 3407 exit_thread(returnValue); 3408 } 3409 3410 3411 status_t 3412 _user_kill_thread(thread_id thread) 3413 { 3414 return thread_kill_thread(thread, false); 3415 } 3416 3417 3418 status_t 3419 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int)) 3420 { 3421 // check the cancel function 3422 if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction)) 3423 return B_BAD_VALUE; 3424 3425 // get and lock the thread 3426 Thread* thread = Thread::GetAndLock(threadID); 3427 if (thread == NULL) 3428 return B_BAD_THREAD_ID; 3429 BReference<Thread> threadReference(thread, true); 3430 ThreadLocker threadLocker(thread, true); 3431 3432 // only threads of the same team can be canceled 3433 if (thread->team != thread_get_current_thread()->team) 3434 return B_NOT_ALLOWED; 3435 3436 // set the cancel function 3437 thread->cancel_function = cancelFunction; 3438 3439 // send the cancellation signal to the thread 3440 InterruptsReadSpinLocker teamLocker(thread->team_lock); 3441 SpinLocker locker(thread->team->signal_lock); 3442 return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0); 3443 } 3444 3445 3446 status_t 3447 _user_resume_thread(thread_id thread) 3448 { 3449 return thread_resume_thread(thread, false); 3450 } 3451 3452 3453 status_t 3454 _user_suspend_thread(thread_id thread) 3455 { 3456 return thread_suspend_thread(thread, false); 3457 } 3458 3459 3460 status_t 3461 _user_rename_thread(thread_id thread, const char *userName) 3462 { 3463 char name[B_OS_NAME_LENGTH]; 3464 3465 if (!IS_USER_ADDRESS(userName) 3466 || userName == NULL 3467 || user_strlcpy(name, 
userName, B_OS_NAME_LENGTH) < B_OK) 3468 return B_BAD_ADDRESS; 3469 3470 // rename_thread() forbids thread renames across teams, so we don't 3471 // need a "kernel" flag here. 3472 return rename_thread(thread, name); 3473 } 3474 3475 3476 int32 3477 _user_set_thread_priority(thread_id thread, int32 newPriority) 3478 { 3479 return thread_set_thread_priority(thread, newPriority, false); 3480 } 3481 3482 3483 thread_id 3484 _user_spawn_thread(thread_creation_attributes* userAttributes) 3485 { 3486 // copy the userland structure to the kernel 3487 char nameBuffer[B_OS_NAME_LENGTH]; 3488 ThreadCreationAttributes attributes; 3489 status_t error = attributes.InitFromUserAttributes(userAttributes, 3490 nameBuffer); 3491 if (error != B_OK) 3492 return error; 3493 3494 // create the thread 3495 thread_id threadID = thread_create_thread(attributes, false); 3496 3497 if (threadID >= 0) 3498 user_debug_thread_created(threadID); 3499 3500 return threadID; 3501 } 3502 3503 3504 status_t 3505 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags, 3506 bigtime_t* userRemainingTime) 3507 { 3508 // We need to store more syscall restart parameters than usual and need a 3509 // somewhat different handling. Hence we can't use 3510 // syscall_restart_handle_timeout_pre() but do the job ourselves. 3511 struct restart_parameters { 3512 bigtime_t timeout; 3513 clockid_t timebase; 3514 uint32 flags; 3515 }; 3516 3517 Thread* thread = thread_get_current_thread(); 3518 3519 if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) { 3520 // The syscall was restarted. Fetch the parameters from the stored 3521 // restart parameters. 3522 restart_parameters* restartParameters 3523 = (restart_parameters*)thread->syscall_restart.parameters; 3524 timeout = restartParameters->timeout; 3525 timebase = restartParameters->timebase; 3526 flags = restartParameters->flags; 3527 } else { 3528 // convert relative timeouts to absolute ones 3529 if ((flags & B_RELATIVE_TIMEOUT) != 0) { 3530 // not restarted yet and the flags indicate a relative timeout 3531 3532 // Make sure we use the system time base, so real-time clock changes 3533 // won't affect our wait. 3534 flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE; 3535 if (timebase == CLOCK_REALTIME) 3536 timebase = CLOCK_MONOTONIC; 3537 3538 // get the current time and make the timeout absolute 3539 bigtime_t now; 3540 status_t error = user_timer_get_clock(timebase, now); 3541 if (error != B_OK) 3542 return error; 3543 3544 timeout += now; 3545 3546 // deal with overflow 3547 if (timeout < 0) 3548 timeout = B_INFINITE_TIMEOUT; 3549 3550 flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT; 3551 } else 3552 flags |= B_ABSOLUTE_TIMEOUT; 3553 } 3554 3555 // snooze 3556 bigtime_t remainingTime; 3557 status_t error = common_snooze_etc(timeout, timebase, 3558 flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, 3559 userRemainingTime != NULL ? &remainingTime : NULL); 3560 3561 // If interrupted, copy the remaining time back to userland and prepare the 3562 // syscall restart. 
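	// The normalized absolute timeout is what gets stored, so a restarted
	// syscall keeps waiting toward the originally computed deadline instead
	// of starting a new full timeout.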
3563 if (error == B_INTERRUPTED) { 3564 if (userRemainingTime != NULL 3565 && (!IS_USER_ADDRESS(userRemainingTime) 3566 || user_memcpy(userRemainingTime, &remainingTime, 3567 sizeof(remainingTime)) != B_OK)) { 3568 return B_BAD_ADDRESS; 3569 } 3570 3571 // store the normalized values in the restart parameters 3572 restart_parameters* restartParameters 3573 = (restart_parameters*)thread->syscall_restart.parameters; 3574 restartParameters->timeout = timeout; 3575 restartParameters->timebase = timebase; 3576 restartParameters->flags = flags; 3577 3578 // restart the syscall, if possible 3579 atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL); 3580 } 3581 3582 return error; 3583 } 3584 3585 3586 void 3587 _user_thread_yield(void) 3588 { 3589 thread_yield(); 3590 } 3591 3592 3593 status_t 3594 _user_get_thread_info(thread_id id, thread_info *userInfo) 3595 { 3596 thread_info info; 3597 status_t status; 3598 3599 if (!IS_USER_ADDRESS(userInfo)) 3600 return B_BAD_ADDRESS; 3601 3602 status = _get_thread_info(id, &info, sizeof(thread_info)); 3603 3604 if (status >= B_OK 3605 && user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3606 return B_BAD_ADDRESS; 3607 3608 return status; 3609 } 3610 3611 3612 status_t 3613 _user_get_next_thread_info(team_id team, int32 *userCookie, 3614 thread_info *userInfo) 3615 { 3616 status_t status; 3617 thread_info info; 3618 int32 cookie; 3619 3620 if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo) 3621 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK) 3622 return B_BAD_ADDRESS; 3623 3624 status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info)); 3625 if (status < B_OK) 3626 return status; 3627 3628 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK 3629 || user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3630 return B_BAD_ADDRESS; 3631 3632 return status; 3633 } 3634 3635 3636 thread_id 3637 _user_find_thread(const char *userName) 3638 { 3639 char name[B_OS_NAME_LENGTH]; 3640 3641 if (userName == NULL) 3642 return find_thread(NULL); 3643 3644 if (!IS_USER_ADDRESS(userName) 3645 || user_strlcpy(name, userName, sizeof(name)) < B_OK) 3646 return B_BAD_ADDRESS; 3647 3648 return find_thread(name); 3649 } 3650 3651 3652 status_t 3653 _user_wait_for_thread(thread_id id, status_t *userReturnCode) 3654 { 3655 status_t returnCode; 3656 status_t status; 3657 3658 if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode)) 3659 return B_BAD_ADDRESS; 3660 3661 status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode); 3662 3663 if (status == B_OK && userReturnCode != NULL 3664 && user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) { 3665 return B_BAD_ADDRESS; 3666 } 3667 3668 return syscall_restart_handle_post(status); 3669 } 3670 3671 3672 bool 3673 _user_has_data(thread_id thread) 3674 { 3675 return thread_has_data(thread, false); 3676 } 3677 3678 3679 status_t 3680 _user_send_data(thread_id thread, int32 code, const void *buffer, 3681 size_t bufferSize) 3682 { 3683 if (buffer != NULL && !IS_USER_ADDRESS(buffer)) 3684 return B_BAD_ADDRESS; 3685 3686 return send_data_etc(thread, code, buffer, bufferSize, 3687 B_KILL_CAN_INTERRUPT); 3688 // supports userland buffers 3689 } 3690 3691 3692 status_t 3693 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize) 3694 { 3695 thread_id sender; 3696 status_t code; 3697 3698 if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL) 3699 || (!IS_USER_ADDRESS(buffer) && buffer != NULL)) { 3700 return B_BAD_ADDRESS; 3701 } 3702 
3703 code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT); 3704 // supports userland buffers 3705 3706 if (_userSender != NULL) 3707 if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK) 3708 return B_BAD_ADDRESS; 3709 3710 return code; 3711 } 3712 3713 3714 status_t 3715 _user_block_thread(uint32 flags, bigtime_t timeout) 3716 { 3717 syscall_restart_handle_timeout_pre(flags, timeout); 3718 flags |= B_CAN_INTERRUPT; 3719 3720 Thread* thread = thread_get_current_thread(); 3721 ThreadLocker threadLocker(thread); 3722 3723 // check, if already done 3724 set_ac(); 3725 if (thread->user_thread->wait_status <= 0) { 3726 status_t status = thread->user_thread->wait_status; 3727 clear_ac(); 3728 return status; 3729 } 3730 clear_ac(); 3731 3732 // nope, so wait 3733 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_USER, NULL); 3734 3735 threadLocker.Unlock(); 3736 3737 status_t status = thread_block_with_timeout(flags, timeout); 3738 3739 threadLocker.Lock(); 3740 3741 // Interruptions or timeouts can race with other threads unblocking us. 3742 // Favor a wake-up by another thread, i.e. if someone changed the wait 3743 // status, use that. 3744 set_ac(); 3745 status_t oldStatus = thread->user_thread->wait_status; 3746 if (oldStatus > 0) { 3747 thread->user_thread->wait_status = status; 3748 clear_ac(); 3749 } else { 3750 clear_ac(); 3751 status = oldStatus; 3752 } 3753 3754 threadLocker.Unlock(); 3755 3756 return syscall_restart_handle_timeout_post(status, timeout); 3757 } 3758 3759 3760 status_t 3761 _user_unblock_thread(thread_id threadID, status_t status) 3762 { 3763 status_t error = user_unblock_thread(threadID, status); 3764 3765 if (error == B_OK) 3766 scheduler_reschedule_if_necessary(); 3767 3768 return error; 3769 } 3770 3771 3772 status_t 3773 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status) 3774 { 3775 enum { 3776 MAX_USER_THREADS_TO_UNBLOCK = 128 3777 }; 3778 3779 if (userThreads == NULL || !IS_USER_ADDRESS(userThreads)) 3780 return B_BAD_ADDRESS; 3781 if (count > MAX_USER_THREADS_TO_UNBLOCK) 3782 return B_BAD_VALUE; 3783 3784 thread_id threads[MAX_USER_THREADS_TO_UNBLOCK]; 3785 if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK) 3786 return B_BAD_ADDRESS; 3787 3788 for (uint32 i = 0; i < count; i++) 3789 user_unblock_thread(threads[i], status); 3790 3791 scheduler_reschedule_if_necessary(); 3792 3793 return B_OK; 3794 } 3795 3796 3797 // TODO: the following two functions don't belong here 3798 3799 3800 int 3801 _user_getrlimit(int resource, struct rlimit *urlp) 3802 { 3803 struct rlimit rl; 3804 int ret; 3805 3806 if (urlp == NULL) 3807 return EINVAL; 3808 3809 if (!IS_USER_ADDRESS(urlp)) 3810 return B_BAD_ADDRESS; 3811 3812 ret = common_getrlimit(resource, &rl); 3813 3814 if (ret == 0) { 3815 ret = user_memcpy(urlp, &rl, sizeof(struct rlimit)); 3816 if (ret < 0) 3817 return ret; 3818 3819 return 0; 3820 } 3821 3822 return ret; 3823 } 3824 3825 3826 int 3827 _user_setrlimit(int resource, const struct rlimit *userResourceLimit) 3828 { 3829 struct rlimit resourceLimit; 3830 3831 if (userResourceLimit == NULL) 3832 return EINVAL; 3833 3834 if (!IS_USER_ADDRESS(userResourceLimit) 3835 || user_memcpy(&resourceLimit, userResourceLimit, 3836 sizeof(struct rlimit)) < B_OK) 3837 return B_BAD_ADDRESS; 3838 3839 return common_setrlimit(resource, &resourceLimit); 3840 } 3841