/*
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Threading routines */


#include <thread.h>

#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

#include <algorithm>

#include <OS.h>

#include <util/AutoLock.h>

#include <arch/debug.h>
#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <cpu.h>
#include <int.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <real_time_clock.h>
#include <slab/Slab.h>
#include <smp.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <tls.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <wait_for_objects.h>

#include "TeamThreadTables.h"


//#define TRACE_THREAD
#ifdef TRACE_THREAD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define THREAD_MAX_MESSAGE_SIZE 65536


// #pragma mark - ThreadHashTable


typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;


// thread list
static Thread sIdleThreads[SMP_MAX_CPUS];
static ThreadHashTable sThreadHash;
static rw_spinlock sThreadHashLock = B_RW_SPINLOCK_INITIALIZER;
static thread_id sNextThreadID = 2;
	// ID 1 is allocated for the kernel by Team::Team() behind our back

// some arbitrarily chosen limits -- should probably depend on the available
// memory
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;

spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;


struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
	Thread* thread;
	team_id teamID;

	UndertakerEntry(Thread* thread, team_id teamID)
		:
		thread(thread),
		teamID(teamID)
	{
	}
};


struct ThreadEntryArguments {
	status_t (*kernelFunction)(void* argument);
	void* argument;
	bool enterUserland;
};

struct UserThreadEntryArguments : ThreadEntryArguments {
	addr_t userlandEntry;
	void* userlandArgument1;
	void* userlandArgument2;
	pthread_t pthread;
	arch_fork_arg* forkArgs;
	uint32 flags;
};


class ThreadNotificationService : public DefaultNotificationService {
public:
	ThreadNotificationService()
		: DefaultNotificationService("threads")
	{
	}

	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
		Thread* thread = NULL)
	{
		char eventBuffer[180];
		KMessage event;
		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
		event.AddInt32("event", eventCode);
		event.AddInt32("team", teamID);
		event.AddInt32("thread", threadID);
		if (thread != NULL)
			event.AddPointer("threadStruct", thread);

		DefaultNotificationService::Notify(event, eventCode);
	}

	void Notify(uint32 eventCode, Thread* thread)
	{
		// pass the team and thread IDs in the order the four-argument
		// overload expects them
		return Notify(eventCode, thread->team->id, thread->id, thread);
	}
};


static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
static
spinlock sUndertakerLock = B_SPINLOCK_INITIALIZER; 148 static ConditionVariable sUndertakerCondition; 149 static ThreadNotificationService sNotificationService; 150 151 152 // object cache to allocate thread structures from 153 static object_cache* sThreadCache; 154 155 156 // #pragma mark - Thread 157 158 159 /*! Constructs a thread. 160 161 \param name The thread's name. 162 \param threadID The ID to be assigned to the new thread. If 163 \code < 0 \endcode a fresh one is allocated. 164 \param cpu The CPU the thread shall be assigned. 165 */ 166 Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu) 167 : 168 flags(0), 169 serial_number(-1), 170 hash_next(NULL), 171 team_next(NULL), 172 priority(-1), 173 io_priority(-1), 174 cpu(cpu), 175 previous_cpu(NULL), 176 pinned_to_cpu(0), 177 sig_block_mask(0), 178 sigsuspend_original_unblocked_mask(0), 179 user_signal_context(NULL), 180 signal_stack_base(0), 181 signal_stack_size(0), 182 signal_stack_enabled(false), 183 in_kernel(true), 184 has_yielded(false), 185 user_thread(NULL), 186 fault_handler(0), 187 page_faults_allowed(1), 188 team(NULL), 189 select_infos(NULL), 190 kernel_stack_area(-1), 191 kernel_stack_base(0), 192 user_stack_area(-1), 193 user_stack_base(0), 194 user_local_storage(0), 195 kernel_errno(0), 196 user_time(0), 197 kernel_time(0), 198 last_time(0), 199 cpu_clock_offset(0), 200 post_interrupt_callback(NULL), 201 post_interrupt_data(NULL) 202 { 203 id = threadID >= 0 ? threadID : allocate_thread_id(); 204 visible = false; 205 206 // init locks 207 char lockName[32]; 208 snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id); 209 mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME); 210 211 B_INITIALIZE_SPINLOCK(&time_lock); 212 B_INITIALIZE_SPINLOCK(&scheduler_lock); 213 B_INITIALIZE_RW_SPINLOCK(&team_lock); 214 215 // init name 216 if (name != NULL) 217 strlcpy(this->name, name, B_OS_NAME_LENGTH); 218 else 219 strcpy(this->name, "unnamed thread"); 220 221 exit.status = 0; 222 223 list_init(&exit.waiters); 224 225 exit.sem = -1; 226 msg.write_sem = -1; 227 msg.read_sem = -1; 228 229 // add to thread table -- yet invisible 230 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock); 231 sThreadHash.Insert(this); 232 } 233 234 235 Thread::~Thread() 236 { 237 // Delete resources that should actually be deleted by the thread itself, 238 // when it exited, but that might still exist, if the thread was never run. 
239 240 if (user_stack_area >= 0) 241 delete_area(user_stack_area); 242 243 DeleteUserTimers(false); 244 245 // delete the resources, that may remain in either case 246 247 if (kernel_stack_area >= 0) 248 delete_area(kernel_stack_area); 249 250 fPendingSignals.Clear(); 251 252 if (exit.sem >= 0) 253 delete_sem(exit.sem); 254 if (msg.write_sem >= 0) 255 delete_sem(msg.write_sem); 256 if (msg.read_sem >= 0) 257 delete_sem(msg.read_sem); 258 259 scheduler_on_thread_destroy(this); 260 261 mutex_destroy(&fLock); 262 263 // remove from thread table 264 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock); 265 sThreadHash.Remove(this); 266 } 267 268 269 /*static*/ status_t 270 Thread::Create(const char* name, Thread*& _thread) 271 { 272 Thread* thread = new Thread(name, -1, NULL); 273 if (thread == NULL) 274 return B_NO_MEMORY; 275 276 status_t error = thread->Init(false); 277 if (error != B_OK) { 278 delete thread; 279 return error; 280 } 281 282 _thread = thread; 283 return B_OK; 284 } 285 286 287 /*static*/ Thread* 288 Thread::Get(thread_id id) 289 { 290 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 291 Thread* thread = sThreadHash.Lookup(id); 292 if (thread != NULL) 293 thread->AcquireReference(); 294 return thread; 295 } 296 297 298 /*static*/ Thread* 299 Thread::GetAndLock(thread_id id) 300 { 301 // look it up and acquire a reference 302 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 303 Thread* thread = sThreadHash.Lookup(id); 304 if (thread == NULL) 305 return NULL; 306 307 thread->AcquireReference(); 308 threadHashLocker.Unlock(); 309 310 // lock and check, if it is still in the hash table 311 thread->Lock(); 312 threadHashLocker.Lock(); 313 314 if (sThreadHash.Lookup(id) == thread) 315 return thread; 316 317 threadHashLocker.Unlock(); 318 319 // nope, the thread is no longer in the hash table 320 thread->UnlockAndReleaseReference(); 321 322 return NULL; 323 } 324 325 326 /*static*/ Thread* 327 Thread::GetDebug(thread_id id) 328 { 329 return sThreadHash.Lookup(id, false); 330 } 331 332 333 /*static*/ bool 334 Thread::IsAlive(thread_id id) 335 { 336 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 337 return sThreadHash.Lookup(id) != NULL; 338 } 339 340 341 void* 342 Thread::operator new(size_t size) 343 { 344 return object_cache_alloc(sThreadCache, 0); 345 } 346 347 348 void* 349 Thread::operator new(size_t, void* pointer) 350 { 351 return pointer; 352 } 353 354 355 void 356 Thread::operator delete(void* pointer, size_t size) 357 { 358 object_cache_free(sThreadCache, pointer, 0); 359 } 360 361 362 status_t 363 Thread::Init(bool idleThread) 364 { 365 status_t error = scheduler_on_thread_create(this, idleThread); 366 if (error != B_OK) 367 return error; 368 369 char temp[64]; 370 snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id); 371 exit.sem = create_sem(0, temp); 372 if (exit.sem < 0) 373 return exit.sem; 374 375 snprintf(temp, sizeof(temp), "%s send", name); 376 msg.write_sem = create_sem(1, temp); 377 if (msg.write_sem < 0) 378 return msg.write_sem; 379 380 snprintf(temp, sizeof(temp), "%s receive", name); 381 msg.read_sem = create_sem(0, temp); 382 if (msg.read_sem < 0) 383 return msg.read_sem; 384 385 error = arch_thread_init_thread_struct(this); 386 if (error != B_OK) 387 return error; 388 389 return B_OK; 390 } 391 392 393 /*! Checks whether the thread is still in the thread hash table. 
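	A thread counts as alive as long as it is still registered (and visible)
	in the global thread hash table.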
*/
bool
Thread::IsAlive() const
{
	InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);

	return sThreadHash.Lookup(id) != NULL;
}


void
Thread::ResetSignalsOnExec()
{
	// We are supposed to keep the pending signals and the signal mask. Only
	// the signal stack, if set, shall be unset.

	sigsuspend_original_unblocked_mask = 0;
	user_signal_context = NULL;
	signal_stack_base = 0;
	signal_stack_size = 0;
	signal_stack_enabled = false;
}


/*!	Adds the given user timer to the thread and, if user-defined, assigns it an
	ID.

	The caller must hold the thread's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Thread::AddUserTimer(UserTimer* timer)
{
	// If the timer is user-defined, check the timer limit and increment the
	// user-defined count.
	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the thread.

	The caller must hold the thread's lock.

	\param timer The timer to be removed.
*/
void
Thread::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		team->UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the thread.

	The caller must hold the thread's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are
		deleted, otherwise all timers are deleted.
*/
void
Thread::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	if (count > 0)
		team->UserDefinedTimersRemoved(count);
}


void
Thread::DeactivateCPUTimeUserTimers()
{
	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();
}


// #pragma mark - ThreadListIterator


ThreadListIterator::ThreadListIterator()
{
	// queue the entry
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	sThreadHash.InsertIteratorEntry(&fEntry);
}


ThreadListIterator::~ThreadListIterator()
{
	// remove the entry
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	sThreadHash.RemoveIteratorEntry(&fEntry);
}


Thread*
ThreadListIterator::Next()
{
	// get the next thread -- if there is one, acquire a reference to it
	InterruptsWriteSpinLocker locker(sThreadHashLock);
	Thread* thread = sThreadHash.NextElement(&fEntry);
	if (thread != NULL)
		thread->AcquireReference();

	return thread;
}


// #pragma mark - ThreadCreationAttributes


ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
	const char* name, int32 priority, void* arg, team_id team,
	Thread* thread)
{
	this->entry = NULL;
	this->name = name;
	this->priority = priority;
	this->args1 = NULL;
	this->args2 = NULL;
	this->stack_address = NULL;
	this->stack_size = 0;
	this->guard_size = 0;
	this->pthread = NULL;
	this->flags = 0;
	this->team = team >= 0 ?
team : team_get_kernel_team()->id; 533 this->thread = thread; 534 this->signal_mask = 0; 535 this->additional_stack_size = 0; 536 this->kernelEntry = function; 537 this->kernelArgument = arg; 538 this->forkArgs = NULL; 539 } 540 541 542 /*! Initializes the structure from a userland structure. 543 \param userAttributes The userland structure (must be a userland address). 544 \param nameBuffer A character array of at least size B_OS_NAME_LENGTH, 545 which will be used for the \c name field, if the userland structure has 546 a name. The buffer must remain valid as long as this structure is in 547 use afterwards (or until it is reinitialized). 548 \return \c B_OK, if the initialization went fine, another error code 549 otherwise. 550 */ 551 status_t 552 ThreadCreationAttributes::InitFromUserAttributes( 553 const thread_creation_attributes* userAttributes, char* nameBuffer) 554 { 555 if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes) 556 || user_memcpy((thread_creation_attributes*)this, userAttributes, 557 sizeof(thread_creation_attributes)) != B_OK) { 558 return B_BAD_ADDRESS; 559 } 560 561 if (stack_size != 0 562 && (stack_size < MIN_USER_STACK_SIZE 563 || stack_size > MAX_USER_STACK_SIZE)) { 564 return B_BAD_VALUE; 565 } 566 567 if (entry == NULL || !IS_USER_ADDRESS(entry) 568 || (stack_address != NULL && !IS_USER_ADDRESS(stack_address)) 569 || (name != NULL && (!IS_USER_ADDRESS(name) 570 || user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) { 571 return B_BAD_ADDRESS; 572 } 573 574 name = name != NULL ? nameBuffer : "user thread"; 575 576 // kernel only attributes (not in thread_creation_attributes): 577 Thread* currentThread = thread_get_current_thread(); 578 team = currentThread->team->id; 579 thread = NULL; 580 signal_mask = currentThread->sig_block_mask; 581 // inherit the current thread's signal mask 582 additional_stack_size = 0; 583 kernelEntry = NULL; 584 kernelArgument = NULL; 585 forkArgs = NULL; 586 587 return B_OK; 588 } 589 590 591 // #pragma mark - private functions 592 593 594 /*! Inserts a thread into a team. 595 The caller must hold the team's lock, the thread's lock, and the scheduler 596 lock. 597 */ 598 static void 599 insert_thread_into_team(Team *team, Thread *thread) 600 { 601 thread->team_next = team->thread_list; 602 team->thread_list = thread; 603 team->num_threads++; 604 605 if (team->num_threads == 1) { 606 // this was the first thread 607 team->main_thread = thread; 608 } 609 thread->team = team; 610 } 611 612 613 /*! Removes a thread from a team. 614 The caller must hold the team's lock, the thread's lock, and the scheduler 615 lock. 
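	If the thread is not in the team's thread list, the function does nothing.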
616 */ 617 static void 618 remove_thread_from_team(Team *team, Thread *thread) 619 { 620 Thread *temp, *last = NULL; 621 622 for (temp = team->thread_list; temp != NULL; temp = temp->team_next) { 623 if (temp == thread) { 624 if (last == NULL) 625 team->thread_list = temp->team_next; 626 else 627 last->team_next = temp->team_next; 628 629 team->num_threads--; 630 break; 631 } 632 last = temp; 633 } 634 } 635 636 637 static status_t 638 enter_userspace(Thread* thread, UserThreadEntryArguments* args) 639 { 640 status_t error = arch_thread_init_tls(thread); 641 if (error != B_OK) { 642 dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32 643 ")\n", thread->name, thread->id); 644 free(args->forkArgs); 645 return error; 646 } 647 648 user_debug_update_new_thread_flags(thread); 649 650 // init the thread's user_thread 651 user_thread* userThread = thread->user_thread; 652 set_ac(); 653 userThread->pthread = args->pthread; 654 userThread->flags = 0; 655 userThread->wait_status = B_OK; 656 userThread->defer_signals 657 = (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0; 658 userThread->pending_signals = 0; 659 clear_ac(); 660 661 if (args->forkArgs != NULL) { 662 // This is a fork()ed thread. Copy the fork args onto the stack and 663 // free them. 664 arch_fork_arg archArgs = *args->forkArgs; 665 free(args->forkArgs); 666 667 arch_restore_fork_frame(&archArgs); 668 // this one won't return here 669 return B_ERROR; 670 } 671 672 // Jump to the entry point in user space. Only returns, if something fails. 673 return arch_thread_enter_userspace(thread, args->userlandEntry, 674 args->userlandArgument1, args->userlandArgument2); 675 } 676 677 678 status_t 679 thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction, 680 void* argument1, void* argument2) 681 { 682 UserThreadEntryArguments entryArgs; 683 entryArgs.kernelFunction = NULL; 684 entryArgs.argument = NULL; 685 entryArgs.enterUserland = true; 686 entryArgs.userlandEntry = (addr_t)entryFunction; 687 entryArgs.userlandArgument1 = argument1; 688 entryArgs.userlandArgument2 = argument2; 689 entryArgs.pthread = NULL; 690 entryArgs.forkArgs = NULL; 691 entryArgs.flags = 0; 692 693 return enter_userspace(thread, &entryArgs); 694 } 695 696 697 static void 698 common_thread_entry(void* _args) 699 { 700 Thread* thread = thread_get_current_thread(); 701 702 // The thread is new and has been scheduled the first time. 703 704 scheduler_new_thread_entry(thread); 705 706 // unlock the scheduler lock and enable interrupts 707 release_spinlock(&thread->scheduler_lock); 708 enable_interrupts(); 709 710 // call the kernel function, if any 711 ThreadEntryArguments* args = (ThreadEntryArguments*)_args; 712 if (args->kernelFunction != NULL) 713 args->kernelFunction(args->argument); 714 715 // If requested, enter userland, now. 716 if (args->enterUserland) { 717 enter_userspace(thread, (UserThreadEntryArguments*)args); 718 // only returns or error 719 720 // If that's the team's main thread, init the team exit info. 721 if (thread == thread->team->main_thread) 722 team_init_exit_info_on_error(thread->team); 723 } 724 725 // we're done 726 thread_exit(); 727 } 728 729 730 /*! Prepares the given thread's kernel stack for executing its entry function. 731 732 The data pointed to by \a data of size \a dataSize are copied to the 733 thread's kernel stack. A pointer to the copy's data is passed to the entry 734 function. The entry function is common_thread_entry(). 735 736 \param thread The thread. 
737 \param data Pointer to data to be copied to the thread's stack and passed 738 to the entry function. 739 \param dataSize The size of \a data. 740 */ 741 static void 742 init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize) 743 { 744 uint8* stack = (uint8*)thread->kernel_stack_base; 745 uint8* stackTop = (uint8*)thread->kernel_stack_top; 746 747 // clear (or rather invalidate) the kernel stack contents, if compiled with 748 // debugging 749 #if KDEBUG > 0 750 # if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS) 751 memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc, 752 KERNEL_STACK_SIZE); 753 # else 754 memset(stack, 0xcc, KERNEL_STACK_SIZE); 755 # endif 756 #endif 757 758 // copy the data onto the stack, with 16-byte alignment to be on the safe 759 // side 760 void* clonedData; 761 #ifdef STACK_GROWS_DOWNWARDS 762 clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16); 763 stackTop = (uint8*)clonedData; 764 #else 765 clonedData = (void*)ROUNDUP((addr_t)stack, 16); 766 stack = (uint8*)clonedData + ROUNDUP(dataSize, 16); 767 #endif 768 769 memcpy(clonedData, data, dataSize); 770 771 arch_thread_init_kthread_stack(thread, stack, stackTop, 772 &common_thread_entry, clonedData); 773 } 774 775 776 static status_t 777 create_thread_user_stack(Team* team, Thread* thread, void* _stackBase, 778 size_t stackSize, size_t additionalSize, size_t guardSize, 779 char* nameBuffer) 780 { 781 area_id stackArea = -1; 782 uint8* stackBase = (uint8*)_stackBase; 783 784 if (stackBase != NULL) { 785 // A stack has been specified. It must be large enough to hold the 786 // TLS space at least. Guard pages are ignored for existing stacks. 787 STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE); 788 if (stackSize < MIN_USER_STACK_SIZE) 789 return B_BAD_VALUE; 790 791 stackSize -= TLS_SIZE; 792 } else { 793 // No user-defined stack -- allocate one. For non-main threads the stack 794 // will be between USER_STACK_REGION and the main thread stack area. For 795 // a main thread the position is fixed. 796 797 guardSize = PAGE_ALIGN(guardSize); 798 799 if (stackSize == 0) { 800 // Use the default size (a different one for a main thread). 801 stackSize = thread->id == team->id 802 ? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE; 803 } else { 804 // Verify that the given stack size is large enough. 
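			// (The same minimum is enforced for caller-supplied stacks in the
			// branch above.)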
805 if (stackSize < MIN_USER_STACK_SIZE) 806 return B_BAD_VALUE; 807 808 stackSize = PAGE_ALIGN(stackSize); 809 } 810 811 size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE 812 + additionalSize); 813 814 snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack", 815 thread->name, thread->id); 816 817 stackBase = (uint8*)USER_STACK_REGION; 818 819 virtual_address_restrictions virtualRestrictions = {}; 820 virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS; 821 virtualRestrictions.address = (void*)stackBase; 822 823 physical_address_restrictions physicalRestrictions = {}; 824 825 stackArea = create_area_etc(team->id, nameBuffer, 826 areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA, 827 0, guardSize, &virtualRestrictions, &physicalRestrictions, 828 (void**)&stackBase); 829 if (stackArea < 0) 830 return stackArea; 831 } 832 833 // set the stack 834 ThreadLocker threadLocker(thread); 835 #ifdef STACK_GROWS_DOWNWARDS 836 thread->user_stack_base = (addr_t)stackBase + guardSize; 837 #else 838 thread->user_stack_base = (addr_t)stackBase; 839 #endif 840 thread->user_stack_size = stackSize; 841 thread->user_stack_area = stackArea; 842 843 return B_OK; 844 } 845 846 847 status_t 848 thread_create_user_stack(Team* team, Thread* thread, void* stackBase, 849 size_t stackSize, size_t additionalSize) 850 { 851 char nameBuffer[B_OS_NAME_LENGTH]; 852 return create_thread_user_stack(team, thread, stackBase, stackSize, 853 additionalSize, USER_STACK_GUARD_SIZE, nameBuffer); 854 } 855 856 857 /*! Creates a new thread. 858 859 \param attributes The thread creation attributes, specifying the team in 860 which to create the thread, as well as a whole bunch of other arguments. 861 \param kernel \c true, if a kernel-only thread shall be created, \c false, 862 if the thread shall also be able to run in userland. 863 \return The ID of the newly created thread (>= 0) or an error code on 864 failure. 865 */ 866 thread_id 867 thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel) 868 { 869 status_t status = B_OK; 870 871 TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name, 872 attributes.thread, kernel ? "kernel" : "user")); 873 874 // get the team 875 Team* team = Team::Get(attributes.team); 876 if (team == NULL) 877 return B_BAD_TEAM_ID; 878 BReference<Team> teamReference(team, true); 879 880 // If a thread object is given, acquire a reference to it, otherwise create 881 // a new thread object with the given attributes. 882 Thread* thread = attributes.thread; 883 if (thread != NULL) { 884 thread->AcquireReference(); 885 } else { 886 status = Thread::Create(attributes.name, thread); 887 if (status != B_OK) 888 return status; 889 } 890 BReference<Thread> threadReference(thread, true); 891 892 thread->team = team; 893 // set already, so, if something goes wrong, the team pointer is 894 // available for deinitialization 895 thread->priority = attributes.priority == -1 896 ? 
B_NORMAL_PRIORITY : attributes.priority; 897 thread->priority = std::max(thread->priority, 898 (int32)THREAD_MIN_SET_PRIORITY); 899 thread->priority = std::min(thread->priority, 900 (int32)THREAD_MAX_SET_PRIORITY); 901 thread->state = B_THREAD_SUSPENDED; 902 903 thread->sig_block_mask = attributes.signal_mask; 904 905 // init debug structure 906 init_thread_debug_info(&thread->debug_info); 907 908 // create the kernel stack 909 char stackName[B_OS_NAME_LENGTH]; 910 snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack", 911 thread->name, thread->id); 912 virtual_address_restrictions virtualRestrictions = {}; 913 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS; 914 physical_address_restrictions physicalRestrictions = {}; 915 916 thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName, 917 KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 918 B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA 919 | B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 920 &virtualRestrictions, &physicalRestrictions, 921 (void**)&thread->kernel_stack_base); 922 923 if (thread->kernel_stack_area < 0) { 924 // we're not yet part of a team, so we can just bail out 925 status = thread->kernel_stack_area; 926 927 dprintf("create_thread: error creating kernel stack: %s!\n", 928 strerror(status)); 929 930 return status; 931 } 932 933 thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE 934 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE; 935 936 if (kernel) { 937 // Init the thread's kernel stack. It will start executing 938 // common_thread_entry() with the arguments we prepare here. 939 ThreadEntryArguments entryArgs; 940 entryArgs.kernelFunction = attributes.kernelEntry; 941 entryArgs.argument = attributes.kernelArgument; 942 entryArgs.enterUserland = false; 943 944 init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs)); 945 } else { 946 // create the userland stack, if the thread doesn't have one yet 947 if (thread->user_stack_base == 0) { 948 status = create_thread_user_stack(team, thread, 949 attributes.stack_address, attributes.stack_size, 950 attributes.additional_stack_size, attributes.guard_size, 951 stackName); 952 if (status != B_OK) 953 return status; 954 } 955 956 // Init the thread's kernel stack. It will start executing 957 // common_thread_entry() with the arguments we prepare here. 
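		// (Unlike the kernel-only case above, enterUserland is set here, so
		// common_thread_entry() will go on to call enter_userspace() once the
		// optional kernel function has returned.)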
		UserThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = true;
		entryArgs.userlandEntry = (addr_t)attributes.entry;
		entryArgs.userlandArgument1 = attributes.args1;
		entryArgs.userlandArgument2 = attributes.args2;
		entryArgs.pthread = attributes.pthread;
		entryArgs.forkArgs = attributes.forkArgs;
		entryArgs.flags = attributes.flags;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));

		// create the pre-defined thread timers
		status = user_timer_create_thread_timers(team, thread);
		if (status != B_OK)
			return status;
	}

	// lock the team and see, if it is still alive
	TeamLocker teamLocker(team);
	if (team->state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	bool debugNewThread = false;
	if (!kernel) {
		// allocate the user_thread structure, if not already allocated
		if (thread->user_thread == NULL) {
			thread->user_thread = team_allocate_user_thread(team);
			if (thread->user_thread == NULL)
				return B_NO_MEMORY;
		}

		// If the new thread belongs to the same team as the current thread, it
		// may inherit some of the thread debug flags.
		Thread* currentThread = thread_get_current_thread();
		if (currentThread != NULL && currentThread->team == team) {
			// inherit all user flags...
			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
				& B_THREAD_DEBUG_USER_FLAG_MASK;

			// ... save the syscall tracing flags, unless explicitly specified
			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
					| B_THREAD_DEBUG_POST_SYSCALL);
			}

			thread->debug_info.flags = debugFlags;

			// stop the new thread, if desired
			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
		}
	}

	// We're going to make the thread live, now. The thread itself will take
	// over a reference to its Thread object. We'll acquire another reference
	// for our own use (and threadReference remains armed).

	ThreadLocker threadLocker(thread);

	InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
	WriteSpinLocker threadHashLocker(sThreadHashLock);

	// check the thread limit
	if (sUsedThreads >= sMaxThreads) {
		// Clean up the user_thread structure. It's a bit unfortunate that the
		// Thread destructor cannot do that, so we have to do that explicitly.
		threadHashLocker.Unlock();
		threadCreationLocker.Unlock();

		user_thread* userThread = thread->user_thread;
		thread->user_thread = NULL;

		threadLocker.Unlock();
		teamLocker.Unlock();

		if (userThread != NULL)
			team_free_user_thread(team, userThread);

		return B_NO_MORE_THREADS;
	}

	// make thread visible in global hash/list
	thread->visible = true;
	sUsedThreads++;

	scheduler_on_thread_init(thread);

	thread->AcquireReference();

	// Debug the new thread, if the parent thread required that (see above),
	// or the respective global team debug flag is set. But only, if a
	// debugger is installed for the team.
	if (!kernel) {
		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
		if (debugNewThread
			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
		}
	}

	{
		SpinLocker signalLocker(team->signal_lock);
		SpinLocker timeLocker(team->time_lock);

		// insert thread into team
		insert_thread_into_team(team, thread);
	}

	threadHashLocker.Unlock();
	threadCreationLocker.Unlock();
	threadLocker.Unlock();
	teamLocker.Unlock();

	// notify listeners
	sNotificationService.Notify(THREAD_ADDED, thread);

	return thread->id;
}


static status_t
undertaker(void* /*args*/)
{
	while (true) {
		// wait for a thread to bury
		InterruptsSpinLocker locker(sUndertakerLock);

		while (sUndertakerEntries.IsEmpty()) {
			ConditionVariableEntry conditionEntry;
			sUndertakerCondition.Add(&conditionEntry);
			locker.Unlock();

			conditionEntry.Wait();

			locker.Lock();
		}

		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
		locker.Unlock();

		UndertakerEntry entry = *_entry;
			// we need a copy, since the original entry is on the thread's stack

		// we've got an entry
		Thread* thread = entry.thread;

		// make sure the thread isn't running anymore
		InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
		ASSERT(thread->state == THREAD_STATE_FREE_ON_RESCHED);
		schedulerLocker.Unlock();

		// remove this thread from the kernel team -- this makes it
		// inaccessible
		Team* kernelTeam = team_get_kernel_team();
		TeamLocker kernelTeamLocker(kernelTeam);
		thread->Lock();

		InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
		SpinLocker signalLocker(kernelTeam->signal_lock);
		SpinLocker timeLocker(kernelTeam->time_lock);

		remove_thread_from_team(kernelTeam, thread);

		timeLocker.Unlock();
		signalLocker.Unlock();
		threadCreationLocker.Unlock();

		kernelTeamLocker.Unlock();

		// free the thread structure
		thread->UnlockAndReleaseReference();
	}

	// can never get here
	return B_OK;
}


/*!	Returns the semaphore the thread is currently waiting on.

	The return value is purely informative.
	The caller must hold the scheduler lock.

	\param thread The thread.
	\return The ID of the semaphore the thread is currently waiting on or
		\c -1, if it isn't waiting on a semaphore.
*/
static sem_id
get_thread_wait_sem(Thread* thread)
{
	if (thread->state == B_THREAD_WAITING
		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
		return (sem_id)(addr_t)thread->wait.object;
	}
	return -1;
}


/*!	Fills the thread_info structure with information from the specified thread.
	The caller must hold the thread's lock and the scheduler lock.
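	The \a size argument is currently not evaluated.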
1160 */ 1161 static void 1162 fill_thread_info(Thread *thread, thread_info *info, size_t size) 1163 { 1164 info->thread = thread->id; 1165 info->team = thread->team->id; 1166 1167 strlcpy(info->name, thread->name, B_OS_NAME_LENGTH); 1168 1169 info->sem = -1; 1170 1171 if (thread->state == B_THREAD_WAITING) { 1172 info->state = B_THREAD_WAITING; 1173 1174 switch (thread->wait.type) { 1175 case THREAD_BLOCK_TYPE_SNOOZE: 1176 info->state = B_THREAD_ASLEEP; 1177 break; 1178 1179 case THREAD_BLOCK_TYPE_SEMAPHORE: 1180 { 1181 sem_id sem = (sem_id)(addr_t)thread->wait.object; 1182 if (sem == thread->msg.read_sem) 1183 info->state = B_THREAD_RECEIVING; 1184 else 1185 info->sem = sem; 1186 break; 1187 } 1188 1189 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE: 1190 default: 1191 break; 1192 } 1193 } else 1194 info->state = (thread_state)thread->state; 1195 1196 info->priority = thread->priority; 1197 info->stack_base = (void *)thread->user_stack_base; 1198 info->stack_end = (void *)(thread->user_stack_base 1199 + thread->user_stack_size); 1200 1201 InterruptsSpinLocker threadTimeLocker(thread->time_lock); 1202 info->user_time = thread->user_time; 1203 info->kernel_time = thread->kernel_time; 1204 } 1205 1206 1207 static status_t 1208 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize, 1209 int32 flags) 1210 { 1211 // get the thread 1212 Thread *target = Thread::Get(id); 1213 if (target == NULL) 1214 return B_BAD_THREAD_ID; 1215 BReference<Thread> targetReference(target, true); 1216 1217 // get the write semaphore 1218 ThreadLocker targetLocker(target); 1219 sem_id cachedSem = target->msg.write_sem; 1220 targetLocker.Unlock(); 1221 1222 if (bufferSize > THREAD_MAX_MESSAGE_SIZE) 1223 return B_NO_MEMORY; 1224 1225 status_t status = acquire_sem_etc(cachedSem, 1, flags, 0); 1226 if (status == B_INTERRUPTED) { 1227 // we got interrupted by a signal 1228 return status; 1229 } 1230 if (status != B_OK) { 1231 // Any other acquisition problems may be due to thread deletion 1232 return B_BAD_THREAD_ID; 1233 } 1234 1235 void* data; 1236 if (bufferSize > 0) { 1237 data = malloc(bufferSize); 1238 if (data == NULL) 1239 return B_NO_MEMORY; 1240 if (user_memcpy(data, buffer, bufferSize) != B_OK) { 1241 free(data); 1242 return B_BAD_DATA; 1243 } 1244 } else 1245 data = NULL; 1246 1247 targetLocker.Lock(); 1248 1249 // The target thread could have been deleted at this point. 1250 if (!target->IsAlive()) { 1251 targetLocker.Unlock(); 1252 free(data); 1253 return B_BAD_THREAD_ID; 1254 } 1255 1256 // Save message informations 1257 target->msg.sender = thread_get_current_thread()->id; 1258 target->msg.code = code; 1259 target->msg.size = bufferSize; 1260 target->msg.buffer = data; 1261 cachedSem = target->msg.read_sem; 1262 1263 targetLocker.Unlock(); 1264 1265 release_sem(cachedSem); 1266 return B_OK; 1267 } 1268 1269 1270 static int32 1271 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize, 1272 int32 flags) 1273 { 1274 Thread *thread = thread_get_current_thread(); 1275 size_t size; 1276 int32 code; 1277 1278 status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0); 1279 if (status != B_OK) { 1280 // Actually, we're not supposed to return error codes 1281 // but since the only reason this can fail is that we 1282 // were killed, it's probably okay to do so (but also 1283 // meaningless). 
1284 return status; 1285 } 1286 1287 if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) { 1288 size = min_c(bufferSize, thread->msg.size); 1289 status = user_memcpy(buffer, thread->msg.buffer, size); 1290 if (status != B_OK) { 1291 free(thread->msg.buffer); 1292 release_sem(thread->msg.write_sem); 1293 return status; 1294 } 1295 } 1296 1297 *_sender = thread->msg.sender; 1298 code = thread->msg.code; 1299 1300 free(thread->msg.buffer); 1301 release_sem(thread->msg.write_sem); 1302 1303 return code; 1304 } 1305 1306 1307 static status_t 1308 common_getrlimit(int resource, struct rlimit * rlp) 1309 { 1310 if (!rlp) 1311 return B_BAD_ADDRESS; 1312 1313 switch (resource) { 1314 case RLIMIT_AS: 1315 rlp->rlim_cur = __HAIKU_ADDR_MAX; 1316 rlp->rlim_max = __HAIKU_ADDR_MAX; 1317 return B_OK; 1318 1319 case RLIMIT_CORE: 1320 rlp->rlim_cur = 0; 1321 rlp->rlim_max = 0; 1322 return B_OK; 1323 1324 case RLIMIT_DATA: 1325 rlp->rlim_cur = RLIM_INFINITY; 1326 rlp->rlim_max = RLIM_INFINITY; 1327 return B_OK; 1328 1329 case RLIMIT_NOFILE: 1330 case RLIMIT_NOVMON: 1331 return vfs_getrlimit(resource, rlp); 1332 1333 case RLIMIT_STACK: 1334 { 1335 rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE; 1336 rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE; 1337 return B_OK; 1338 } 1339 1340 default: 1341 return EINVAL; 1342 } 1343 1344 return B_OK; 1345 } 1346 1347 1348 static status_t 1349 common_setrlimit(int resource, const struct rlimit * rlp) 1350 { 1351 if (!rlp) 1352 return B_BAD_ADDRESS; 1353 1354 switch (resource) { 1355 case RLIMIT_CORE: 1356 // We don't support core file, so allow settings to 0/0 only. 1357 if (rlp->rlim_cur != 0 || rlp->rlim_max != 0) 1358 return EINVAL; 1359 return B_OK; 1360 1361 case RLIMIT_NOFILE: 1362 case RLIMIT_NOVMON: 1363 return vfs_setrlimit(resource, rlp); 1364 1365 default: 1366 return EINVAL; 1367 } 1368 1369 return B_OK; 1370 } 1371 1372 1373 static status_t 1374 common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags, 1375 bigtime_t* _remainingTime) 1376 { 1377 #if KDEBUG 1378 if (!are_interrupts_enabled()) { 1379 panic("common_snooze_etc(): called with interrupts disabled, timeout " 1380 "%" B_PRIdBIGTIME, timeout); 1381 } 1382 #endif 1383 1384 switch (clockID) { 1385 case CLOCK_REALTIME: 1386 // make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall 1387 // through 1388 flags |= B_TIMEOUT_REAL_TIME_BASE; 1389 case CLOCK_MONOTONIC: 1390 { 1391 // Store the start time, for the case that we get interrupted and 1392 // need to return the remaining time. For absolute timeouts we can 1393 // still get he time later, if needed. 1394 bigtime_t startTime 1395 = _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0 1396 ? system_time() : 0; 1397 1398 Thread* thread = thread_get_current_thread(); 1399 1400 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE, 1401 NULL); 1402 status_t status = thread_block_with_timeout(flags, timeout); 1403 1404 if (status == B_TIMED_OUT || status == B_WOULD_BLOCK) 1405 return B_OK; 1406 1407 // If interrupted, compute the remaining time, if requested. 1408 if (status == B_INTERRUPTED && _remainingTime != NULL) { 1409 if ((flags & B_RELATIVE_TIMEOUT) != 0) { 1410 *_remainingTime = std::max( 1411 startTime + timeout - system_time(), (bigtime_t)0); 1412 } else { 1413 bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0 1414 ? 
real_time_clock_usecs() : system_time(); 1415 *_remainingTime = std::max(timeout - now, (bigtime_t)0); 1416 } 1417 } 1418 1419 return status; 1420 } 1421 1422 case CLOCK_THREAD_CPUTIME_ID: 1423 // Waiting for ourselves to do something isn't particularly 1424 // productive. 1425 return B_BAD_VALUE; 1426 1427 case CLOCK_PROCESS_CPUTIME_ID: 1428 default: 1429 // We don't have to support those, but we are allowed to. Could be 1430 // done be creating a UserTimer on the fly with a custom UserEvent 1431 // that would just wake us up. 1432 return ENOTSUP; 1433 } 1434 } 1435 1436 1437 // #pragma mark - debugger calls 1438 1439 1440 static int 1441 make_thread_unreal(int argc, char **argv) 1442 { 1443 int32 id = -1; 1444 1445 if (argc > 2) { 1446 print_debugger_command_usage(argv[0]); 1447 return 0; 1448 } 1449 1450 if (argc > 1) 1451 id = strtoul(argv[1], NULL, 0); 1452 1453 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1454 Thread* thread = it.Next();) { 1455 if (id != -1 && thread->id != id) 1456 continue; 1457 1458 if (thread->priority > B_DISPLAY_PRIORITY) { 1459 scheduler_set_thread_priority(thread, B_NORMAL_PRIORITY); 1460 kprintf("thread %" B_PRId32 " made unreal\n", thread->id); 1461 } 1462 } 1463 1464 return 0; 1465 } 1466 1467 1468 static int 1469 set_thread_prio(int argc, char **argv) 1470 { 1471 int32 id; 1472 int32 prio; 1473 1474 if (argc > 3 || argc < 2) { 1475 print_debugger_command_usage(argv[0]); 1476 return 0; 1477 } 1478 1479 prio = strtoul(argv[1], NULL, 0); 1480 if (prio > THREAD_MAX_SET_PRIORITY) 1481 prio = THREAD_MAX_SET_PRIORITY; 1482 if (prio < THREAD_MIN_SET_PRIORITY) 1483 prio = THREAD_MIN_SET_PRIORITY; 1484 1485 if (argc > 2) 1486 id = strtoul(argv[2], NULL, 0); 1487 else 1488 id = thread_get_current_thread()->id; 1489 1490 bool found = false; 1491 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1492 Thread* thread = it.Next();) { 1493 if (thread->id != id) 1494 continue; 1495 scheduler_set_thread_priority(thread, prio); 1496 kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio); 1497 found = true; 1498 break; 1499 } 1500 if (!found) 1501 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id); 1502 1503 return 0; 1504 } 1505 1506 1507 static int 1508 make_thread_suspended(int argc, char **argv) 1509 { 1510 int32 id; 1511 1512 if (argc > 2) { 1513 print_debugger_command_usage(argv[0]); 1514 return 0; 1515 } 1516 1517 if (argc == 1) 1518 id = thread_get_current_thread()->id; 1519 else 1520 id = strtoul(argv[1], NULL, 0); 1521 1522 bool found = false; 1523 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1524 Thread* thread = it.Next();) { 1525 if (thread->id != id) 1526 continue; 1527 1528 Signal signal(SIGSTOP, SI_USER, B_OK, team_get_kernel_team()->id); 1529 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE); 1530 1531 kprintf("thread %" B_PRId32 " suspended\n", id); 1532 found = true; 1533 break; 1534 } 1535 if (!found) 1536 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id); 1537 1538 return 0; 1539 } 1540 1541 1542 static int 1543 make_thread_resumed(int argc, char **argv) 1544 { 1545 int32 id; 1546 1547 if (argc != 2) { 1548 print_debugger_command_usage(argv[0]); 1549 return 0; 1550 } 1551 1552 // force user to enter a thread id, as using 1553 // the current thread is usually not intended 1554 id = strtoul(argv[1], NULL, 0); 1555 1556 bool found = false; 1557 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1558 Thread* thread = it.Next();) { 
1559 if (thread->id != id) 1560 continue; 1561 1562 if (thread->state == B_THREAD_SUSPENDED || thread->state == B_THREAD_ASLEEP 1563 || thread->state == B_THREAD_WAITING) { 1564 scheduler_enqueue_in_run_queue(thread); 1565 kprintf("thread %" B_PRId32 " resumed\n", thread->id); 1566 } else 1567 kprintf("thread %" B_PRId32 " is already running\n", thread->id); 1568 found = true; 1569 break; 1570 } 1571 if (!found) 1572 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id); 1573 1574 return 0; 1575 } 1576 1577 1578 static int 1579 drop_into_debugger(int argc, char **argv) 1580 { 1581 status_t err; 1582 int32 id; 1583 1584 if (argc > 2) { 1585 print_debugger_command_usage(argv[0]); 1586 return 0; 1587 } 1588 1589 if (argc == 1) 1590 id = thread_get_current_thread()->id; 1591 else 1592 id = strtoul(argv[1], NULL, 0); 1593 1594 err = _user_debug_thread(id); 1595 // TODO: This is a non-trivial syscall doing some locking, so this is 1596 // really nasty and may go seriously wrong. 1597 if (err) 1598 kprintf("drop failed\n"); 1599 else 1600 kprintf("thread %" B_PRId32 " dropped into user debugger\n", id); 1601 1602 return 0; 1603 } 1604 1605 1606 /*! Returns a user-readable string for a thread state. 1607 Only for use in the kernel debugger. 1608 */ 1609 static const char * 1610 state_to_text(Thread *thread, int32 state) 1611 { 1612 switch (state) { 1613 case B_THREAD_READY: 1614 return "ready"; 1615 1616 case B_THREAD_RUNNING: 1617 return "running"; 1618 1619 case B_THREAD_WAITING: 1620 { 1621 if (thread != NULL) { 1622 switch (thread->wait.type) { 1623 case THREAD_BLOCK_TYPE_SNOOZE: 1624 return "zzz"; 1625 1626 case THREAD_BLOCK_TYPE_SEMAPHORE: 1627 { 1628 sem_id sem = (sem_id)(addr_t)thread->wait.object; 1629 if (sem == thread->msg.read_sem) 1630 return "receive"; 1631 break; 1632 } 1633 } 1634 } 1635 1636 return "waiting"; 1637 } 1638 1639 case B_THREAD_SUSPENDED: 1640 return "suspended"; 1641 1642 case THREAD_STATE_FREE_ON_RESCHED: 1643 return "death"; 1644 1645 default: 1646 return "UNKNOWN"; 1647 } 1648 } 1649 1650 1651 static void 1652 print_thread_list_table_head() 1653 { 1654 kprintf("%-*s id state wait for %-*s cpu pri %-*s team " 1655 "name\n", 1656 B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object", 1657 B_PRINTF_POINTER_WIDTH, "stack"); 1658 } 1659 1660 1661 static void 1662 _dump_thread_info(Thread *thread, bool shortInfo) 1663 { 1664 if (shortInfo) { 1665 kprintf("%p %6" B_PRId32 " %-10s", thread, thread->id, 1666 state_to_text(thread, thread->state)); 1667 1668 // does it block on a semaphore or a condition variable? 
1669 if (thread->state == B_THREAD_WAITING) { 1670 switch (thread->wait.type) { 1671 case THREAD_BLOCK_TYPE_SEMAPHORE: 1672 { 1673 sem_id sem = (sem_id)(addr_t)thread->wait.object; 1674 if (sem == thread->msg.read_sem) 1675 kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, ""); 1676 else { 1677 kprintf("sem %-*" B_PRId32, 1678 B_PRINTF_POINTER_WIDTH + 5, sem); 1679 } 1680 break; 1681 } 1682 1683 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE: 1684 kprintf("cvar %p ", thread->wait.object); 1685 break; 1686 1687 case THREAD_BLOCK_TYPE_SNOOZE: 1688 kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, ""); 1689 break; 1690 1691 case THREAD_BLOCK_TYPE_SIGNAL: 1692 kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, ""); 1693 break; 1694 1695 case THREAD_BLOCK_TYPE_MUTEX: 1696 kprintf("mutex %p ", thread->wait.object); 1697 break; 1698 1699 case THREAD_BLOCK_TYPE_RW_LOCK: 1700 kprintf("rwlock %p ", thread->wait.object); 1701 break; 1702 1703 case THREAD_BLOCK_TYPE_USER: 1704 kprintf("user%*s", B_PRINTF_POINTER_WIDTH + 11, ""); 1705 break; 1706 1707 case THREAD_BLOCK_TYPE_OTHER: 1708 kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, ""); 1709 break; 1710 1711 default: 1712 kprintf("??? %p ", thread->wait.object); 1713 break; 1714 } 1715 } else 1716 kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, ""); 1717 1718 // on which CPU does it run? 1719 if (thread->cpu) 1720 kprintf("%2d", thread->cpu->cpu_num); 1721 else 1722 kprintf(" -"); 1723 1724 kprintf("%4" B_PRId32 " %p%5" B_PRId32 " %s\n", thread->priority, 1725 (void *)thread->kernel_stack_base, thread->team->id, thread->name); 1726 1727 return; 1728 } 1729 1730 // print the long info 1731 1732 struct thread_death_entry *death = NULL; 1733 1734 kprintf("THREAD: %p\n", thread); 1735 kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id, 1736 thread->id); 1737 kprintf("serial_number: %" B_PRId64 "\n", thread->serial_number); 1738 kprintf("name: \"%s\"\n", thread->name); 1739 kprintf("hash_next: %p\nteam_next: %p\n", 1740 thread->hash_next, thread->team_next); 1741 kprintf("priority: %" B_PRId32 " (I/O: %" B_PRId32 ")\n", 1742 thread->priority, thread->io_priority); 1743 kprintf("state: %s\n", state_to_text(thread, thread->state)); 1744 kprintf("cpu: %p ", thread->cpu); 1745 if (thread->cpu) 1746 kprintf("(%d)\n", thread->cpu->cpu_num); 1747 else 1748 kprintf("\n"); 1749 kprintf("sig_pending: %#" B_PRIx64 " (blocked: %#" B_PRIx64 1750 ", before sigsuspend(): %#" B_PRIx64 ")\n", 1751 (int64)thread->ThreadPendingSignals(), 1752 (int64)thread->sig_block_mask, 1753 (int64)thread->sigsuspend_original_unblocked_mask); 1754 kprintf("in_kernel: %d\n", thread->in_kernel); 1755 1756 if (thread->state == B_THREAD_WAITING) { 1757 kprintf("waiting for: "); 1758 1759 switch (thread->wait.type) { 1760 case THREAD_BLOCK_TYPE_SEMAPHORE: 1761 { 1762 sem_id sem = (sem_id)(addr_t)thread->wait.object; 1763 if (sem == thread->msg.read_sem) 1764 kprintf("data\n"); 1765 else 1766 kprintf("semaphore %" B_PRId32 "\n", sem); 1767 break; 1768 } 1769 1770 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE: 1771 kprintf("condition variable %p\n", thread->wait.object); 1772 break; 1773 1774 case THREAD_BLOCK_TYPE_SNOOZE: 1775 kprintf("snooze()\n"); 1776 break; 1777 1778 case THREAD_BLOCK_TYPE_SIGNAL: 1779 kprintf("signal\n"); 1780 break; 1781 1782 case THREAD_BLOCK_TYPE_MUTEX: 1783 kprintf("mutex %p\n", thread->wait.object); 1784 break; 1785 1786 case THREAD_BLOCK_TYPE_RW_LOCK: 1787 kprintf("rwlock %p\n", thread->wait.object); 1788 break; 1789 1790 case THREAD_BLOCK_TYPE_USER: 1791 kprintf("user\n"); 1792 
break; 1793 1794 case THREAD_BLOCK_TYPE_OTHER: 1795 kprintf("other (%s)\n", (char*)thread->wait.object); 1796 break; 1797 1798 default: 1799 kprintf("unknown (%p)\n", thread->wait.object); 1800 break; 1801 } 1802 } 1803 1804 kprintf("fault_handler: %p\n", (void *)thread->fault_handler); 1805 kprintf("team: %p, \"%s\"\n", thread->team, 1806 thread->team->Name()); 1807 kprintf(" exit.sem: %" B_PRId32 "\n", thread->exit.sem); 1808 kprintf(" exit.status: %#" B_PRIx32 " (%s)\n", thread->exit.status, 1809 strerror(thread->exit.status)); 1810 kprintf(" exit.waiters:\n"); 1811 while ((death = (struct thread_death_entry*)list_get_next_item( 1812 &thread->exit.waiters, death)) != NULL) { 1813 kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread); 1814 } 1815 1816 kprintf("kernel_stack_area: %" B_PRId32 "\n", thread->kernel_stack_area); 1817 kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base); 1818 kprintf("user_stack_area: %" B_PRId32 "\n", thread->user_stack_area); 1819 kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base); 1820 kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage); 1821 kprintf("user_thread: %p\n", (void *)thread->user_thread); 1822 kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno, 1823 strerror(thread->kernel_errno)); 1824 kprintf("kernel_time: %" B_PRId64 "\n", thread->kernel_time); 1825 kprintf("user_time: %" B_PRId64 "\n", thread->user_time); 1826 kprintf("flags: 0x%" B_PRIx32 "\n", thread->flags); 1827 kprintf("architecture dependant section:\n"); 1828 arch_thread_dump_info(&thread->arch_info); 1829 kprintf("scheduler data:\n"); 1830 scheduler_dump_thread_data(thread); 1831 } 1832 1833 1834 static int 1835 dump_thread_info(int argc, char **argv) 1836 { 1837 bool shortInfo = false; 1838 int argi = 1; 1839 if (argi < argc && strcmp(argv[argi], "-s") == 0) { 1840 shortInfo = true; 1841 print_thread_list_table_head(); 1842 argi++; 1843 } 1844 1845 if (argi == argc) { 1846 _dump_thread_info(thread_get_current_thread(), shortInfo); 1847 return 0; 1848 } 1849 1850 for (; argi < argc; argi++) { 1851 const char *name = argv[argi]; 1852 ulong arg = strtoul(name, NULL, 0); 1853 1854 if (IS_KERNEL_ADDRESS(arg)) { 1855 // semi-hack 1856 _dump_thread_info((Thread *)arg, shortInfo); 1857 continue; 1858 } 1859 1860 // walk through the thread list, trying to match name or id 1861 bool found = false; 1862 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1863 Thread* thread = it.Next();) { 1864 if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) { 1865 _dump_thread_info(thread, shortInfo); 1866 found = true; 1867 break; 1868 } 1869 } 1870 1871 if (!found) 1872 kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg); 1873 } 1874 1875 return 0; 1876 } 1877 1878 1879 static int 1880 dump_thread_list(int argc, char **argv) 1881 { 1882 bool realTimeOnly = false; 1883 bool calling = false; 1884 const char *callSymbol = NULL; 1885 addr_t callStart = 0; 1886 addr_t callEnd = 0; 1887 int32 requiredState = 0; 1888 team_id team = -1; 1889 sem_id sem = -1; 1890 1891 if (!strcmp(argv[0], "realtime")) 1892 realTimeOnly = true; 1893 else if (!strcmp(argv[0], "ready")) 1894 requiredState = B_THREAD_READY; 1895 else if (!strcmp(argv[0], "running")) 1896 requiredState = B_THREAD_RUNNING; 1897 else if (!strcmp(argv[0], "waiting")) { 1898 requiredState = B_THREAD_WAITING; 1899 1900 if (argc > 1) { 1901 sem = strtoul(argv[1], NULL, 0); 1902 if (sem == 0) 1903 kprintf("ignoring invalid semaphore 
argument.\n"); 1904 } 1905 } else if (!strcmp(argv[0], "calling")) { 1906 if (argc < 2) { 1907 kprintf("Need to give a symbol name or start and end arguments.\n"); 1908 return 0; 1909 } else if (argc == 3) { 1910 callStart = parse_expression(argv[1]); 1911 callEnd = parse_expression(argv[2]); 1912 } else 1913 callSymbol = argv[1]; 1914 1915 calling = true; 1916 } else if (argc > 1) { 1917 team = strtoul(argv[1], NULL, 0); 1918 if (team == 0) 1919 kprintf("ignoring invalid team argument.\n"); 1920 } 1921 1922 print_thread_list_table_head(); 1923 1924 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 1925 Thread* thread = it.Next();) { 1926 // filter out threads not matching the search criteria 1927 if ((requiredState && thread->state != requiredState) 1928 || (calling && !arch_debug_contains_call(thread, callSymbol, 1929 callStart, callEnd)) 1930 || (sem > 0 && get_thread_wait_sem(thread) != sem) 1931 || (team > 0 && thread->team->id != team) 1932 || (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY)) 1933 continue; 1934 1935 _dump_thread_info(thread, true); 1936 } 1937 return 0; 1938 } 1939 1940 1941 // #pragma mark - private kernel API 1942 1943 1944 void 1945 thread_exit(void) 1946 { 1947 cpu_status state; 1948 Thread* thread = thread_get_current_thread(); 1949 Team* team = thread->team; 1950 Team* kernelTeam = team_get_kernel_team(); 1951 status_t status; 1952 struct thread_debug_info debugInfo; 1953 team_id teamID = team->id; 1954 1955 TRACE(("thread %" B_PRId32 " exiting w/return code %#" B_PRIx32 "\n", 1956 thread->id, thread->exit.status)); 1957 1958 if (!are_interrupts_enabled()) 1959 panic("thread_exit() called with interrupts disabled!\n"); 1960 1961 // boost our priority to get this over with 1962 scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY); 1963 1964 if (team != kernelTeam) { 1965 // Delete all user timers associated with the thread. 1966 ThreadLocker threadLocker(thread); 1967 thread->DeleteUserTimers(false); 1968 1969 // detach the thread's user thread 1970 user_thread* userThread = thread->user_thread; 1971 thread->user_thread = NULL; 1972 1973 threadLocker.Unlock(); 1974 1975 // Delete the thread's user thread, if it's not the main thread. If it 1976 // is, we can save the work, since it will be deleted with the team's 1977 // address space. 1978 if (thread != team->main_thread) 1979 team_free_user_thread(team, userThread); 1980 } 1981 1982 // remember the user stack area -- we will delete it below 1983 area_id userStackArea = -1; 1984 if (team->address_space != NULL && thread->user_stack_area >= 0) { 1985 userStackArea = thread->user_stack_area; 1986 thread->user_stack_area = -1; 1987 } 1988 1989 struct job_control_entry *death = NULL; 1990 struct thread_death_entry* threadDeathEntry = NULL; 1991 bool deleteTeam = false; 1992 port_id debuggerPort = -1; 1993 1994 if (team != kernelTeam) { 1995 user_debug_thread_exiting(thread); 1996 1997 if (team->main_thread == thread) { 1998 // The main thread is exiting. Shut down the whole team. 
1999 deleteTeam = true; 2000 2001 // kill off all other threads and the user debugger facilities 2002 debuggerPort = team_shutdown_team(team); 2003 2004 // acquire necessary locks, which are: process group lock, kernel 2005 // team lock, parent team lock, and the team lock 2006 team->LockProcessGroup(); 2007 kernelTeam->Lock(); 2008 team->LockTeamAndParent(true); 2009 } else { 2010 threadDeathEntry 2011 = (thread_death_entry*)malloc(sizeof(thread_death_entry)); 2012 2013 // acquire necessary locks, which are: kernel team lock and the team 2014 // lock 2015 kernelTeam->Lock(); 2016 team->Lock(); 2017 } 2018 2019 ThreadLocker threadLocker(thread); 2020 2021 state = disable_interrupts(); 2022 2023 // swap address spaces, to make sure we're running on the kernel's pgdir 2024 vm_swap_address_space(team->address_space, VMAddressSpace::Kernel()); 2025 2026 WriteSpinLocker teamLocker(thread->team_lock); 2027 SpinLocker threadCreationLocker(gThreadCreationLock); 2028 // removing the thread and putting its death entry to the parent 2029 // team needs to be an atomic operation 2030 2031 // remember how long this thread lasted 2032 bigtime_t now = system_time(); 2033 2034 InterruptsSpinLocker signalLocker(kernelTeam->signal_lock); 2035 SpinLocker teamTimeLocker(kernelTeam->time_lock); 2036 SpinLocker threadTimeLocker(thread->time_lock); 2037 2038 thread->kernel_time += now - thread->last_time; 2039 thread->last_time = now; 2040 2041 team->dead_threads_kernel_time += thread->kernel_time; 2042 team->dead_threads_user_time += thread->user_time; 2043 2044 // stop/update thread/team CPU time user timers 2045 if (thread->HasActiveCPUTimeUserTimers() 2046 || team->HasActiveCPUTimeUserTimers()) { 2047 user_timer_stop_cpu_timers(thread, NULL); 2048 } 2049 2050 // deactivate CPU time user timers for the thread 2051 if (thread->HasActiveCPUTimeUserTimers()) 2052 thread->DeactivateCPUTimeUserTimers(); 2053 2054 threadTimeLocker.Unlock(); 2055 2056 // put the thread into the kernel team until it dies 2057 remove_thread_from_team(team, thread); 2058 insert_thread_into_team(kernelTeam, thread); 2059 2060 teamTimeLocker.Unlock(); 2061 signalLocker.Unlock(); 2062 2063 teamLocker.Unlock(); 2064 2065 if (team->death_entry != NULL) { 2066 if (--team->death_entry->remaining_threads == 0) 2067 team->death_entry->condition.NotifyOne(); 2068 } 2069 2070 if (deleteTeam) { 2071 Team* parent = team->parent; 2072 2073 // Set the team job control state to "dead" and detach the job 2074 // control entry from our team struct. 2075 team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL); 2076 death = team->job_control_entry; 2077 team->job_control_entry = NULL; 2078 2079 if (death != NULL) { 2080 death->InitDeadState(); 2081 2082 // team_set_job_control_state() already moved our entry 2083 // into the parent's list. We just check the soft limit of 2084 // death entries. 2085 if (parent->dead_children.count > MAX_DEAD_CHILDREN) { 2086 death = parent->dead_children.entries.RemoveHead(); 2087 parent->dead_children.count--; 2088 } else 2089 death = NULL; 2090 } 2091 2092 threadCreationLocker.Unlock(); 2093 restore_interrupts(state); 2094 2095 threadLocker.Unlock(); 2096 2097 // Get a temporary reference to the team's process group 2098 // -- team_remove_team() removes the team from the group, which 2099 // might destroy it otherwise and we wouldn't be able to unlock it. 
2100 ProcessGroup* group = team->group; 2101 group->AcquireReference(); 2102 2103 pid_t foregroundGroupToSignal; 2104 team_remove_team(team, foregroundGroupToSignal); 2105 2106 // unlock everything but the parent team 2107 team->Unlock(); 2108 if (parent != kernelTeam) 2109 kernelTeam->Unlock(); 2110 group->Unlock(); 2111 group->ReleaseReference(); 2112 2113 // Send SIGCHLD to the parent as long as we still have its lock. 2114 // This makes job control state change + signalling atomic. 2115 Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id); 2116 if (team->exit.reason == CLD_EXITED) { 2117 childSignal.SetStatus(team->exit.status); 2118 } else { 2119 childSignal.SetStatus(team->exit.signal); 2120 childSignal.SetSendingUser(team->exit.signaling_user); 2121 } 2122 send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE); 2123 2124 // also unlock the parent 2125 parent->Unlock(); 2126 2127 // If the team was a session leader with controlling TTY, we have 2128 // to send SIGHUP to the foreground process group. 2129 if (foregroundGroupToSignal >= 0) { 2130 Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id); 2131 send_signal_to_process_group(foregroundGroupToSignal, 2132 groupSignal, B_DO_NOT_RESCHEDULE); 2133 } 2134 } else { 2135 // The thread is not the main thread. We store a thread death entry 2136 // for it, unless someone is already waiting for it. 2137 if (threadDeathEntry != NULL 2138 && list_is_empty(&thread->exit.waiters)) { 2139 threadDeathEntry->thread = thread->id; 2140 threadDeathEntry->status = thread->exit.status; 2141 2142 // add entry to dead thread list 2143 list_add_item(&team->dead_threads, threadDeathEntry); 2144 } 2145 2146 threadCreationLocker.Unlock(); 2147 restore_interrupts(state); 2148 2149 threadLocker.Unlock(); 2150 team->Unlock(); 2151 kernelTeam->Unlock(); 2152 } 2153 2154 TRACE(("thread_exit: thread %" B_PRId32 " now a kernel thread!\n", 2155 thread->id)); 2156 } 2157 2158 // delete the team if we're its main thread 2159 if (deleteTeam) { 2160 team_delete_team(team, debuggerPort); 2161 2162 // we need to delete any death entry that made it to here 2163 delete death; 2164 } 2165 2166 ThreadLocker threadLocker(thread); 2167 2168 state = disable_interrupts(); 2169 SpinLocker threadCreationLocker(gThreadCreationLock); 2170 2171 // mark invisible in global hash/list, so it's no longer accessible 2172 WriteSpinLocker threadHashLocker(sThreadHashLock); 2173 thread->visible = false; 2174 sUsedThreads--; 2175 threadHashLocker.Unlock(); 2176 2177 // Stop debugging for this thread 2178 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2179 debugInfo = thread->debug_info; 2180 clear_thread_debug_info(&thread->debug_info, true); 2181 threadDebugInfoLocker.Unlock(); 2182 2183 // Remove the select infos. We notify them a little later. 
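// (They are notified with B_EVENT_INVALID further below, once the
// spinlocks have been dropped.)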
2184 select_info* selectInfos = thread->select_infos; 2185 thread->select_infos = NULL; 2186 2187 threadCreationLocker.Unlock(); 2188 restore_interrupts(state); 2189 2190 threadLocker.Unlock(); 2191 2192 destroy_thread_debug_info(&debugInfo); 2193 2194 // notify select infos 2195 select_info* info = selectInfos; 2196 while (info != NULL) { 2197 select_sync* sync = info->sync; 2198 2199 notify_select_events(info, B_EVENT_INVALID); 2200 info = info->next; 2201 put_select_sync(sync); 2202 } 2203 2204 // notify listeners 2205 sNotificationService.Notify(THREAD_REMOVED, thread); 2206 2207 // shutdown the thread messaging 2208 2209 status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0); 2210 if (status == B_WOULD_BLOCK) { 2211 // there is data waiting for us, so let us eat it 2212 thread_id sender; 2213 2214 delete_sem(thread->msg.write_sem); 2215 // first, let's remove all possibly waiting writers 2216 receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT); 2217 } else { 2218 // we probably own the semaphore here, and we're the last to do so 2219 delete_sem(thread->msg.write_sem); 2220 } 2221 // now we can safely remove the msg.read_sem 2222 delete_sem(thread->msg.read_sem); 2223 2224 // fill all death entries and delete the sem that others will use to wait 2225 // for us 2226 { 2227 sem_id cachedExitSem = thread->exit.sem; 2228 2229 ThreadLocker threadLocker(thread); 2230 2231 // make sure no one will grab this semaphore again 2232 thread->exit.sem = -1; 2233 2234 // fill all death entries 2235 thread_death_entry* entry = NULL; 2236 while ((entry = (thread_death_entry*)list_get_next_item( 2237 &thread->exit.waiters, entry)) != NULL) { 2238 entry->status = thread->exit.status; 2239 } 2240 2241 threadLocker.Unlock(); 2242 2243 delete_sem(cachedExitSem); 2244 } 2245 2246 // delete the user stack, if this was a user thread 2247 if (!deleteTeam && userStackArea >= 0) { 2248 // We postponed deleting the user stack until now, since this way all 2249 // notifications for the thread's death are out already and all other 2250 // threads waiting for this thread's death and some object on its stack 2251 // will wake up before we (try to) delete the stack area. Of most 2252 // relevance is probably the case where this is the main thread and 2253 // other threads use objects on its stack -- so we want them terminated 2254 // first. 2255 // When the team is deleted, all areas are deleted anyway, so we don't 2256 // need to do that explicitly in that case. 2257 vm_delete_area(teamID, userStackArea, true); 2258 } 2259 2260 // notify the debugger 2261 if (teamID != kernelTeam->id) 2262 user_debug_thread_deleted(teamID, thread->id); 2263 2264 // enqueue in the undertaker list and reschedule for the last time 2265 UndertakerEntry undertakerEntry(thread, teamID); 2266 2267 disable_interrupts(); 2268 2269 SpinLocker schedulerLocker(thread->scheduler_lock); 2270 2271 SpinLocker undertakerLocker(sUndertakerLock); 2272 sUndertakerEntries.Add(&undertakerEntry); 2273 sUndertakerCondition.NotifyOne(); 2274 undertakerLocker.Unlock(); 2275 2276 scheduler_reschedule(THREAD_STATE_FREE_ON_RESCHED); 2277 2278 panic("never can get here\n"); 2279 } 2280 2281 2282 /*! Called in the interrupt handler code when a thread enters 2283 the kernel for any reason. 2284 Only tracks time for now. 2285 Interrupts are disabled. 
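The caller passes the current system time in \a now.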
2286 */ 2287 void 2288 thread_at_kernel_entry(bigtime_t now) 2289 { 2290 Thread *thread = thread_get_current_thread(); 2291 2292 TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id)); 2293 2294 // track user time 2295 SpinLocker threadTimeLocker(thread->time_lock); 2296 thread->user_time += now - thread->last_time; 2297 thread->last_time = now; 2298 thread->in_kernel = true; 2299 threadTimeLocker.Unlock(); 2300 } 2301 2302 2303 /*! Called whenever a thread exits kernel space to user space. 2304 Tracks time, handles signals, ... 2305 Interrupts must be enabled. When the function returns, interrupts will be 2306 disabled. 2307 The function may not return. This e.g. happens when the thread has received 2308 a deadly signal. 2309 */ 2310 void 2311 thread_at_kernel_exit(void) 2312 { 2313 Thread *thread = thread_get_current_thread(); 2314 2315 TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id)); 2316 2317 handle_signals(thread); 2318 2319 disable_interrupts(); 2320 2321 // track kernel time 2322 bigtime_t now = system_time(); 2323 SpinLocker threadTimeLocker(thread->time_lock); 2324 thread->in_kernel = false; 2325 thread->kernel_time += now - thread->last_time; 2326 thread->last_time = now; 2327 } 2328 2329 2330 /*! The quick version of thread_kernel_exit(), in case no signals are pending 2331 and no debugging shall be done. 2332 Interrupts must be disabled. 2333 */ 2334 void 2335 thread_at_kernel_exit_no_signals(void) 2336 { 2337 Thread *thread = thread_get_current_thread(); 2338 2339 TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n", 2340 thread->id)); 2341 2342 // track kernel time 2343 bigtime_t now = system_time(); 2344 SpinLocker threadTimeLocker(thread->time_lock); 2345 thread->in_kernel = false; 2346 thread->kernel_time += now - thread->last_time; 2347 thread->last_time = now; 2348 } 2349 2350 2351 void 2352 thread_reset_for_exec(void) 2353 { 2354 Thread* thread = thread_get_current_thread(); 2355 2356 ThreadLocker threadLocker(thread); 2357 2358 // delete user-defined timers 2359 thread->DeleteUserTimers(true); 2360 2361 // cancel pre-defined timer 2362 if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID)) 2363 timer->Cancel(); 2364 2365 // reset user_thread and user stack 2366 thread->user_thread = NULL; 2367 thread->user_stack_area = -1; 2368 thread->user_stack_base = 0; 2369 thread->user_stack_size = 0; 2370 2371 // reset signals 2372 thread->ResetSignalsOnExec(); 2373 2374 // reset thread CPU time clock 2375 InterruptsSpinLocker timeLocker(thread->time_lock); 2376 thread->cpu_clock_offset = -thread->CPUTime(false); 2377 } 2378 2379 2380 thread_id 2381 allocate_thread_id() 2382 { 2383 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock); 2384 2385 // find the next unused ID 2386 thread_id id; 2387 do { 2388 id = sNextThreadID++; 2389 2390 // deal with integer overflow 2391 if (sNextThreadID < 0) 2392 sNextThreadID = 2; 2393 2394 // check whether the ID is already in use 2395 } while (sThreadHash.Lookup(id, false) != NULL); 2396 2397 return id; 2398 } 2399 2400 2401 thread_id 2402 peek_next_thread_id() 2403 { 2404 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 2405 return sNextThreadID; 2406 } 2407 2408 2409 /*! Yield the CPU to other threads. 2410 Thread will continue to run, if there's no other thread in ready 2411 state, and if it has a higher priority than the other ready threads, it 2412 still has a good chance to continue. 
2413 */ 2414 void 2415 thread_yield(void) 2416 { 2417 Thread *thread = thread_get_current_thread(); 2418 if (thread == NULL) 2419 return; 2420 2421 InterruptsSpinLocker _(thread->scheduler_lock); 2422 2423 thread->has_yielded = true; 2424 scheduler_reschedule(B_THREAD_READY); 2425 } 2426 2427 2428 void 2429 thread_map(void (*function)(Thread* thread, void* data), void* data) 2430 { 2431 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock); 2432 2433 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 2434 Thread* thread = it.Next();) { 2435 function(thread, data); 2436 } 2437 } 2438 2439 2440 /*! Kernel private thread creation function. 2441 */ 2442 thread_id 2443 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority, 2444 void *arg, team_id team) 2445 { 2446 return thread_create_thread( 2447 ThreadCreationAttributes(function, name, priority, arg, team), 2448 true); 2449 } 2450 2451 2452 status_t 2453 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout, 2454 status_t *_returnCode) 2455 { 2456 if (id < 0) 2457 return B_BAD_THREAD_ID; 2458 2459 // get the thread, queue our death entry, and fetch the semaphore we have to 2460 // wait on 2461 sem_id exitSem = B_BAD_THREAD_ID; 2462 struct thread_death_entry death; 2463 2464 Thread* thread = Thread::GetAndLock(id); 2465 if (thread != NULL) { 2466 // remember the semaphore we have to wait on and place our death entry 2467 exitSem = thread->exit.sem; 2468 if (exitSem >= 0) 2469 list_add_link_to_head(&thread->exit.waiters, &death); 2470 2471 thread->UnlockAndReleaseReference(); 2472 2473 if (exitSem < 0) 2474 return B_BAD_THREAD_ID; 2475 } else { 2476 // we couldn't find this thread -- maybe it's already gone, and we'll 2477 // find its death entry in our team 2478 Team* team = thread_get_current_thread()->team; 2479 TeamLocker teamLocker(team); 2480 2481 // check the child death entries first (i.e. main threads of child 2482 // teams) 2483 bool deleteEntry; 2484 job_control_entry* freeDeath 2485 = team_get_death_entry(team, id, &deleteEntry); 2486 if (freeDeath != NULL) { 2487 death.status = freeDeath->status; 2488 if (deleteEntry) 2489 delete freeDeath; 2490 } else { 2491 // check the thread death entries of the team (non-main threads) 2492 thread_death_entry* threadDeathEntry = NULL; 2493 while ((threadDeathEntry = (thread_death_entry*)list_get_next_item( 2494 &team->dead_threads, threadDeathEntry)) != NULL) { 2495 if (threadDeathEntry->thread == id) { 2496 list_remove_item(&team->dead_threads, threadDeathEntry); 2497 death.status = threadDeathEntry->status; 2498 free(threadDeathEntry); 2499 break; 2500 } 2501 } 2502 2503 if (threadDeathEntry == NULL) 2504 return B_BAD_THREAD_ID; 2505 } 2506 2507 // we found the thread's death entry in our team 2508 if (_returnCode) 2509 *_returnCode = death.status; 2510 2511 return B_OK; 2512 } 2513 2514 // we need to wait for the death of the thread 2515 2516 resume_thread(id); 2517 // make sure we don't wait forever on a suspended thread 2518 2519 status_t status = acquire_sem_etc(exitSem, 1, flags, timeout); 2520 2521 if (status == B_OK) { 2522 // this should never happen as the thread deletes the semaphore on exit 2523 panic("could acquire exit_sem for thread %" B_PRId32 "\n", id); 2524 } else if (status == B_BAD_SEM_ID) { 2525 // this is the way the thread normally exits 2526 status = B_OK; 2527 } else { 2528 // We were probably interrupted or the timeout occurred; we need to 2529 // remove our death entry now. 
2530 thread = Thread::GetAndLock(id); 2531 if (thread != NULL) { 2532 list_remove_link(&death); 2533 thread->UnlockAndReleaseReference(); 2534 } else { 2535 // The thread is already gone, so we need to wait uninterruptibly 2536 // for its exit semaphore to make sure our death entry stays valid. 2537 // It won't take long, since the thread is apparently already in the 2538 // middle of the cleanup. 2539 acquire_sem(exitSem); 2540 status = B_OK; 2541 } 2542 } 2543 2544 if (status == B_OK && _returnCode != NULL) 2545 *_returnCode = death.status; 2546 2547 return status; 2548 } 2549 2550 2551 status_t 2552 select_thread(int32 id, struct select_info* info, bool kernel) 2553 { 2554 // get and lock the thread 2555 Thread* thread = Thread::GetAndLock(id); 2556 if (thread == NULL) 2557 return B_BAD_THREAD_ID; 2558 BReference<Thread> threadReference(thread, true); 2559 ThreadLocker threadLocker(thread, true); 2560 2561 // We support only B_EVENT_INVALID at the moment. 2562 info->selected_events &= B_EVENT_INVALID; 2563 2564 // add info to list 2565 if (info->selected_events != 0) { 2566 info->next = thread->select_infos; 2567 thread->select_infos = info; 2568 2569 // we need a sync reference 2570 atomic_add(&info->sync->ref_count, 1); 2571 } 2572 2573 return B_OK; 2574 } 2575 2576 2577 status_t 2578 deselect_thread(int32 id, struct select_info* info, bool kernel) 2579 { 2580 // get and lock the thread 2581 Thread* thread = Thread::GetAndLock(id); 2582 if (thread == NULL) 2583 return B_BAD_THREAD_ID; 2584 BReference<Thread> threadReference(thread, true); 2585 ThreadLocker threadLocker(thread, true); 2586 2587 // remove info from list 2588 select_info** infoLocation = &thread->select_infos; 2589 while (*infoLocation != NULL && *infoLocation != info) 2590 infoLocation = &(*infoLocation)->next; 2591 2592 if (*infoLocation != info) 2593 return B_OK; 2594 2595 *infoLocation = info->next; 2596 2597 threadLocker.Unlock(); 2598 2599 // surrender sync reference 2600 put_select_sync(info->sync); 2601 2602 return B_OK; 2603 } 2604 2605 2606 int32 2607 thread_max_threads(void) 2608 { 2609 return sMaxThreads; 2610 } 2611 2612 2613 int32 2614 thread_used_threads(void) 2615 { 2616 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 2617 return sUsedThreads; 2618 } 2619 2620 2621 /*! Returns a user-readable string for a thread state. 2622 Only for use in the kernel debugger. 
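If a \a thread is given, it may additionally be used to describe what a waiting thread is blocked on.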
2623 */ 2624 const char* 2625 thread_state_to_text(Thread* thread, int32 state) 2626 { 2627 return state_to_text(thread, state); 2628 } 2629 2630 2631 int32 2632 thread_get_io_priority(thread_id id) 2633 { 2634 Thread* thread = Thread::GetAndLock(id); 2635 if (thread == NULL) 2636 return B_BAD_THREAD_ID; 2637 BReference<Thread> threadReference(thread, true); 2638 ThreadLocker threadLocker(thread, true); 2639 2640 int32 priority = thread->io_priority; 2641 if (priority < 0) { 2642 // negative I/O priority means using the (CPU) priority 2643 priority = thread->priority; 2644 } 2645 2646 return priority; 2647 } 2648 2649 2650 void 2651 thread_set_io_priority(int32 priority) 2652 { 2653 Thread* thread = thread_get_current_thread(); 2654 ThreadLocker threadLocker(thread); 2655 2656 thread->io_priority = priority; 2657 } 2658 2659 2660 status_t 2661 thread_init(kernel_args *args) 2662 { 2663 TRACE(("thread_init: entry\n")); 2664 2665 // create the thread hash table 2666 new(&sThreadHash) ThreadHashTable(); 2667 if (sThreadHash.Init(128) != B_OK) 2668 panic("thread_init(): failed to init thread hash table!"); 2669 2670 // create the thread structure object cache 2671 sThreadCache = create_object_cache("threads", sizeof(Thread), 64, NULL, 2672 NULL, NULL); 2673 // Note: The x86 port requires 64 byte alignment of thread structures. 2674 if (sThreadCache == NULL) 2675 panic("thread_init(): failed to allocate thread object cache!"); 2676 2677 if (arch_thread_init(args) < B_OK) 2678 panic("arch_thread_init() failed!\n"); 2679 2680 // skip all thread IDs including B_SYSTEM_TEAM, which is reserved 2681 sNextThreadID = B_SYSTEM_TEAM + 1; 2682 2683 // create an idle thread for each cpu 2684 for (uint32 i = 0; i < args->num_cpus; i++) { 2685 Thread *thread; 2686 area_info info; 2687 char name[64]; 2688 2689 sprintf(name, "idle thread %" B_PRIu32, i + 1); 2690 thread = new(&sIdleThreads[i]) Thread(name, 2691 i == 0 ? 
team_get_kernel_team_id() : -1, &gCPU[i]); 2692 if (thread == NULL || thread->Init(true) != B_OK) { 2693 panic("error creating idle thread struct\n"); 2694 return B_NO_MEMORY; 2695 } 2696 2697 gCPU[i].running_thread = thread; 2698 2699 thread->team = team_get_kernel_team(); 2700 thread->priority = B_IDLE_PRIORITY; 2701 thread->state = B_THREAD_RUNNING; 2702 sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1); 2703 thread->kernel_stack_area = find_area(name); 2704 2705 if (get_area_info(thread->kernel_stack_area, &info) != B_OK) 2706 panic("error finding idle kstack area\n"); 2707 2708 thread->kernel_stack_base = (addr_t)info.address; 2709 thread->kernel_stack_top = thread->kernel_stack_base + info.size; 2710 2711 thread->visible = true; 2712 insert_thread_into_team(thread->team, thread); 2713 2714 scheduler_on_thread_init(thread); 2715 } 2716 sUsedThreads = args->num_cpus; 2717 2718 // init the notification service 2719 new(&sNotificationService) ThreadNotificationService(); 2720 2721 sNotificationService.Register(); 2722 2723 // start the undertaker thread 2724 new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>(); 2725 sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries"); 2726 2727 thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker", 2728 B_DISPLAY_PRIORITY, NULL); 2729 if (undertakerThread < 0) 2730 panic("Failed to create undertaker thread!"); 2731 resume_thread(undertakerThread); 2732 2733 // set up some debugger commands 2734 add_debugger_command_etc("threads", &dump_thread_list, "List all threads", 2735 "[ <team> ]\n" 2736 "Prints a list of all existing threads, or, if a team ID is given,\n" 2737 "all threads of the specified team.\n" 2738 " <team> - The ID of the team whose threads shall be listed.\n", 0); 2739 add_debugger_command_etc("ready", &dump_thread_list, 2740 "List all ready threads", 2741 "\n" 2742 "Prints a list of all threads in ready state.\n", 0); 2743 add_debugger_command_etc("running", &dump_thread_list, 2744 "List all running threads", 2745 "\n" 2746 "Prints a list of all threads in running state.\n", 0); 2747 add_debugger_command_etc("waiting", &dump_thread_list, 2748 "List all waiting threads (optionally for a specific semaphore)", 2749 "[ <sem> ]\n" 2750 "Prints a list of all threads in waiting state. If a semaphore is\n" 2751 "specified, only the threads waiting on that semaphore are listed.\n" 2752 " <sem> - ID of the semaphore.\n", 0); 2753 add_debugger_command_etc("realtime", &dump_thread_list, 2754 "List all realtime threads", 2755 "\n" 2756 "Prints a list of all threads with realtime priority.\n", 0); 2757 add_debugger_command_etc("thread", &dump_thread_info, 2758 "Dump info about a particular thread", 2759 "[ -s ] ( <id> | <address> | <name> )*\n" 2760 "Prints information about the specified thread. 
If no argument is\n" 2761 "given the current thread is selected.\n" 2762 " -s - Print info in compact table form (like \"threads\").\n" 2763 " <id> - The ID of the thread.\n" 2764 " <address> - The address of the thread structure.\n" 2765 " <name> - The thread's name.\n", 0); 2766 add_debugger_command_etc("calling", &dump_thread_list, 2767 "Show all threads that have a specific address in their call chain", 2768 "{ <symbol-pattern> | <start> <end> }\n", 0); 2769 add_debugger_command_etc("unreal", &make_thread_unreal, 2770 "Set realtime priority threads to normal priority", 2771 "[ <id> ]\n" 2772 "Sets the priority of all realtime threads or, if given, the one\n" 2773 "with the specified ID to \"normal\" priority.\n" 2774 " <id> - The ID of the thread.\n", 0); 2775 add_debugger_command_etc("suspend", &make_thread_suspended, 2776 "Suspend a thread", 2777 "[ <id> ]\n" 2778 "Suspends the thread with the given ID. If no ID argument is given\n" 2779 "the current thread is selected.\n" 2780 " <id> - The ID of the thread.\n", 0); 2781 add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread", 2782 "<id>\n" 2783 "Resumes the specified thread, if it is currently suspended.\n" 2784 " <id> - The ID of the thread.\n", 0); 2785 add_debugger_command_etc("drop", &drop_into_debugger, 2786 "Drop a thread into the userland debugger", 2787 "<id>\n" 2788 "Drops the specified (userland) thread into the userland debugger\n" 2789 "after leaving the kernel debugger.\n" 2790 " <id> - The ID of the thread.\n", 0); 2791 add_debugger_command_etc("priority", &set_thread_prio, 2792 "Set a thread's priority", 2793 "<priority> [ <id> ]\n" 2794 "Sets the priority of the thread with the specified ID to the given\n" 2795 "priority. If no thread ID is given, the current thread is selected.\n" 2796 " <priority> - The thread's new priority (0 - 120)\n" 2797 " <id> - The ID of the thread.\n", 0); 2798 2799 return B_OK; 2800 } 2801 2802 2803 status_t 2804 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum) 2805 { 2806 // set up the cpu pointer in the not yet initialized per-cpu idle thread 2807 // so that get_current_cpu and friends will work, which is crucial for 2808 // a lot of low level routines 2809 sIdleThreads[cpuNum].cpu = &gCPU[cpuNum]; 2810 arch_thread_set_current_thread(&sIdleThreads[cpuNum]); 2811 return B_OK; 2812 } 2813 2814 2815 // #pragma mark - thread blocking API 2816 2817 2818 static status_t 2819 thread_block_timeout(timer* timer) 2820 { 2821 Thread* thread = (Thread*)timer->user_data; 2822 thread_unblock(thread, B_TIMED_OUT); 2823 2824 return B_HANDLED_INTERRUPT; 2825 } 2826 2827 2828 /*! Blocks the current thread. 2829 2830 The thread is blocked until someone else unblock it. Must be called after a 2831 call to thread_prepare_to_block(). If the thread has already been unblocked 2832 after the previous call to thread_prepare_to_block(), this function will 2833 return immediately. Cf. the documentation of thread_prepare_to_block() for 2834 more details. 2835 2836 The caller must hold the scheduler lock. 2837 2838 \param thread The current thread. 2839 \return The error code passed to the unblocking function. thread_interrupt() 2840 uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was 2841 successful while another error code indicates a failure (what that means 2842 depends on the client code). 
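A minimal usage sketch of the blocking API -- the THREAD_BLOCK_TYPE_OTHER
	block type with a descriptive string and the surrounding bookkeeping are
	assumptions for illustration, not something this function prescribes:

		Thread* thread = thread_get_current_thread();
		thread_prepare_to_block(thread, B_CAN_INTERRUPT,
			THREAD_BLOCK_TYPE_OTHER, "waiting for foo");
		// make ourselves findable by the unblocker, drop our own locks ...
		status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT,
			1000000);

	The unblocking side would then call thread_unblock() on the waiter with
	the desired status code.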
2843 */ 2844 static inline status_t 2845 thread_block_locked(Thread* thread) 2846 { 2847 if (thread->wait.status == 1) { 2848 // check for signals, if interruptible 2849 if (thread_is_interrupted(thread, thread->wait.flags)) { 2850 thread->wait.status = B_INTERRUPTED; 2851 } else 2852 scheduler_reschedule(B_THREAD_WAITING); 2853 } 2854 2855 return thread->wait.status; 2856 } 2857 2858 2859 /*! Blocks the current thread. 2860 2861 The function acquires the scheduler lock and calls thread_block_locked(). 2862 See there for more information. 2863 */ 2864 status_t 2865 thread_block() 2866 { 2867 InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock); 2868 return thread_block_locked(thread_get_current_thread()); 2869 } 2870 2871 2872 /*! Blocks the current thread with a timeout. 2873 2874 The current thread is blocked until someone else unblock it or the specified 2875 timeout occurs. Must be called after a call to thread_prepare_to_block(). If 2876 the thread has already been unblocked after the previous call to 2877 thread_prepare_to_block(), this function will return immediately. See 2878 thread_prepare_to_block() for more details. 2879 2880 The caller must not hold the scheduler lock. 2881 2882 \param timeoutFlags The standard timeout flags: 2883 - \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait. 2884 - \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when 2885 the timeout shall occur. 2886 - \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT 2887 is specified, too. Specifies that \a timeout is a real time, not a 2888 system time. 2889 If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are 2890 specified, an infinite timeout is implied and the function behaves like 2891 thread_block_locked(). 2892 \return The error code passed to the unblocking function. thread_interrupt() 2893 uses \c B_INTERRUPTED. When the timeout occurred, \c B_TIMED_OUT is 2894 returned. By convention \c B_OK means that the wait was successful while 2895 another error code indicates a failure (what that means depends on the 2896 client code). 2897 */ 2898 status_t 2899 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout) 2900 { 2901 Thread* thread = thread_get_current_thread(); 2902 2903 InterruptsSpinLocker locker(thread->scheduler_lock); 2904 2905 if (thread->wait.status != 1) 2906 return thread->wait.status; 2907 2908 bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) 2909 && timeout != B_INFINITE_TIMEOUT; 2910 2911 if (useTimer) { 2912 // Timer flags: absolute/relative. 2913 uint32 timerFlags; 2914 if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) { 2915 timerFlags = B_ONE_SHOT_RELATIVE_TIMER; 2916 } else { 2917 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER; 2918 if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0) 2919 timerFlags |= B_TIMER_REAL_TIME_BASE; 2920 } 2921 2922 // install the timer 2923 thread->wait.unblock_timer.user_data = thread; 2924 add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout, 2925 timerFlags); 2926 } 2927 2928 // block 2929 status_t error = thread_block_locked(thread); 2930 2931 locker.Unlock(); 2932 2933 // cancel timer, if it didn't fire 2934 if (error != B_TIMED_OUT && useTimer) 2935 cancel_timer(&thread->wait.unblock_timer); 2936 2937 return error; 2938 } 2939 2940 2941 /*! Unblocks a thread. 2942 2943 Acquires the scheduler lock and calls thread_unblock_locked(). 2944 See there for more information. 
2945 */ 2946 void 2947 thread_unblock(Thread* thread, status_t status) 2948 { 2949 InterruptsSpinLocker locker(thread->scheduler_lock); 2950 thread_unblock_locked(thread, status); 2951 } 2952 2953 2954 /*! Unblocks a userland-blocked thread. 2955 The caller must not hold any locks. 2956 */ 2957 static status_t 2958 user_unblock_thread(thread_id threadID, status_t status) 2959 { 2960 // get the thread 2961 Thread* thread = Thread::GetAndLock(threadID); 2962 if (thread == NULL) 2963 return B_BAD_THREAD_ID; 2964 BReference<Thread> threadReference(thread, true); 2965 ThreadLocker threadLocker(thread, true); 2966 2967 if (thread->user_thread == NULL) 2968 return B_NOT_ALLOWED; 2969 2970 InterruptsSpinLocker locker(thread->scheduler_lock); 2971 2972 set_ac(); 2973 if (thread->user_thread->wait_status > 0) { 2974 thread->user_thread->wait_status = status; 2975 clear_ac(); 2976 2977 // Even if the user_thread->wait_status was > 0, it may be the 2978 // case that this thread is actually blocked on something else. 2979 if (thread->wait.status > 0 2980 && thread->wait.type == THREAD_BLOCK_TYPE_USER) { 2981 thread_unblock_locked(thread, status); 2982 } 2983 } else 2984 clear_ac(); 2985 2986 return B_OK; 2987 } 2988 2989 2990 static bool 2991 thread_check_permissions(const Thread* currentThread, const Thread* thread, 2992 bool kernel) 2993 { 2994 if (kernel) 2995 return true; 2996 2997 if (thread->team->id == team_get_kernel_team_id()) 2998 return false; 2999 3000 if (thread->team == currentThread->team 3001 || currentThread->team->effective_uid == 0 3002 || thread->team->real_uid == currentThread->team->real_uid) 3003 return true; 3004 3005 return false; 3006 } 3007 3008 3009 static status_t 3010 thread_send_signal(thread_id id, uint32 number, int32 signalCode, 3011 int32 errorCode, bool kernel) 3012 { 3013 if (id <= 0) 3014 return B_BAD_VALUE; 3015 3016 Thread* currentThread = thread_get_current_thread(); 3017 Thread* thread = Thread::Get(id); 3018 if (thread == NULL) 3019 return B_BAD_THREAD_ID; 3020 BReference<Thread> threadReference(thread, true); 3021 3022 // check whether sending the signal is allowed 3023 if (!thread_check_permissions(currentThread, thread, kernel)) 3024 return B_NOT_ALLOWED; 3025 3026 Signal signal(number, signalCode, errorCode, currentThread->team->id); 3027 return send_signal_to_thread(thread, signal, 0); 3028 } 3029 3030 3031 // #pragma mark - public kernel API 3032 3033 3034 void 3035 exit_thread(status_t returnValue) 3036 { 3037 Thread *thread = thread_get_current_thread(); 3038 Team* team = thread->team; 3039 3040 thread->exit.status = returnValue; 3041 3042 // if called from a kernel thread, we don't deliver the signal, 3043 // we just exit directly to keep the user space behaviour of 3044 // this function 3045 if (team != team_get_kernel_team()) { 3046 // If this is its main thread, set the team's exit status. 
3047 if (thread == team->main_thread) { 3048 TeamLocker teamLocker(team); 3049 3050 if (!team->exit.initialized) { 3051 team->exit.reason = CLD_EXITED; 3052 team->exit.signal = 0; 3053 team->exit.signaling_user = 0; 3054 team->exit.status = returnValue; 3055 team->exit.initialized = true; 3056 } 3057 3058 teamLocker.Unlock(); 3059 } 3060 3061 Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id); 3062 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE); 3063 } else 3064 thread_exit(); 3065 } 3066 3067 3068 static status_t 3069 thread_kill_thread(thread_id id, bool kernel) 3070 { 3071 return thread_send_signal(id, SIGKILLTHR, SI_USER, B_OK, kernel); 3072 } 3073 3074 3075 status_t 3076 kill_thread(thread_id id) 3077 { 3078 return thread_kill_thread(id, true); 3079 } 3080 3081 3082 status_t 3083 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize) 3084 { 3085 return send_data_etc(thread, code, buffer, bufferSize, 0); 3086 } 3087 3088 3089 int32 3090 receive_data(thread_id *sender, void *buffer, size_t bufferSize) 3091 { 3092 return receive_data_etc(sender, buffer, bufferSize, 0); 3093 } 3094 3095 3096 static bool 3097 thread_has_data(thread_id id, bool kernel) 3098 { 3099 Thread* currentThread = thread_get_current_thread(); 3100 Thread* thread; 3101 BReference<Thread> threadReference; 3102 if (id == currentThread->id) { 3103 thread = currentThread; 3104 } else { 3105 thread = Thread::Get(id); 3106 if (thread == NULL) 3107 return false; 3108 3109 threadReference.SetTo(thread, true); 3110 } 3111 3112 if (!kernel && thread->team != currentThread->team) 3113 return false; 3114 3115 int32 count; 3116 if (get_sem_count(thread->msg.read_sem, &count) != B_OK) 3117 return false; 3118 3119 return count == 0 ? false : true; 3120 } 3121 3122 3123 bool 3124 has_data(thread_id thread) 3125 { 3126 return thread_has_data(thread, true); 3127 } 3128 3129 3130 status_t 3131 _get_thread_info(thread_id id, thread_info *info, size_t size) 3132 { 3133 if (info == NULL || size != sizeof(thread_info) || id < B_OK) 3134 return B_BAD_VALUE; 3135 3136 // get the thread 3137 Thread* thread = Thread::GetAndLock(id); 3138 if (thread == NULL) 3139 return B_BAD_THREAD_ID; 3140 BReference<Thread> threadReference(thread, true); 3141 ThreadLocker threadLocker(thread, true); 3142 3143 // fill the info -- also requires the scheduler lock to be held 3144 InterruptsSpinLocker locker(thread->scheduler_lock); 3145 3146 fill_thread_info(thread, info, size); 3147 3148 return B_OK; 3149 } 3150 3151 3152 status_t 3153 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info, 3154 size_t size) 3155 { 3156 if (info == NULL || size != sizeof(thread_info) || teamID < 0) 3157 return B_BAD_VALUE; 3158 3159 int32 lastID = *_cookie; 3160 3161 // get the team 3162 Team* team = Team::GetAndLock(teamID); 3163 if (team == NULL) 3164 return B_BAD_VALUE; 3165 BReference<Team> teamReference(team, true); 3166 TeamLocker teamLocker(team, true); 3167 3168 Thread* thread = NULL; 3169 3170 if (lastID == 0) { 3171 // We start with the main thread 3172 thread = team->main_thread; 3173 } else { 3174 // Find the one thread with an ID greater than ours (as long as the IDs 3175 // don't wrap they are always sorted from highest to lowest). 3176 // TODO: That is broken not only when the IDs wrap, but also for the 3177 // kernel team, to which threads are added when they are dying. 
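// Since the list is sorted by descending ID (see above), we keep the last
// entry whose ID is still greater than the cookie -- that is the next
// thread in ascending ID order.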
3178 for (Thread* next = team->thread_list; next != NULL; 3179 next = next->team_next) { 3180 if (next->id <= lastID) 3181 break; 3182 3183 thread = next; 3184 } 3185 } 3186 3187 if (thread == NULL) 3188 return B_BAD_VALUE; 3189 3190 lastID = thread->id; 3191 *_cookie = lastID; 3192 3193 ThreadLocker threadLocker(thread); 3194 InterruptsSpinLocker locker(thread->scheduler_lock); 3195 3196 fill_thread_info(thread, info, size); 3197 3198 return B_OK; 3199 } 3200 3201 3202 thread_id 3203 find_thread(const char* name) 3204 { 3205 if (name == NULL) 3206 return thread_get_current_thread_id(); 3207 3208 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock); 3209 3210 // Scanning the whole hash with the thread hash lock held isn't exactly 3211 // cheap, but since this function is probably used very rarely, and we 3212 // only need a read lock, it's probably acceptable. 3213 3214 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 3215 Thread* thread = it.Next();) { 3216 if (!thread->visible) 3217 continue; 3218 3219 if (strcmp(thread->name, name) == 0) 3220 return thread->id; 3221 } 3222 3223 return B_NAME_NOT_FOUND; 3224 } 3225 3226 3227 status_t 3228 rename_thread(thread_id id, const char* name) 3229 { 3230 if (name == NULL) 3231 return B_BAD_VALUE; 3232 3233 // get the thread 3234 Thread* thread = Thread::GetAndLock(id); 3235 if (thread == NULL) 3236 return B_BAD_THREAD_ID; 3237 BReference<Thread> threadReference(thread, true); 3238 ThreadLocker threadLocker(thread, true); 3239 3240 // check whether the operation is allowed 3241 if (thread->team != thread_get_current_thread()->team) 3242 return B_NOT_ALLOWED; 3243 3244 strlcpy(thread->name, name, B_OS_NAME_LENGTH); 3245 3246 team_id teamID = thread->team->id; 3247 3248 threadLocker.Unlock(); 3249 3250 // notify listeners 3251 sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id); 3252 // don't pass the thread structure, as it's unsafe, if it isn't ours 3253 3254 return B_OK; 3255 } 3256 3257 3258 static status_t 3259 thread_set_thread_priority(thread_id id, int32 priority, bool kernel) 3260 { 3261 // make sure the passed in priority is within bounds 3262 if (priority > THREAD_MAX_SET_PRIORITY) 3263 priority = THREAD_MAX_SET_PRIORITY; 3264 if (priority < THREAD_MIN_SET_PRIORITY) 3265 priority = THREAD_MIN_SET_PRIORITY; 3266 3267 // get the thread 3268 Thread* thread = Thread::GetAndLock(id); 3269 if (thread == NULL) 3270 return B_BAD_THREAD_ID; 3271 BReference<Thread> threadReference(thread, true); 3272 ThreadLocker threadLocker(thread, true); 3273 3274 // check whether the change is allowed 3275 if (thread_is_idle_thread(thread) || !thread_check_permissions( 3276 thread_get_current_thread(), thread, kernel)) 3277 return B_NOT_ALLOWED; 3278 3279 return scheduler_set_thread_priority(thread, priority); 3280 } 3281 3282 3283 status_t 3284 set_thread_priority(thread_id id, int32 priority) 3285 { 3286 return thread_set_thread_priority(id, priority, true); 3287 } 3288 3289 3290 status_t 3291 snooze_etc(bigtime_t timeout, int timebase, uint32 flags) 3292 { 3293 return common_snooze_etc(timeout, timebase, flags, NULL); 3294 } 3295 3296 3297 /*! snooze() for internal kernel use only; doesn't interrupt on signals. */ 3298 status_t 3299 snooze(bigtime_t timeout) 3300 { 3301 return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT); 3302 } 3303 3304 3305 /*! snooze_until() for internal kernel use only; doesn't interrupt on 3306 signals. 
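\a timeout is an absolute point in time in the given \a timebase; for example, snooze_until(system_time() + 1000000, B_SYSTEM_TIMEBASE) sleeps for roughly one second.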
3307 */ 3308 status_t 3309 snooze_until(bigtime_t timeout, int timebase) 3310 { 3311 return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT); 3312 } 3313 3314 3315 status_t 3316 wait_for_thread(thread_id thread, status_t *_returnCode) 3317 { 3318 return wait_for_thread_etc(thread, 0, 0, _returnCode); 3319 } 3320 3321 3322 static status_t 3323 thread_suspend_thread(thread_id id, bool kernel) 3324 { 3325 return thread_send_signal(id, SIGSTOP, SI_USER, B_OK, kernel); 3326 } 3327 3328 3329 status_t 3330 suspend_thread(thread_id id) 3331 { 3332 return thread_suspend_thread(id, true); 3333 } 3334 3335 3336 static status_t 3337 thread_resume_thread(thread_id id, bool kernel) 3338 { 3339 // Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains 3340 // compatibility to BeOS which documents the combination of suspend_thread() 3341 // and resume_thread() to interrupt threads waiting on semaphores. 3342 return thread_send_signal(id, SIGNAL_CONTINUE_THREAD, SI_USER, B_OK, kernel); 3343 } 3344 3345 3346 status_t 3347 resume_thread(thread_id id) 3348 { 3349 return thread_resume_thread(id, true); 3350 } 3351 3352 3353 thread_id 3354 spawn_kernel_thread(thread_func function, const char *name, int32 priority, 3355 void *arg) 3356 { 3357 return thread_create_thread( 3358 ThreadCreationAttributes(function, name, priority, arg), 3359 true); 3360 } 3361 3362 3363 int 3364 getrlimit(int resource, struct rlimit * rlp) 3365 { 3366 status_t error = common_getrlimit(resource, rlp); 3367 if (error != B_OK) { 3368 errno = error; 3369 return -1; 3370 } 3371 3372 return 0; 3373 } 3374 3375 3376 int 3377 setrlimit(int resource, const struct rlimit * rlp) 3378 { 3379 status_t error = common_setrlimit(resource, rlp); 3380 if (error != B_OK) { 3381 errno = error; 3382 return -1; 3383 } 3384 3385 return 0; 3386 } 3387 3388 3389 // #pragma mark - syscalls 3390 3391 3392 void 3393 _user_exit_thread(status_t returnValue) 3394 { 3395 exit_thread(returnValue); 3396 } 3397 3398 3399 status_t 3400 _user_kill_thread(thread_id thread) 3401 { 3402 return thread_kill_thread(thread, false); 3403 } 3404 3405 3406 status_t 3407 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int)) 3408 { 3409 // check the cancel function 3410 if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction)) 3411 return B_BAD_VALUE; 3412 3413 // get and lock the thread 3414 Thread* thread = Thread::GetAndLock(threadID); 3415 if (thread == NULL) 3416 return B_BAD_THREAD_ID; 3417 BReference<Thread> threadReference(thread, true); 3418 ThreadLocker threadLocker(thread, true); 3419 3420 // only threads of the same team can be canceled 3421 if (thread->team != thread_get_current_thread()->team) 3422 return B_NOT_ALLOWED; 3423 3424 // set the cancel function 3425 thread->cancel_function = cancelFunction; 3426 3427 // send the cancellation signal to the thread 3428 InterruptsReadSpinLocker teamLocker(thread->team_lock); 3429 SpinLocker locker(thread->team->signal_lock); 3430 return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0); 3431 } 3432 3433 3434 status_t 3435 _user_resume_thread(thread_id thread) 3436 { 3437 return thread_resume_thread(thread, false); 3438 } 3439 3440 3441 status_t 3442 _user_suspend_thread(thread_id thread) 3443 { 3444 return thread_suspend_thread(thread, false); 3445 } 3446 3447 3448 status_t 3449 _user_rename_thread(thread_id thread, const char *userName) 3450 { 3451 char name[B_OS_NAME_LENGTH]; 3452 3453 if (!IS_USER_ADDRESS(userName) 3454 || userName == NULL 3455 || user_strlcpy(name, 
userName, B_OS_NAME_LENGTH) < B_OK) 3456 return B_BAD_ADDRESS; 3457 3458 // rename_thread() forbids thread renames across teams, so we don't 3459 // need a "kernel" flag here. 3460 return rename_thread(thread, name); 3461 } 3462 3463 3464 int32 3465 _user_set_thread_priority(thread_id thread, int32 newPriority) 3466 { 3467 return thread_set_thread_priority(thread, newPriority, false); 3468 } 3469 3470 3471 thread_id 3472 _user_spawn_thread(thread_creation_attributes* userAttributes) 3473 { 3474 // copy the userland structure to the kernel 3475 char nameBuffer[B_OS_NAME_LENGTH]; 3476 ThreadCreationAttributes attributes; 3477 status_t error = attributes.InitFromUserAttributes(userAttributes, 3478 nameBuffer); 3479 if (error != B_OK) 3480 return error; 3481 3482 // create the thread 3483 thread_id threadID = thread_create_thread(attributes, false); 3484 3485 if (threadID >= 0) 3486 user_debug_thread_created(threadID); 3487 3488 return threadID; 3489 } 3490 3491 3492 status_t 3493 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags, 3494 bigtime_t* userRemainingTime) 3495 { 3496 // We need to store more syscall restart parameters than usual and need a 3497 // somewhat different handling. Hence we can't use 3498 // syscall_restart_handle_timeout_pre() but do the job ourselves. 3499 struct restart_parameters { 3500 bigtime_t timeout; 3501 clockid_t timebase; 3502 uint32 flags; 3503 }; 3504 3505 Thread* thread = thread_get_current_thread(); 3506 3507 if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) { 3508 // The syscall was restarted. Fetch the parameters from the stored 3509 // restart parameters. 3510 restart_parameters* restartParameters 3511 = (restart_parameters*)thread->syscall_restart.parameters; 3512 timeout = restartParameters->timeout; 3513 timebase = restartParameters->timebase; 3514 flags = restartParameters->flags; 3515 } else { 3516 // convert relative timeouts to absolute ones 3517 if ((flags & B_RELATIVE_TIMEOUT) != 0) { 3518 // not restarted yet and the flags indicate a relative timeout 3519 3520 // Make sure we use the system time base, so real-time clock changes 3521 // won't affect our wait. 3522 flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE; 3523 if (timebase == CLOCK_REALTIME) 3524 timebase = CLOCK_MONOTONIC; 3525 3526 // get the current time and make the timeout absolute 3527 bigtime_t now; 3528 status_t error = user_timer_get_clock(timebase, now); 3529 if (error != B_OK) 3530 return error; 3531 3532 timeout += now; 3533 3534 // deal with overflow 3535 if (timeout < 0) 3536 timeout = B_INFINITE_TIMEOUT; 3537 3538 flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT; 3539 } else 3540 flags |= B_ABSOLUTE_TIMEOUT; 3541 } 3542 3543 // snooze 3544 bigtime_t remainingTime; 3545 status_t error = common_snooze_etc(timeout, timebase, 3546 flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, 3547 userRemainingTime != NULL ? &remainingTime : NULL); 3548 3549 // If interrupted, copy the remaining time back to userland and prepare the 3550 // syscall restart. 
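// (The timeout was normalized to an absolute value above, so a restarted
// call will not extend the total waiting time.)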
3551 if (error == B_INTERRUPTED) { 3552 if (userRemainingTime != NULL 3553 && (!IS_USER_ADDRESS(userRemainingTime) 3554 || user_memcpy(userRemainingTime, &remainingTime, 3555 sizeof(remainingTime)) != B_OK)) { 3556 return B_BAD_ADDRESS; 3557 } 3558 3559 // store the normalized values in the restart parameters 3560 restart_parameters* restartParameters 3561 = (restart_parameters*)thread->syscall_restart.parameters; 3562 restartParameters->timeout = timeout; 3563 restartParameters->timebase = timebase; 3564 restartParameters->flags = flags; 3565 3566 // restart the syscall, if possible 3567 atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL); 3568 } 3569 3570 return error; 3571 } 3572 3573 3574 void 3575 _user_thread_yield(void) 3576 { 3577 thread_yield(); 3578 } 3579 3580 3581 status_t 3582 _user_get_thread_info(thread_id id, thread_info *userInfo) 3583 { 3584 thread_info info; 3585 status_t status; 3586 3587 if (!IS_USER_ADDRESS(userInfo)) 3588 return B_BAD_ADDRESS; 3589 3590 status = _get_thread_info(id, &info, sizeof(thread_info)); 3591 3592 if (status >= B_OK 3593 && user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3594 return B_BAD_ADDRESS; 3595 3596 return status; 3597 } 3598 3599 3600 status_t 3601 _user_get_next_thread_info(team_id team, int32 *userCookie, 3602 thread_info *userInfo) 3603 { 3604 status_t status; 3605 thread_info info; 3606 int32 cookie; 3607 3608 if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo) 3609 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK) 3610 return B_BAD_ADDRESS; 3611 3612 status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info)); 3613 if (status < B_OK) 3614 return status; 3615 3616 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK 3617 || user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3618 return B_BAD_ADDRESS; 3619 3620 return status; 3621 } 3622 3623 3624 thread_id 3625 _user_find_thread(const char *userName) 3626 { 3627 char name[B_OS_NAME_LENGTH]; 3628 3629 if (userName == NULL) 3630 return find_thread(NULL); 3631 3632 if (!IS_USER_ADDRESS(userName) 3633 || user_strlcpy(name, userName, sizeof(name)) < B_OK) 3634 return B_BAD_ADDRESS; 3635 3636 return find_thread(name); 3637 } 3638 3639 3640 status_t 3641 _user_wait_for_thread(thread_id id, status_t *userReturnCode) 3642 { 3643 status_t returnCode; 3644 status_t status; 3645 3646 if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode)) 3647 return B_BAD_ADDRESS; 3648 3649 status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode); 3650 3651 if (status == B_OK && userReturnCode != NULL 3652 && user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) { 3653 return B_BAD_ADDRESS; 3654 } 3655 3656 return syscall_restart_handle_post(status); 3657 } 3658 3659 3660 bool 3661 _user_has_data(thread_id thread) 3662 { 3663 return thread_has_data(thread, false); 3664 } 3665 3666 3667 status_t 3668 _user_send_data(thread_id thread, int32 code, const void *buffer, 3669 size_t bufferSize) 3670 { 3671 if (buffer != NULL && !IS_USER_ADDRESS(buffer)) 3672 return B_BAD_ADDRESS; 3673 3674 return send_data_etc(thread, code, buffer, bufferSize, 3675 B_KILL_CAN_INTERRUPT); 3676 // supports userland buffers 3677 } 3678 3679 3680 status_t 3681 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize) 3682 { 3683 thread_id sender; 3684 status_t code; 3685 3686 if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL) 3687 || (!IS_USER_ADDRESS(buffer) && buffer != NULL)) { 3688 return B_BAD_ADDRESS; 3689 } 3690 
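// this blocks until a message arrives; only a kill signal can interrupt
// the wait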
3691 code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT); 3692 // supports userland buffers 3693 3694 if (_userSender != NULL) 3695 if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK) 3696 return B_BAD_ADDRESS; 3697 3698 return code; 3699 } 3700 3701 3702 status_t 3703 _user_block_thread(uint32 flags, bigtime_t timeout) 3704 { 3705 syscall_restart_handle_timeout_pre(flags, timeout); 3706 flags |= B_CAN_INTERRUPT; 3707 3708 Thread* thread = thread_get_current_thread(); 3709 ThreadLocker threadLocker(thread); 3710 3711 // check, if already done 3712 set_ac(); 3713 if (thread->user_thread->wait_status <= 0) { 3714 status_t status = thread->user_thread->wait_status; 3715 clear_ac(); 3716 return status; 3717 } 3718 clear_ac(); 3719 3720 // nope, so wait 3721 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_USER, NULL); 3722 3723 threadLocker.Unlock(); 3724 3725 status_t status = thread_block_with_timeout(flags, timeout); 3726 3727 threadLocker.Lock(); 3728 3729 // Interruptions or timeouts can race with other threads unblocking us. 3730 // Favor a wake-up by another thread, i.e. if someone changed the wait 3731 // status, use that. 3732 set_ac(); 3733 status_t oldStatus = thread->user_thread->wait_status; 3734 if (oldStatus > 0) { 3735 thread->user_thread->wait_status = status; 3736 clear_ac(); 3737 } else { 3738 clear_ac(); 3739 status = oldStatus; 3740 } 3741 3742 threadLocker.Unlock(); 3743 3744 return syscall_restart_handle_timeout_post(status, timeout); 3745 } 3746 3747 3748 status_t 3749 _user_unblock_thread(thread_id threadID, status_t status) 3750 { 3751 status_t error = user_unblock_thread(threadID, status); 3752 3753 if (error == B_OK) 3754 scheduler_reschedule_if_necessary(); 3755 3756 return error; 3757 } 3758 3759 3760 status_t 3761 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status) 3762 { 3763 enum { 3764 MAX_USER_THREADS_TO_UNBLOCK = 128 3765 }; 3766 3767 if (userThreads == NULL || !IS_USER_ADDRESS(userThreads)) 3768 return B_BAD_ADDRESS; 3769 if (count > MAX_USER_THREADS_TO_UNBLOCK) 3770 return B_BAD_VALUE; 3771 3772 thread_id threads[MAX_USER_THREADS_TO_UNBLOCK]; 3773 if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK) 3774 return B_BAD_ADDRESS; 3775 3776 for (uint32 i = 0; i < count; i++) 3777 user_unblock_thread(threads[i], status); 3778 3779 scheduler_reschedule_if_necessary(); 3780 3781 return B_OK; 3782 } 3783 3784 3785 // TODO: the following two functions don't belong here 3786 3787 3788 int 3789 _user_getrlimit(int resource, struct rlimit *urlp) 3790 { 3791 struct rlimit rl; 3792 int ret; 3793 3794 if (urlp == NULL) 3795 return EINVAL; 3796 3797 if (!IS_USER_ADDRESS(urlp)) 3798 return B_BAD_ADDRESS; 3799 3800 ret = common_getrlimit(resource, &rl); 3801 3802 if (ret == 0) { 3803 ret = user_memcpy(urlp, &rl, sizeof(struct rlimit)); 3804 if (ret < 0) 3805 return ret; 3806 3807 return 0; 3808 } 3809 3810 return ret; 3811 } 3812 3813 3814 int 3815 _user_setrlimit(int resource, const struct rlimit *userResourceLimit) 3816 { 3817 struct rlimit resourceLimit; 3818 3819 if (userResourceLimit == NULL) 3820 return EINVAL; 3821 3822 if (!IS_USER_ADDRESS(userResourceLimit) 3823 || user_memcpy(&resourceLimit, userResourceLimit, 3824 sizeof(struct rlimit)) < B_OK) 3825 return B_BAD_ADDRESS; 3826 3827 return common_setrlimit(resource, &resourceLimit); 3828 } 3829