/*
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! Threading routines */


#include <thread.h>

#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

#include <algorithm>

#include <OS.h>

#include <util/AutoLock.h>

#include <arch/debug.h>
#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <cpu.h>
#include <int.h>
#include <kimage.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <Notifications.h>
#include <real_time_clock.h>
#include <slab/Slab.h>
#include <smp.h>
#include <syscalls.h>
#include <syscall_restart.h>
#include <team.h>
#include <tls.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <vfs.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <wait_for_objects.h>

#include "TeamThreadTables.h"


//#define TRACE_THREAD
#ifdef TRACE_THREAD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define THREAD_MAX_MESSAGE_SIZE 65536


// #pragma mark - ThreadHashTable


typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;


// thread list
static Thread sIdleThreads[B_MAX_CPU_COUNT];
static ThreadHashTable sThreadHash;
static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
static thread_id sNextThreadID = 2;
	// ID 1 is allocated for the kernel by Team::Team() behind our back

// some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced)
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;


struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
	Thread*	thread;
	team_id	teamID;

	UndertakerEntry(Thread* thread, team_id teamID)
		:
		thread(thread),
		teamID(teamID)
	{
	}
};


struct ThreadEntryArguments {
	status_t	(*kernelFunction)(void* argument);
	void*		argument;
	bool		enterUserland;
};

struct UserThreadEntryArguments : ThreadEntryArguments {
	addr_t			userlandEntry;
	void*			userlandArgument1;
	void*			userlandArgument2;
	pthread_t		pthread;
	arch_fork_arg*	forkArgs;
	uint32			flags;
};


class ThreadNotificationService : public DefaultNotificationService {
public:
	ThreadNotificationService()
		: DefaultNotificationService("threads")
	{
	}

	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
		Thread* thread = NULL)
	{
		char eventBuffer[180];
		KMessage event;
		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
		event.AddInt32("event", eventCode);
		event.AddInt32("team", teamID);
		event.AddInt32("thread", threadID);
		if (thread != NULL)
			event.AddPointer("threadStruct", thread);

		DefaultNotificationService::Notify(event, eventCode);
	}

	void Notify(uint32 eventCode, Thread* thread)
	{
		return Notify(eventCode, thread->team->id, thread->id, thread);
	}
};

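// Note: the undertaker list below and its condition variable are only
// manipulated with the scheduler lock (gSchedulerLock) held -- see
// undertaker() and thread_exit().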
static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
static ConditionVariable sUndertakerCondition;
static ThreadNotificationService sNotificationService;


// object cache to allocate thread structures from
static object_cache* sThreadCache;


// #pragma mark - Thread


/*!	Constructs a thread.

	\param name The thread's name.
	\param threadID The ID to be assigned to the new thread. If
		\code < 0 \endcode a fresh one is allocated.
	\param cpu The CPU the thread shall be assigned to.
*/
Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
	:
	flags(0),
	serial_number(-1),
	hash_next(NULL),
	team_next(NULL),
	queue_next(NULL),
	priority(-1),
	next_priority(-1),
	io_priority(-1),
	cpu(cpu),
	previous_cpu(NULL),
	pinned_to_cpu(0),
	sig_block_mask(0),
	sigsuspend_original_unblocked_mask(0),
	user_signal_context(NULL),
	signal_stack_base(0),
	signal_stack_size(0),
	signal_stack_enabled(false),
	in_kernel(true),
	was_yielded(false),
	user_thread(NULL),
	fault_handler(0),
	page_faults_allowed(1),
	team(NULL),
	select_infos(NULL),
	kernel_stack_area(-1),
	kernel_stack_base(0),
	user_stack_area(-1),
	user_stack_base(0),
	user_local_storage(0),
	kernel_errno(0),
	user_time(0),
	kernel_time(0),
	last_time(0),
	cpu_clock_offset(0),
	post_interrupt_callback(NULL),
	post_interrupt_data(NULL)
{
	id = threadID >= 0 ? threadID : allocate_thread_id();
	visible = false;

	// init locks
	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);

	B_INITIALIZE_SPINLOCK(&time_lock);

	// init name
	if (name != NULL)
		strlcpy(this->name, name, B_OS_NAME_LENGTH);
	else
		strcpy(this->name, "unnamed thread");

	alarm.period = 0;

	exit.status = 0;

	list_init(&exit.waiters);

	exit.sem = -1;
	msg.write_sem = -1;
	msg.read_sem = -1;

	// add to thread table -- yet invisible
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Insert(this);
}


Thread::~Thread()
{
	// Delete resources that should actually be deleted by the thread itself,
	// when it exits, but that might still exist if the thread was never run.

	if (user_stack_area >= 0)
		delete_area(user_stack_area);

	DeleteUserTimers(false);

	// delete the resources that may remain in either case

	if (kernel_stack_area >= 0)
		delete_area(kernel_stack_area);

	fPendingSignals.Clear();

	if (exit.sem >= 0)
		delete_sem(exit.sem);
	if (msg.write_sem >= 0)
		delete_sem(msg.write_sem);
	if (msg.read_sem >= 0)
		delete_sem(msg.read_sem);

	scheduler_on_thread_destroy(this);

	mutex_destroy(&fLock);

	// remove from thread table
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Remove(this);
}


/*static*/ status_t
Thread::Create(const char* name, Thread*& _thread)
{
	Thread* thread = new Thread(name, -1, NULL);
	if (thread == NULL)
		return B_NO_MEMORY;

	status_t error = thread->Init(false);
	if (error != B_OK) {
		delete thread;
		return error;
	}

	_thread = thread;
	return B_OK;
}


/*static*/ Thread*
Thread::Get(thread_id id)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread != NULL)
		thread->AcquireReference();
	return thread;
}


/*static*/ Thread*
Thread::GetAndLock(thread_id id)
{
	// look it up and acquire a reference
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread == NULL)
		return NULL;

	thread->AcquireReference();
	threadHashLocker.Unlock();

	// lock and check whether it is still in the hash table
	thread->Lock();
	threadHashLocker.Lock();

	if (sThreadHash.Lookup(id) == thread)
		return thread;

	threadHashLocker.Unlock();

	// nope, the thread is no longer in the hash table
	thread->UnlockAndReleaseReference();

	return NULL;
}


/*static*/ Thread*
Thread::GetDebug(thread_id id)
{
	return sThreadHash.Lookup(id, false);
}


/*static*/ bool
Thread::IsAlive(thread_id id)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	return sThreadHash.Lookup(id) != NULL;
}


void*
Thread::operator new(size_t size)
{
	return object_cache_alloc(sThreadCache, 0);
}


void*
Thread::operator new(size_t, void* pointer)
{
	return pointer;
}


void
Thread::operator delete(void* pointer, size_t size)
{
	object_cache_free(sThreadCache, pointer, 0);
}


status_t
Thread::Init(bool idleThread)
{
	status_t error = scheduler_on_thread_create(this, idleThread);
	if (error != B_OK)
		return error;

	char temp[64];
	snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id);
	exit.sem = create_sem(0, temp);
	if (exit.sem < 0)
		return exit.sem;

	snprintf(temp, sizeof(temp), "%s send", name);
	msg.write_sem = create_sem(1, temp);
	if (msg.write_sem < 0)
		return msg.write_sem;

	snprintf(temp, sizeof(temp), "%s receive", name);
	msg.read_sem = create_sem(0, temp);
	if (msg.read_sem < 0)
		return msg.read_sem;

	error = arch_thread_init_thread_struct(this);
	if (error != B_OK)
		return error;

	return B_OK;
}


/*!	Checks whether the thread is still in the thread hash table.
*/
bool
Thread::IsAlive() const
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);

	return sThreadHash.Lookup(id) != NULL;
}


void
Thread::ResetSignalsOnExec()
{
	// We are supposed to keep the pending signals and the signal mask. Only
	// the signal stack, if set, shall be unset.

	sigsuspend_original_unblocked_mask = 0;
	user_signal_context = NULL;
	signal_stack_base = 0;
	signal_stack_size = 0;
	signal_stack_enabled = false;
}


/*!	Adds the given user timer to the thread and, if user-defined, assigns it
	an ID.

	The caller must hold the thread's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Thread::AddUserTimer(UserTimer* timer)
{
	// If the timer is user-defined, check the timer limit and increment the
	// user-defined count.
	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}


/*!	Removes the given user timer from the thread.

	The caller must hold the thread's lock.

	\param timer The timer to be removed.
*/
void
Thread::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		team->UserDefinedTimersRemoved(1);
}


/*!	Deletes all (or all user-defined) user timers of the thread.

	The caller must hold the thread's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are
		deleted, otherwise all timers are deleted.
*/
void
Thread::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	if (count > 0)
		team->UserDefinedTimersRemoved(count);
}


void
Thread::DeactivateCPUTimeUserTimers()
{
	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();
}


// #pragma mark - ThreadListIterator


ThreadListIterator::ThreadListIterator()
{
	// queue the entry
	InterruptsSpinLocker locker(sThreadHashLock);
	sThreadHash.InsertIteratorEntry(&fEntry);
}


ThreadListIterator::~ThreadListIterator()
{
	// remove the entry
	InterruptsSpinLocker locker(sThreadHashLock);
	sThreadHash.RemoveIteratorEntry(&fEntry);
}


Thread*
ThreadListIterator::Next()
{
	// get the next thread -- if there is one, acquire a reference to it
	InterruptsSpinLocker locker(sThreadHashLock);
	Thread* thread = sThreadHash.NextElement(&fEntry);
	if (thread != NULL)
		thread->AcquireReference();

	return thread;
}


// #pragma mark - ThreadCreationAttributes


ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
	const char* name, int32 priority, void* arg, team_id team,
	Thread* thread)
{
	this->entry = NULL;
	this->name = name;
	this->priority = priority;
	this->args1 = NULL;
	this->args2 = NULL;
	this->stack_address = NULL;
	this->stack_size = 0;
	this->guard_size = 0;
	this->pthread = NULL;
	this->flags = 0;
	this->team = team >= 0 ? team : team_get_kernel_team()->id;
	this->thread = thread;
	this->signal_mask = 0;
	this->additional_stack_size = 0;
	this->kernelEntry = function;
	this->kernelArgument = arg;
	this->forkArgs = NULL;
}


/*!	Initializes the structure from a userland structure.
	\param userAttributes The userland structure (must be a userland address).
	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
		which will be used for the \c name field, if the userland structure has
		a name. The buffer must remain valid as long as this structure is in
		use afterwards (or until it is reinitialized).
	\return \c B_OK, if the initialization went fine, another error code
		otherwise.
*/
status_t
ThreadCreationAttributes::InitFromUserAttributes(
	const thread_creation_attributes* userAttributes, char* nameBuffer)
{
	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
				sizeof(thread_creation_attributes)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	if (stack_size != 0
		&& (stack_size < MIN_USER_STACK_SIZE
			|| stack_size > MAX_USER_STACK_SIZE)) {
		return B_BAD_VALUE;
	}

	if (entry == NULL || !IS_USER_ADDRESS(entry)
		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
		|| (name != NULL && (!IS_USER_ADDRESS(name)
			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
		return B_BAD_ADDRESS;
	}

	name = name != NULL ? nameBuffer : "user thread";

	// kernel only attributes (not in thread_creation_attributes):
	Thread* currentThread = thread_get_current_thread();
	team = currentThread->team->id;
	thread = NULL;
	signal_mask = currentThread->sig_block_mask;
		// inherit the current thread's signal mask
	additional_stack_size = 0;
	kernelEntry = NULL;
	kernelArgument = NULL;
	forkArgs = NULL;

	return B_OK;
}


// #pragma mark - private functions


/*!	Inserts a thread into a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
insert_thread_into_team(Team *team, Thread *thread)
{
	thread->team_next = team->thread_list;
	team->thread_list = thread;
	team->num_threads++;

	if (team->num_threads == 1) {
		// this was the first thread
		team->main_thread = thread;
	}
	thread->team = team;
}


/*!	Removes a thread from a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
remove_thread_from_team(Team *team, Thread *thread)
{
	Thread *temp, *last = NULL;

	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
		if (temp == thread) {
			if (last == NULL)
				team->thread_list = temp->team_next;
			else
				last->team_next = temp->team_next;

			team->num_threads--;
			break;
		}
		last = temp;
	}
}

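/*!	Prepares the calling thread for userland execution and enters userland.
	For a fork()ed thread the saved fork frame is restored instead of jumping
	to the given entry point. On success this does not return; it only
	returns if something fails (e.g. TLS initialization).
*/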
static status_t
enter_userspace(Thread* thread, UserThreadEntryArguments* args)
{
	status_t error = arch_thread_init_tls(thread);
	if (error != B_OK) {
		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
			")\n", thread->name, thread->id);
		free(args->forkArgs);
		return error;
	}

	user_debug_update_new_thread_flags(thread);

	// init the thread's user_thread
	user_thread* userThread = thread->user_thread;
	userThread->pthread = args->pthread;
	userThread->flags = 0;
	userThread->wait_status = B_OK;
	userThread->defer_signals
		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
	userThread->pending_signals = 0;

	if (args->forkArgs != NULL) {
		// This is a fork()ed thread. Copy the fork args onto the stack and
		// free them.
		arch_fork_arg archArgs = *args->forkArgs;
		free(args->forkArgs);

		arch_restore_fork_frame(&archArgs);
			// this one won't return here
		return B_ERROR;
	}

	// Jump to the entry point in user space. Only returns if something fails.
	return arch_thread_enter_userspace(thread, args->userlandEntry,
		args->userlandArgument1, args->userlandArgument2);
}


status_t
thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2)
{
	UserThreadEntryArguments entryArgs;
	entryArgs.kernelFunction = NULL;
	entryArgs.argument = NULL;
	entryArgs.enterUserland = true;
	entryArgs.userlandEntry = (addr_t)entryFunction;
	entryArgs.userlandArgument1 = argument1;
	entryArgs.userlandArgument2 = argument2;
	entryArgs.pthread = NULL;
	entryArgs.forkArgs = NULL;
	entryArgs.flags = 0;

	return enter_userspace(thread, &entryArgs);
}


static void
common_thread_entry(void* _args)
{
	Thread* thread = thread_get_current_thread();

	// The thread is new and has been scheduled the first time.

	// start CPU time based user timers
	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, thread->cpu->previous_thread);
	}

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);

	// start tracking time
	thread->last_time = system_time();

	// unlock the scheduler lock and enable interrupts
	release_spinlock(&gSchedulerLock);
	enable_interrupts();

	// call the kernel function, if any
	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
	if (args->kernelFunction != NULL)
		args->kernelFunction(args->argument);

	// If requested, enter userland, now.
	if (args->enterUserland) {
		enter_userspace(thread, (UserThreadEntryArguments*)args);
			// only returns on error

		// If that's the team's main thread, init the team exit info.
		if (thread == thread->team->main_thread)
			team_init_exit_info_on_error(thread->team);
	}

	// we're done
	thread_exit();
}


/*!	Prepares the given thread's kernel stack for executing its entry function.

	The data pointed to by \a data of size \a dataSize are copied to the
	thread's kernel stack. A pointer to the copy's data is passed to the entry
	function. The entry function is common_thread_entry().

	\param thread The thread.
	\param data Pointer to data to be copied to the thread's stack and passed
		to the entry function.
	\param dataSize The size of \a data.
*/
static void
init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
{
	uint8* stack = (uint8*)thread->kernel_stack_base;
	uint8* stackTop = (uint8*)thread->kernel_stack_top;

	// clear (or rather invalidate) the kernel stack contents, if compiled with
	// debugging
#if KDEBUG > 0
#	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
		KERNEL_STACK_SIZE);
#	else
	memset(stack, 0xcc, KERNEL_STACK_SIZE);
#	endif
#endif

	// copy the data onto the stack, with 16-byte alignment to be on the safe
	// side
	void* clonedData;
#ifdef STACK_GROWS_DOWNWARDS
	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
	stackTop = (uint8*)clonedData;
#else
	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
#endif

	memcpy(clonedData, data, dataSize);

	arch_thread_init_kthread_stack(thread, stack, stackTop,
		&common_thread_entry, clonedData);
}


static status_t
create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
	size_t stackSize, size_t additionalSize, size_t guardSize,
	char* nameBuffer)
{
	area_id stackArea = -1;
	uint8* stackBase = (uint8*)_stackBase;

	if (stackBase != NULL) {
		// A stack has been specified. It must be large enough to hold the
		// TLS space at least. Guard pages are ignored for existing stacks.
		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
		if (stackSize < MIN_USER_STACK_SIZE)
			return B_BAD_VALUE;

		stackSize -= TLS_SIZE;
	} else {
		// No user-defined stack -- allocate one. For non-main threads the
		// stack will be between USER_STACK_REGION and the main thread stack
		// area. For a main thread the position is fixed.

		guardSize = PAGE_ALIGN(guardSize);

		if (stackSize == 0) {
			// Use the default size (a different one for a main thread).
			stackSize = thread->id == team->id
				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
		} else {
			// Verify that the given stack size is large enough.
			if (stackSize < MIN_USER_STACK_SIZE)
				return B_BAD_VALUE;

			stackSize = PAGE_ALIGN(stackSize);
		}

		size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE
			+ additionalSize);

		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
			thread->name, thread->id);

		stackBase = (uint8*)USER_STACK_REGION;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
		virtualRestrictions.address = (void*)stackBase;

		physical_address_restrictions physicalRestrictions = {};

		stackArea = create_area_etc(team->id, nameBuffer,
			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
			0, guardSize, &virtualRestrictions, &physicalRestrictions,
			(void**)&stackBase);
		if (stackArea < 0)
			return stackArea;
	}

	// set the stack
	ThreadLocker threadLocker(thread);
#ifdef STACK_GROWS_DOWNWARDS
	thread->user_stack_base = (addr_t)stackBase + guardSize;
#else
	thread->user_stack_base = (addr_t)stackBase;
#endif
	thread->user_stack_size = stackSize;
	thread->user_stack_area = stackArea;

	return B_OK;
}


status_t
thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize)
{
	char nameBuffer[B_OS_NAME_LENGTH];
	return create_thread_user_stack(team, thread, stackBase, stackSize,
		additionalSize, USER_STACK_GUARD_SIZE, nameBuffer);
}


/*!	Creates a new thread.

	\param attributes The thread creation attributes, specifying the team in
		which to create the thread, as well as a whole bunch of other
		arguments.
	\param kernel \c true, if a kernel-only thread shall be created, \c false,
		if the thread shall also be able to run in userland.
	\return The ID of the newly created thread (>= 0) or an error code on
		failure.
*/
thread_id
thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
{
	status_t status = B_OK;

	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
		attributes.thread, kernel ? "kernel" : "user"));

	// get the team
	Team* team = Team::Get(attributes.team);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// If a thread object is given, acquire a reference to it, otherwise create
	// a new thread object with the given attributes.
	Thread* thread = attributes.thread;
	if (thread != NULL) {
		thread->AcquireReference();
	} else {
		status = Thread::Create(attributes.name, thread);
		if (status != B_OK)
			return status;
	}
	BReference<Thread> threadReference(thread, true);

	thread->team = team;
		// set already, so, if something goes wrong, the team pointer is
		// available for deinitialization
	thread->priority = attributes.priority == -1
		? B_NORMAL_PRIORITY : attributes.priority;
	thread->next_priority = thread->priority;
	thread->state = B_THREAD_SUSPENDED;
	thread->next_state = B_THREAD_SUSPENDED;

	thread->sig_block_mask = attributes.signal_mask;

	// init debug structure
	init_thread_debug_info(&thread->debug_info);

	// create the kernel stack
	char stackName[B_OS_NAME_LENGTH];
	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack",
		thread->name, thread->id);
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};

	thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
			| B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&thread->kernel_stack_base);

	if (thread->kernel_stack_area < 0) {
		// we're not yet part of a team, so we can just bail out
		status = thread->kernel_stack_area;

		dprintf("create_thread: error creating kernel stack: %s!\n",
			strerror(status));

		return status;
	}

	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	if (kernel) {
		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		ThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = false;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
	} else {
		// create the userland stack, if the thread doesn't have one yet
		if (thread->user_stack_base == 0) {
			status = create_thread_user_stack(team, thread,
				attributes.stack_address, attributes.stack_size,
				attributes.additional_stack_size, attributes.guard_size,
				stackName);
			if (status != B_OK)
				return status;
		}

		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		UserThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = true;
		entryArgs.userlandEntry = (addr_t)attributes.entry;
		entryArgs.userlandArgument1 = attributes.args1;
		entryArgs.userlandArgument2 = attributes.args2;
		entryArgs.pthread = attributes.pthread;
		entryArgs.forkArgs = attributes.forkArgs;
		entryArgs.flags = attributes.flags;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));

		// create the pre-defined thread timers
		status = user_timer_create_thread_timers(team, thread);
		if (status != B_OK)
			return status;
	}

	// lock the team and see if it is still alive
	TeamLocker teamLocker(team);
	if (team->state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	bool debugNewThread = false;
	if (!kernel) {
		// allocate the user_thread structure, if not already allocated
		if (thread->user_thread == NULL) {
			thread->user_thread = team_allocate_user_thread(team);
			if (thread->user_thread == NULL)
				return B_NO_MEMORY;
		}

		// If the new thread belongs to the same team as the current thread, it
		// may inherit some of the thread debug flags.
		Thread* currentThread = thread_get_current_thread();
		if (currentThread != NULL && currentThread->team == team) {
			// inherit all user flags...
			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
				& B_THREAD_DEBUG_USER_FLAG_MASK;

			// ... save the syscall tracing flags, unless explicitly specified
			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
					| B_THREAD_DEBUG_POST_SYSCALL);
			}

			thread->debug_info.flags = debugFlags;

			// stop the new thread, if desired
			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
		}
	}

	// We're going to make the thread live, now. The thread itself will take
	// over a reference to its Thread object. We acquire another reference for
	// our own use (and threadReference remains armed).
	thread->AcquireReference();

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker schedulerLocker(gSchedulerLock);
	SpinLocker threadHashLocker(sThreadHashLock);

	// make thread visible in global hash/list
	thread->visible = true;
	sUsedThreads++;
	scheduler_on_thread_init(thread);

	// Debug the new thread, if the parent thread required that (see above),
	// or the respective global team debug flag is set. But only, if a
	// debugger is installed for the team.
	if (!kernel) {
		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
		if (debugNewThread
			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
		}
	}

	// insert thread into team
	insert_thread_into_team(team, thread);

	threadHashLocker.Unlock();
	schedulerLocker.Unlock();
	threadLocker.Unlock();
	teamLocker.Unlock();

	// notify listeners
	sNotificationService.Notify(THREAD_ADDED, thread);

	return thread->id;
}

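/*!	Kernel daemon that "buries" dead threads: it waits on
	sUndertakerCondition for entries queued by thread_exit(), removes each
	dead thread from the kernel team, and releases the last reference to its
	Thread object, which frees the remaining resources.
*/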
static status_t
undertaker(void* /*args*/)
{
	while (true) {
		// wait for a thread to bury
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		while (sUndertakerEntries.IsEmpty()) {
			ConditionVariableEntry conditionEntry;
			sUndertakerCondition.Add(&conditionEntry);
			schedulerLocker.Unlock();

			conditionEntry.Wait();

			schedulerLocker.Lock();
		}

		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
		schedulerLocker.Unlock();

		UndertakerEntry entry = *_entry;
			// we need a copy, since the original entry is on the thread's stack

		// we've got an entry
		Thread* thread = entry.thread;

		// remove this thread from the kernel team -- this makes it
		// inaccessible
		Team* kernelTeam = team_get_kernel_team();
		TeamLocker kernelTeamLocker(kernelTeam);
		thread->Lock();
		schedulerLocker.Lock();

		remove_thread_from_team(kernelTeam, thread);

		schedulerLocker.Unlock();
		kernelTeamLocker.Unlock();

		// free the thread structure
		thread->UnlockAndReleaseReference();
	}

	// can never get here
	return B_OK;
}


/*!	Returns the semaphore the thread is currently waiting on.

	The return value is purely informative.
	The caller must hold the scheduler lock.

	\param thread The thread.
	\return The ID of the semaphore the thread is currently waiting on or \c -1,
		if it isn't waiting on a semaphore.
*/
static sem_id
get_thread_wait_sem(Thread* thread)
{
	if (thread->state == B_THREAD_WAITING
		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
		return (sem_id)(addr_t)thread->wait.object;
	}
	return -1;
}


/*!	Fills the thread_info structure with information from the specified thread.
	The caller must hold the thread's lock and the scheduler lock.
*/
static void
fill_thread_info(Thread *thread, thread_info *info, size_t size)
{
	info->thread = thread->id;
	info->team = thread->team->id;

	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);

	info->sem = -1;

	if (thread->state == B_THREAD_WAITING) {
		info->state = B_THREAD_WAITING;

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SNOOZE:
				info->state = B_THREAD_ASLEEP;
				break;

			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					info->state = B_THREAD_RECEIVING;
				else
					info->sem = sem;
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			default:
				break;
		}
	} else
		info->state = (thread_state)thread->state;

	info->priority = thread->priority;
	info->stack_base = (void *)thread->user_stack_base;
	info->stack_end = (void *)(thread->user_stack_base
		+ thread->user_stack_size);

	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
	info->user_time = thread->user_time;
	info->kernel_time = thread->kernel_time;
}


static status_t
send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
	int32 flags)
{
	// get the thread
	Thread *target = Thread::Get(id);
	if (target == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> targetReference(target, true);

	// get the write semaphore
	ThreadLocker targetLocker(target);
	sem_id cachedSem = target->msg.write_sem;
	targetLocker.Unlock();

	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
		return B_NO_MEMORY;

	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
	if (status == B_INTERRUPTED) {
		// we got interrupted by a signal
		return status;
	}
	if (status != B_OK) {
		// Any other acquisition problems may be due to thread deletion
		return B_BAD_THREAD_ID;
	}

	void* data;
	if (bufferSize > 0) {
		data = malloc(bufferSize);
		if (data == NULL)
			return B_NO_MEMORY;
		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
			free(data);
			return B_BAD_DATA;
		}
	} else
		data = NULL;

	targetLocker.Lock();

	// The target thread could have been deleted at this point.
	if (!target->IsAlive()) {
		targetLocker.Unlock();
		free(data);
		return B_BAD_THREAD_ID;
	}

	// Save the message information
	target->msg.sender = thread_get_current_thread()->id;
	target->msg.code = code;
	target->msg.size = bufferSize;
	target->msg.buffer = data;
	cachedSem = target->msg.read_sem;

	targetLocker.Unlock();

	release_sem(cachedSem);
	return B_OK;
}


static int32
receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
	int32 flags)
{
	Thread *thread = thread_get_current_thread();
	size_t size;
	int32 code;

	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
	if (status != B_OK) {
		// Actually, we're not supposed to return error codes, but since the
		// only reason this can fail is that we were killed, it's probably
		// okay to do so (but also meaningless).
		return status;
	}

	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
		size = min_c(bufferSize, thread->msg.size);
		status = user_memcpy(buffer, thread->msg.buffer, size);
		if (status != B_OK) {
			free(thread->msg.buffer);
			release_sem(thread->msg.write_sem);
			return status;
		}
	}

	*_sender = thread->msg.sender;
	code = thread->msg.code;

	free(thread->msg.buffer);
	release_sem(thread->msg.write_sem);

	return code;
}


static status_t
common_getrlimit(int resource, struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_getrlimit(resource, rlp);

		case RLIMIT_CORE:
			rlp->rlim_cur = 0;
			rlp->rlim_max = 0;
			return B_OK;

		case RLIMIT_STACK:
		{
			rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE;
			rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE;
			return B_OK;
		}

		default:
			return EINVAL;
	}

	return B_OK;
}


static status_t
common_setrlimit(int resource, const struct rlimit * rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_setrlimit(resource, rlp);

		case RLIMIT_CORE:
			// We don't support core files, so only allow setting it to 0/0.
			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
				return EINVAL;
			return B_OK;

		default:
			return EINVAL;
	}

	return B_OK;
}


static status_t
common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
	bigtime_t* _remainingTime)
{
	switch (clockID) {
		case CLOCK_REALTIME:
			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
			// through
			flags |= B_TIMEOUT_REAL_TIME_BASE;
		case CLOCK_MONOTONIC:
		{
			// Store the start time, for the case that we get interrupted and
			// need to return the remaining time. For absolute timeouts we can
			// still get the time later, if needed.
			bigtime_t startTime
				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
					? system_time() : 0;

			Thread* thread = thread_get_current_thread();

			InterruptsSpinLocker schedulerLocker(gSchedulerLock);

			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
				NULL);
			status_t status = thread_block_with_timeout_locked(flags, timeout);

			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
				return B_OK;

			// If interrupted, compute the remaining time, if requested.
			if (status == B_INTERRUPTED && _remainingTime != NULL) {
				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
					*_remainingTime = std::max(
						startTime + timeout - system_time(), (bigtime_t)0);
				} else {
					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
						? real_time_clock_usecs() : system_time();
					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
				}
			}

			return status;
		}

		case CLOCK_THREAD_CPUTIME_ID:
			// Waiting for ourselves to do something isn't particularly
			// productive.
			return B_BAD_VALUE;

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
			// We don't have to support those, but we are allowed to. Could be
			// done by creating a UserTimer on the fly with a custom UserEvent
			// that would just wake us up.
			return ENOTSUP;
	}
}


// #pragma mark - debugger calls

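// Note: the commands in this section run in the kernel debugger (KDL), where
// the rest of the system is frozen; they therefore walk the thread table
// without taking any locks.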

static int
make_thread_unreal(int argc, char **argv)
{
	int32 id = -1;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 1)
		id = strtoul(argv[1], NULL, 0);

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (id != -1 && thread->id != id)
			continue;

		if (thread->priority > B_DISPLAY_PRIORITY) {
			thread->priority = thread->next_priority = B_NORMAL_PRIORITY;
			kprintf("thread %" B_PRId32 " made unreal\n", thread->id);
		}
	}

	return 0;
}


static int
set_thread_prio(int argc, char **argv)
{
	int32 id;
	int32 prio;

	if (argc > 3 || argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	prio = strtoul(argv[1], NULL, 0);
	if (prio > THREAD_MAX_SET_PRIORITY)
		prio = THREAD_MAX_SET_PRIORITY;
	if (prio < THREAD_MIN_SET_PRIORITY)
		prio = THREAD_MIN_SET_PRIORITY;

	if (argc > 2)
		id = strtoul(argv[2], NULL, 0);
	else
		id = thread_get_current_thread()->id;

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;
		thread->priority = thread->next_priority = prio;
		kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
make_thread_suspended(int argc, char **argv)
{
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		thread->next_state = B_THREAD_SUSPENDED;
		kprintf("thread %" B_PRId32 " suspended\n", id);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
make_thread_resumed(int argc, char **argv)
{
	int32 id;

	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// force user to enter a thread id, as using
	// the current thread is usually not intended
	id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		if (thread->state == B_THREAD_SUSPENDED) {
			scheduler_enqueue_in_run_queue(thread);
			kprintf("thread %" B_PRId32 " resumed\n", thread->id);
		}
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}


static int
drop_into_debugger(int argc, char **argv)
{
	status_t err;
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	err = _user_debug_thread(id);
		// TODO: This is a non-trivial syscall doing some locking, so this is
		// really nasty and may go seriously wrong.
	if (err)
		kprintf("drop failed\n");
	else
		kprintf("thread %" B_PRId32 " dropped into user debugger\n", id);

	return 0;
}


/*!	Returns a user-readable string for a thread state.
	Only for use in the kernel debugger.
*/
static const char *
state_to_text(Thread *thread, int32 state)
{
	switch (state) {
		case B_THREAD_READY:
			return "ready";

		case B_THREAD_RUNNING:
			return "running";

		case B_THREAD_WAITING:
		{
			if (thread != NULL) {
				switch (thread->wait.type) {
					case THREAD_BLOCK_TYPE_SNOOZE:
						return "zzz";

					case THREAD_BLOCK_TYPE_SEMAPHORE:
					{
						sem_id sem = (sem_id)(addr_t)thread->wait.object;
						if (sem == thread->msg.read_sem)
							return "receive";
						break;
					}
				}
			}

			return "waiting";
		}

		case B_THREAD_SUSPENDED:
			return "suspended";

		case THREAD_STATE_FREE_ON_RESCHED:
			return "death";

		default:
			return "UNKNOWN";
	}
}


static void
print_thread_list_table_head()
{
	kprintf("%-*s id state wait for %-*s cpu pri %-*s team "
		"name\n",
		B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object",
		B_PRINTF_POINTER_WIDTH, "stack");
}


static void
_dump_thread_info(Thread *thread, bool shortInfo)
{
	if (shortInfo) {
		kprintf("%p %6" B_PRId32 " %-10s", thread, thread->id,
			state_to_text(thread, thread->state));

		// does it block on a semaphore or a condition variable?
		if (thread->state == B_THREAD_WAITING) {
			switch (thread->wait.type) {
				case THREAD_BLOCK_TYPE_SEMAPHORE:
				{
					sem_id sem = (sem_id)(addr_t)thread->wait.object;
					if (sem == thread->msg.read_sem)
						kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					else {
						kprintf("sem %-*" B_PRId32,
							B_PRINTF_POINTER_WIDTH + 5, sem);
					}
					break;
				}

				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					kprintf("cvar %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_SNOOZE:
					kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					break;

				case THREAD_BLOCK_TYPE_SIGNAL:
					kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, "");
					break;

				case THREAD_BLOCK_TYPE_MUTEX:
					kprintf("mutex %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_RW_LOCK:
					kprintf("rwlock %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_OTHER:
					kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, "");
					break;

				default:
					kprintf("??? %p ", thread->wait.object);
					break;
			}
		} else
			kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, "");

		// on which CPU does it run?
		if (thread->cpu)
			kprintf("%2d", thread->cpu->cpu_num);
		else
			kprintf(" -");

		kprintf("%4" B_PRId32 " %p%5" B_PRId32 " %s\n", thread->priority,
			(void *)thread->kernel_stack_base, thread->team->id,
			thread->name != NULL ? thread->name : "<NULL>");

		return;
	}

	// print the long info

	struct thread_death_entry *death = NULL;

	kprintf("THREAD: %p\n", thread);
	kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id,
		thread->id);
	kprintf("serial_number: %" B_PRId64 "\n", thread->serial_number);
	kprintf("name: \"%s\"\n", thread->name);
	kprintf("hash_next: %p\nteam_next: %p\nq_next: %p\n",
		thread->hash_next, thread->team_next, thread->queue_next);
	kprintf("priority: %" B_PRId32 " (next %" B_PRId32 ", "
		"I/O: %" B_PRId32 ")\n", thread->priority, thread->next_priority,
		thread->io_priority);
	kprintf("state: %s\n", state_to_text(thread, thread->state));
	kprintf("next_state: %s\n", state_to_text(thread, thread->next_state));
	kprintf("cpu: %p ", thread->cpu);
	if (thread->cpu)
		kprintf("(%d)\n", thread->cpu->cpu_num);
	else
		kprintf("\n");
	kprintf("sig_pending: %#" B_PRIx64 " (blocked: %#" B_PRIx64
		", before sigsuspend(): %#" B_PRIx64 ")\n",
		(int64)thread->ThreadPendingSignals(),
		(int64)thread->sig_block_mask,
		(int64)thread->sigsuspend_original_unblocked_mask);
	kprintf("in_kernel: %d\n", thread->in_kernel);

	if (thread->state == B_THREAD_WAITING) {
		kprintf("waiting for: ");

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					kprintf("data\n");
				else
					kprintf("semaphore %" B_PRId32 "\n", sem);
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
				kprintf("condition variable %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_SNOOZE:
				kprintf("snooze()\n");
				break;

			case THREAD_BLOCK_TYPE_SIGNAL:
				kprintf("signal\n");
				break;

			case THREAD_BLOCK_TYPE_MUTEX:
				kprintf("mutex %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_RW_LOCK:
				kprintf("rwlock %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_OTHER:
				kprintf("other (%s)\n", (char*)thread->wait.object);
				break;

			default:
				kprintf("unknown (%p)\n", thread->wait.object);
				break;
		}
	}

	kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
	kprintf("team: %p, \"%s\"\n", thread->team,
		thread->team->Name());
	kprintf(" exit.sem: %" B_PRId32 "\n", thread->exit.sem);
	kprintf(" exit.status: %#" B_PRIx32 " (%s)\n", thread->exit.status,
		strerror(thread->exit.status));
	kprintf(" exit.waiters:\n");
	while ((death = (struct thread_death_entry*)list_get_next_item(
			&thread->exit.waiters, death)) != NULL) {
		kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread);
	}

	kprintf("kernel_stack_area: %" B_PRId32 "\n", thread->kernel_stack_area);
	kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base);
	kprintf("user_stack_area: %" B_PRId32 "\n", thread->user_stack_area);
	kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base);
	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
	kprintf("user_thread: %p\n", (void *)thread->user_thread);
	kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno,
		strerror(thread->kernel_errno));
	kprintf("kernel_time: %" B_PRId64 "\n", thread->kernel_time);
	kprintf("user_time: %" B_PRId64 "\n", thread->user_time);
	kprintf("flags: 0x%" B_PRIx32 "\n", thread->flags);
	kprintf("architecture dependent section:\n");
	arch_thread_dump_info(&thread->arch_info);
}


static int
dump_thread_info(int argc, char **argv)
{
	bool shortInfo = false;
	int argi = 1;
	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
		shortInfo = true;
		print_thread_list_table_head();
		argi++;
	}

	if (argi == argc) {
		_dump_thread_info(thread_get_current_thread(), shortInfo);
		return 0;
	}

	for (; argi < argc; argi++) {
		const char *name = argv[argi];
		ulong arg = strtoul(name, NULL, 0);

		if (IS_KERNEL_ADDRESS(arg)) {
			// semi-hack
			_dump_thread_info((Thread *)arg, shortInfo);
			continue;
		}

		// walk through the thread list, trying to match name or id
		bool found = false;
		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
				Thread* thread = it.Next();) {
			if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) {
				_dump_thread_info(thread, shortInfo);
				found = true;
				break;
			}
		}

		if (!found)
			kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg);
	}

	return 0;
}


static int
dump_thread_list(int argc, char **argv)
{
	bool realTimeOnly = false;
	bool calling = false;
	const char *callSymbol = NULL;
	addr_t callStart = 0;
	addr_t callEnd = 0;
	int32 requiredState = 0;
	team_id team = -1;
	sem_id sem = -1;

	if (!strcmp(argv[0], "realtime"))
		realTimeOnly = true;
	else if (!strcmp(argv[0], "ready"))
		requiredState = B_THREAD_READY;
	else if (!strcmp(argv[0], "running"))
		requiredState = B_THREAD_RUNNING;
	else if (!strcmp(argv[0], "waiting")) {
		requiredState = B_THREAD_WAITING;

		if (argc > 1) {
			sem = strtoul(argv[1], NULL, 0);
			if (sem == 0)
				kprintf("ignoring invalid semaphore argument.\n");
		}
	} else if (!strcmp(argv[0], "calling")) {
		if (argc < 2) {
			kprintf("Need to give a symbol name or start and end arguments.\n");
			return 0;
		} else if (argc == 3) {
			callStart = parse_expression(argv[1]);
			callEnd = parse_expression(argv[2]);
		} else
			callSymbol = argv[1];

		calling = true;
	} else if (argc > 1) {
		team = strtoul(argv[1], NULL, 0);
		if (team == 0)
			kprintf("ignoring invalid team argument.\n");
	}

	print_thread_list_table_head();

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		// filter out threads not matching the search criteria
		if ((requiredState && thread->state != requiredState)
			|| (calling && !arch_debug_contains_call(thread, callSymbol,
					callStart, callEnd))
			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
			|| (team > 0 && thread->team->id != team)
			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
			continue;

		_dump_thread_info(thread, true);
	}
	return 0;
}


// #pragma mark - private kernel API

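/*!	Terminates the calling thread: stops its user timers, moves it into the
	kernel team, notifies waiters and the debugger, and finally hands the
	thread to the undertaker for the actual cleanup. If the exiting thread is
	its team's main thread, the whole team is shut down as well. Never
	returns.
*/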
void
thread_exit(void)
{
	cpu_status state;
	Thread* thread = thread_get_current_thread();
	Team* team = thread->team;
	Team* kernelTeam = team_get_kernel_team();
	status_t status;
	struct thread_debug_info debugInfo;
	team_id teamID = team->id;

	TRACE(("thread %" B_PRId32 " exiting w/return code %#" B_PRIx32 "\n",
		thread->id, thread->exit.status));

	if (!are_interrupts_enabled())
		panic("thread_exit() called with interrupts disabled!\n");

	// boost our priority to get this over with
	thread->priority = thread->next_priority = B_URGENT_DISPLAY_PRIORITY;

	if (team != kernelTeam) {
		// Cancel previously installed alarm timer, if any. Hold the scheduler
		// lock to make sure that when cancel_timer() returns, the alarm timer
		// hook will not be invoked anymore (since
		// B_TIMER_ACQUIRE_SCHEDULER_LOCK is used).
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);
		cancel_timer(&thread->alarm);
		schedulerLocker.Unlock();

		// Delete all user timers associated with the thread.
		ThreadLocker threadLocker(thread);
		thread->DeleteUserTimers(false);

		// detach the thread's user thread
		user_thread* userThread = thread->user_thread;
		thread->user_thread = NULL;

		threadLocker.Unlock();

		// Delete the thread's user thread, if it's not the main thread. If it
		// is, we can save the work, since it will be deleted with the team's
		// address space.
		if (thread != team->main_thread)
			team_free_user_thread(team, userThread);
	}

	// remember the user stack area -- we will delete it below
	area_id userStackArea = -1;
	if (team->address_space != NULL && thread->user_stack_area >= 0) {
		userStackArea = thread->user_stack_area;
		thread->user_stack_area = -1;
	}

	struct job_control_entry *death = NULL;
	struct thread_death_entry* threadDeathEntry = NULL;
	bool deleteTeam = false;
	port_id debuggerPort = -1;

	if (team != kernelTeam) {
		user_debug_thread_exiting(thread);

		if (team->main_thread == thread) {
			// The main thread is exiting. Shut down the whole team.
			deleteTeam = true;

			// kill off all other threads and the user debugger facilities
			debuggerPort = team_shutdown_team(team);

			// acquire necessary locks, which are: process group lock, kernel
			// team lock, parent team lock, and the team lock
			team->LockProcessGroup();
			kernelTeam->Lock();
			team->LockTeamAndParent(true);
		} else {
			threadDeathEntry
				= (thread_death_entry*)malloc(sizeof(thread_death_entry));

			// acquire necessary locks, which are: kernel team lock and the
			// team lock
			kernelTeam->Lock();
			team->Lock();
		}

		ThreadLocker threadLocker(thread);

		state = disable_interrupts();

		// swap address spaces, to make sure we're running on the kernel's pgdir
		vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());

		SpinLocker schedulerLocker(gSchedulerLock);
			// removing the thread and putting its death entry to the parent
			// team needs to be an atomic operation

		// remember how long this thread lasted
		bigtime_t now = system_time();
		InterruptsSpinLocker threadTimeLocker(thread->time_lock);
		thread->kernel_time += now - thread->last_time;
		thread->last_time = now;
		threadTimeLocker.Unlock();

		team->dead_threads_kernel_time += thread->kernel_time;
		team->dead_threads_user_time += thread->user_time;

		// stop/update thread/team CPU time user timers
		if (thread->HasActiveCPUTimeUserTimers()
			|| team->HasActiveCPUTimeUserTimers()) {
			user_timer_stop_cpu_timers(thread, NULL);
		}

		// deactivate CPU time user timers for the thread
		if (thread->HasActiveCPUTimeUserTimers())
			thread->DeactivateCPUTimeUserTimers();

		// put the thread into the kernel team until it dies
		remove_thread_from_team(team, thread);
		insert_thread_into_team(kernelTeam, thread);

		if (team->death_entry != NULL) {
			if (--team->death_entry->remaining_threads == 0)
				team->death_entry->condition.NotifyOne(true, B_OK);
		}

		if (deleteTeam) {
			Team* parent = team->parent;

			// Set the team job control state to "dead" and detach the job
			// control entry from our team struct.
			team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL,
				true);
			death = team->job_control_entry;
			team->job_control_entry = NULL;

			if (death != NULL) {
				death->InitDeadState();

				// team_set_job_control_state() already moved our entry
				// into the parent's list. We just check the soft limit of
				// death entries.
				if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
					death = parent->dead_children.entries.RemoveHead();
					parent->dead_children.count--;
				} else
					death = NULL;
			}

			schedulerLocker.Unlock();
			restore_interrupts(state);

			threadLocker.Unlock();

			// Get a temporary reference to the team's process group
			// -- team_remove_team() removes the team from the group, which
			// might destroy it otherwise and we wouldn't be able to unlock it.
			ProcessGroup* group = team->group;
			group->AcquireReference();

			pid_t foregroundGroupToSignal;
			team_remove_team(team, foregroundGroupToSignal);

			// unlock everything but the parent team
			team->Unlock();
			if (parent != kernelTeam)
				kernelTeam->Unlock();
			group->Unlock();
			group->ReleaseReference();

			// Send SIGCHLD to the parent as long as we still have its lock.
2053 // This makes job control state change + signalling atomic. 2054 Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id); 2055 if (team->exit.reason == CLD_EXITED) { 2056 childSignal.SetStatus(team->exit.status); 2057 } else { 2058 childSignal.SetStatus(team->exit.signal); 2059 childSignal.SetSendingUser(team->exit.signaling_user); 2060 } 2061 send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE); 2062 2063 // also unlock the parent 2064 parent->Unlock(); 2065 2066 // If the team was a session leader with controlling TTY, we have 2067 // to send SIGHUP to the foreground process group. 2068 if (foregroundGroupToSignal >= 0) { 2069 Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id); 2070 send_signal_to_process_group(foregroundGroupToSignal, 2071 groupSignal, B_DO_NOT_RESCHEDULE); 2072 } 2073 } else { 2074 // The thread is not the main thread. We store a thread death entry 2075 // for it, unless someone is already waiting for it. 2076 if (threadDeathEntry != NULL 2077 && list_is_empty(&thread->exit.waiters)) { 2078 threadDeathEntry->thread = thread->id; 2079 threadDeathEntry->status = thread->exit.status; 2080 2081 // add entry -- remove an old one, if we hit the limit 2082 list_add_item(&team->dead_threads, threadDeathEntry); 2083 team->dead_threads_count++; 2084 threadDeathEntry = NULL; 2085 2086 if (team->dead_threads_count > MAX_DEAD_THREADS) { 2087 threadDeathEntry 2088 = (thread_death_entry*)list_remove_head_item( 2089 &team->dead_threads); 2090 team->dead_threads_count--; 2091 } 2092 } 2093 2094 schedulerLocker.Unlock(); 2095 restore_interrupts(state); 2096 2097 threadLocker.Unlock(); 2098 team->Unlock(); 2099 kernelTeam->Unlock(); 2100 } 2101 2102 TRACE(("thread_exit: thread %" B_PRId32 " now a kernel thread!\n", 2103 thread->id)); 2104 } 2105 2106 free(threadDeathEntry); 2107 2108 // delete the team if we're its main thread 2109 if (deleteTeam) { 2110 team_delete_team(team, debuggerPort); 2111 2112 // we need to delete any death entry that made it to here 2113 delete death; 2114 } 2115 2116 ThreadLocker threadLocker(thread); 2117 2118 state = disable_interrupts(); 2119 SpinLocker schedulerLocker(gSchedulerLock); 2120 2121 // mark invisible in global hash/list, so it's no longer accessible 2122 SpinLocker threadHashLocker(sThreadHashLock); 2123 thread->visible = false; 2124 sUsedThreads--; 2125 threadHashLocker.Unlock(); 2126 2127 // Stop debugging for this thread 2128 SpinLocker threadDebugInfoLocker(thread->debug_info.lock); 2129 debugInfo = thread->debug_info; 2130 clear_thread_debug_info(&thread->debug_info, true); 2131 threadDebugInfoLocker.Unlock(); 2132 2133 // Remove the select infos. We notify them a little later. 
2134 select_info* selectInfos = thread->select_infos; 2135 thread->select_infos = NULL; 2136 2137 schedulerLocker.Unlock(); 2138 restore_interrupts(state); 2139 2140 threadLocker.Unlock(); 2141 2142 destroy_thread_debug_info(&debugInfo); 2143 2144 // notify select infos 2145 select_info* info = selectInfos; 2146 while (info != NULL) { 2147 select_sync* sync = info->sync; 2148 2149 notify_select_events(info, B_EVENT_INVALID); 2150 info = info->next; 2151 put_select_sync(sync); 2152 } 2153 2154 // notify listeners 2155 sNotificationService.Notify(THREAD_REMOVED, thread); 2156 2157 // shutdown the thread messaging 2158 2159 status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0); 2160 if (status == B_WOULD_BLOCK) { 2161 // there is data waiting for us, so let us eat it 2162 thread_id sender; 2163 2164 delete_sem(thread->msg.write_sem); 2165 // first, let's remove all possibly waiting writers 2166 receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT); 2167 } else { 2168 // we probably own the semaphore here, and we're the last to do so 2169 delete_sem(thread->msg.write_sem); 2170 } 2171 // now we can safely remove the msg.read_sem 2172 delete_sem(thread->msg.read_sem); 2173 2174 // fill all death entries and delete the sem that others will use to wait 2175 // for us 2176 { 2177 sem_id cachedExitSem = thread->exit.sem; 2178 2179 ThreadLocker threadLocker(thread); 2180 2181 // make sure no one will grab this semaphore again 2182 thread->exit.sem = -1; 2183 2184 // fill all death entries 2185 thread_death_entry* entry = NULL; 2186 while ((entry = (thread_death_entry*)list_get_next_item( 2187 &thread->exit.waiters, entry)) != NULL) { 2188 entry->status = thread->exit.status; 2189 } 2190 2191 threadLocker.Unlock(); 2192 2193 delete_sem(cachedExitSem); 2194 } 2195 2196 // delete the user stack, if this was a user thread 2197 if (!deleteTeam && userStackArea >= 0) { 2198 // We postponed deleting the user stack until now, since this way all 2199 // notifications for the thread's death are out already and all other 2200 // threads waiting for this thread's death and some object on its stack 2201 // will wake up before we (try to) delete the stack area. Of most 2202 // relevance is probably the case where this is the main thread and 2203 // other threads use objects on its stack -- so we want them terminated 2204 // first. 2205 // When the team is deleted, all areas are deleted anyway, so we don't 2206 // need to do that explicitly in that case. 2207 vm_delete_area(teamID, userStackArea, true); 2208 } 2209 2210 // notify the debugger 2211 if (teamID != kernelTeam->id) 2212 user_debug_thread_deleted(teamID, thread->id); 2213 2214 // enqueue in the undertaker list and reschedule for the last time 2215 UndertakerEntry undertakerEntry(thread, teamID); 2216 2217 disable_interrupts(); 2218 schedulerLocker.Lock(); 2219 2220 sUndertakerEntries.Add(&undertakerEntry); 2221 sUndertakerCondition.NotifyOne(true); 2222 2223 thread->next_state = THREAD_STATE_FREE_ON_RESCHED; 2224 scheduler_reschedule(); 2225 2226 panic("never can get here\n"); 2227 } 2228 2229 2230 /*! Called in the interrupt handler code when a thread enters 2231 the kernel for any reason. 2232 Only tracks time for now. 2233 Interrupts are disabled. 
2234 */ 2235 void 2236 thread_at_kernel_entry(bigtime_t now) 2237 { 2238 Thread *thread = thread_get_current_thread(); 2239 2240 TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id)); 2241 2242 // track user time 2243 SpinLocker threadTimeLocker(thread->time_lock); 2244 thread->user_time += now - thread->last_time; 2245 thread->last_time = now; 2246 thread->in_kernel = true; 2247 threadTimeLocker.Unlock(); 2248 } 2249 2250 2251 /*! Called whenever a thread exits kernel space to user space. 2252 Tracks time, handles signals, ... 2253 Interrupts must be enabled. When the function returns, interrupts will be 2254 disabled. 2255 The function may not return. This e.g. happens when the thread has received 2256 a deadly signal. 2257 */ 2258 void 2259 thread_at_kernel_exit(void) 2260 { 2261 Thread *thread = thread_get_current_thread(); 2262 2263 TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id)); 2264 2265 handle_signals(thread); 2266 2267 disable_interrupts(); 2268 2269 // track kernel time 2270 bigtime_t now = system_time(); 2271 SpinLocker threadTimeLocker(thread->time_lock); 2272 thread->in_kernel = false; 2273 thread->kernel_time += now - thread->last_time; 2274 thread->last_time = now; 2275 } 2276 2277 2278 /*! The quick version of thread_kernel_exit(), in case no signals are pending 2279 and no debugging shall be done. 2280 Interrupts must be disabled. 2281 */ 2282 void 2283 thread_at_kernel_exit_no_signals(void) 2284 { 2285 Thread *thread = thread_get_current_thread(); 2286 2287 TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n", 2288 thread->id)); 2289 2290 // track kernel time 2291 bigtime_t now = system_time(); 2292 SpinLocker threadTimeLocker(thread->time_lock); 2293 thread->in_kernel = false; 2294 thread->kernel_time += now - thread->last_time; 2295 thread->last_time = now; 2296 } 2297 2298 2299 void 2300 thread_reset_for_exec(void) 2301 { 2302 Thread* thread = thread_get_current_thread(); 2303 2304 ThreadLocker threadLocker(thread); 2305 2306 // delete user-defined timers 2307 thread->DeleteUserTimers(true); 2308 2309 // cancel pre-defined timer 2310 if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID)) 2311 timer->Cancel(); 2312 2313 // reset user_thread and user stack 2314 thread->user_thread = NULL; 2315 thread->user_stack_area = -1; 2316 thread->user_stack_base = 0; 2317 thread->user_stack_size = 0; 2318 2319 // reset signals 2320 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 2321 2322 thread->ResetSignalsOnExec(); 2323 2324 // reset thread CPU time clock 2325 thread->cpu_clock_offset = -thread->CPUTime(false); 2326 2327 // Note: We don't cancel an alarm. It is supposed to survive exec*(). 2328 } 2329 2330 2331 /*! 
Insert a thread to the tail of a queue */ 2332 void 2333 thread_enqueue(Thread *thread, struct thread_queue *queue) 2334 { 2335 thread->queue_next = NULL; 2336 if (queue->head == NULL) { 2337 queue->head = thread; 2338 queue->tail = thread; 2339 } else { 2340 queue->tail->queue_next = thread; 2341 queue->tail = thread; 2342 } 2343 } 2344 2345 2346 Thread * 2347 thread_lookat_queue(struct thread_queue *queue) 2348 { 2349 return queue->head; 2350 } 2351 2352 2353 Thread * 2354 thread_dequeue(struct thread_queue *queue) 2355 { 2356 Thread *thread = queue->head; 2357 2358 if (thread != NULL) { 2359 queue->head = thread->queue_next; 2360 if (queue->tail == thread) 2361 queue->tail = NULL; 2362 } 2363 return thread; 2364 } 2365 2366 2367 Thread * 2368 thread_dequeue_id(struct thread_queue *q, thread_id id) 2369 { 2370 Thread *thread; 2371 Thread *last = NULL; 2372 2373 thread = q->head; 2374 while (thread != NULL) { 2375 if (thread->id == id) { 2376 if (last == NULL) 2377 q->head = thread->queue_next; 2378 else 2379 last->queue_next = thread->queue_next; 2380 2381 if (q->tail == thread) 2382 q->tail = last; 2383 break; 2384 } 2385 last = thread; 2386 thread = thread->queue_next; 2387 } 2388 return thread; 2389 } 2390 2391 2392 thread_id 2393 allocate_thread_id() 2394 { 2395 InterruptsSpinLocker threadHashLocker(sThreadHashLock); 2396 2397 // find the next unused ID 2398 thread_id id; 2399 do { 2400 id = sNextThreadID++; 2401 2402 // deal with integer overflow 2403 if (sNextThreadID < 0) 2404 sNextThreadID = 2; 2405 2406 // check whether the ID is already in use 2407 } while (sThreadHash.Lookup(id, false) != NULL); 2408 2409 return id; 2410 } 2411 2412 2413 thread_id 2414 peek_next_thread_id() 2415 { 2416 InterruptsSpinLocker threadHashLocker(sThreadHashLock); 2417 return sNextThreadID; 2418 } 2419 2420 2421 /*! Yield the CPU to other threads. 2422 If \a force is \c true, the thread will almost guaranteedly be unscheduled. 2423 If \c false, it will continue to run, if there's no other thread in ready 2424 state, and if it has a higher priority than the other ready threads, it 2425 still has a good chance to continue. 2426 */ 2427 void 2428 thread_yield(bool force) 2429 { 2430 if (force) { 2431 // snooze for roughly 3 thread quantums 2432 snooze_etc(9000, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT | B_CAN_INTERRUPT); 2433 #if 0 2434 cpu_status state; 2435 2436 Thread *thread = thread_get_current_thread(); 2437 if (thread == NULL) 2438 return; 2439 2440 InterruptsSpinLocker _(gSchedulerLock); 2441 2442 // mark the thread as yielded, so it will not be scheduled next 2443 //thread->was_yielded = true; 2444 thread->next_priority = B_LOWEST_ACTIVE_PRIORITY; 2445 scheduler_reschedule(); 2446 #endif 2447 } else { 2448 Thread *thread = thread_get_current_thread(); 2449 if (thread == NULL) 2450 return; 2451 2452 // Don't force the thread off the CPU, just reschedule. 2453 InterruptsSpinLocker _(gSchedulerLock); 2454 scheduler_reschedule(); 2455 } 2456 } 2457 2458 2459 /*! Kernel private thread creation function. 
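	A minimal usage sketch (hedged): the worker function, the thread name and
	the priority below are illustrative and not part of this file, and the
	team passed is assumed to be the kernel team. The thread is created
	suspended and still needs to be resumed, just like the undertaker thread
	spawned in thread_init().
	\code
	static status_t
	my_worker(void* data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	// kernel team assumed as the owning team (illustrative)
	thread_id worker = spawn_kernel_thread_etc(&my_worker, "my worker",
		B_NORMAL_PRIORITY, NULL, team_get_kernel_team_id());
	if (worker >= 0)
		resume_thread(worker);
	\endcode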
2460 */ 2461 thread_id 2462 spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority, 2463 void *arg, team_id team) 2464 { 2465 return thread_create_thread( 2466 ThreadCreationAttributes(function, name, priority, arg, team), 2467 true); 2468 } 2469 2470 2471 status_t 2472 wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout, 2473 status_t *_returnCode) 2474 { 2475 if (id < 0) 2476 return B_BAD_THREAD_ID; 2477 2478 // get the thread, queue our death entry, and fetch the semaphore we have to 2479 // wait on 2480 sem_id exitSem = B_BAD_THREAD_ID; 2481 struct thread_death_entry death; 2482 2483 Thread* thread = Thread::GetAndLock(id); 2484 if (thread != NULL) { 2485 // remember the semaphore we have to wait on and place our death entry 2486 exitSem = thread->exit.sem; 2487 if (exitSem >= 0) 2488 list_add_link_to_head(&thread->exit.waiters, &death); 2489 2490 thread->UnlockAndReleaseReference(); 2491 2492 if (exitSem < 0) 2493 return B_BAD_THREAD_ID; 2494 } else { 2495 // we couldn't find this thread -- maybe it's already gone, and we'll 2496 // find its death entry in our team 2497 Team* team = thread_get_current_thread()->team; 2498 TeamLocker teamLocker(team); 2499 2500 // check the child death entries first (i.e. main threads of child 2501 // teams) 2502 bool deleteEntry; 2503 job_control_entry* freeDeath 2504 = team_get_death_entry(team, id, &deleteEntry); 2505 if (freeDeath != NULL) { 2506 death.status = freeDeath->status; 2507 if (deleteEntry) 2508 delete freeDeath; 2509 } else { 2510 // check the thread death entries of the team (non-main threads) 2511 thread_death_entry* threadDeathEntry = NULL; 2512 while ((threadDeathEntry = (thread_death_entry*)list_get_next_item( 2513 &team->dead_threads, threadDeathEntry)) != NULL) { 2514 if (threadDeathEntry->thread == id) { 2515 list_remove_item(&team->dead_threads, threadDeathEntry); 2516 team->dead_threads_count--; 2517 death.status = threadDeathEntry->status; 2518 free(threadDeathEntry); 2519 break; 2520 } 2521 } 2522 2523 if (threadDeathEntry == NULL) 2524 return B_BAD_THREAD_ID; 2525 } 2526 2527 // we found the thread's death entry in our team 2528 if (_returnCode) 2529 *_returnCode = death.status; 2530 2531 return B_OK; 2532 } 2533 2534 // we need to wait for the death of the thread 2535 2536 resume_thread(id); 2537 // make sure we don't wait forever on a suspended thread 2538 2539 status_t status = acquire_sem_etc(exitSem, 1, flags, timeout); 2540 2541 if (status == B_OK) { 2542 // this should never happen as the thread deletes the semaphore on exit 2543 panic("could acquire exit_sem for thread %" B_PRId32 "\n", id); 2544 } else if (status == B_BAD_SEM_ID) { 2545 // this is the way the thread normally exits 2546 status = B_OK; 2547 } else { 2548 // We were probably interrupted or the timeout occurred; we need to 2549 // remove our death entry now. 2550 thread = Thread::GetAndLock(id); 2551 if (thread != NULL) { 2552 list_remove_link(&death); 2553 thread->UnlockAndReleaseReference(); 2554 } else { 2555 // The thread is already gone, so we need to wait uninterruptibly 2556 // for its exit semaphore to make sure our death entry stays valid. 2557 // It won't take long, since the thread is apparently already in the 2558 // middle of the cleanup. 
2559 acquire_sem(exitSem); 2560 status = B_OK; 2561 } 2562 } 2563 2564 if (status == B_OK && _returnCode != NULL) 2565 *_returnCode = death.status; 2566 2567 return status; 2568 } 2569 2570 2571 status_t 2572 select_thread(int32 id, struct select_info* info, bool kernel) 2573 { 2574 // get and lock the thread 2575 Thread* thread = Thread::GetAndLock(id); 2576 if (thread == NULL) 2577 return B_BAD_THREAD_ID; 2578 BReference<Thread> threadReference(thread, true); 2579 ThreadLocker threadLocker(thread, true); 2580 2581 // We support only B_EVENT_INVALID at the moment. 2582 info->selected_events &= B_EVENT_INVALID; 2583 2584 // add info to list 2585 if (info->selected_events != 0) { 2586 info->next = thread->select_infos; 2587 thread->select_infos = info; 2588 2589 // we need a sync reference 2590 atomic_add(&info->sync->ref_count, 1); 2591 } 2592 2593 return B_OK; 2594 } 2595 2596 2597 status_t 2598 deselect_thread(int32 id, struct select_info* info, bool kernel) 2599 { 2600 // get and lock the thread 2601 Thread* thread = Thread::GetAndLock(id); 2602 if (thread == NULL) 2603 return B_BAD_THREAD_ID; 2604 BReference<Thread> threadReference(thread, true); 2605 ThreadLocker threadLocker(thread, true); 2606 2607 // remove info from list 2608 select_info** infoLocation = &thread->select_infos; 2609 while (*infoLocation != NULL && *infoLocation != info) 2610 infoLocation = &(*infoLocation)->next; 2611 2612 if (*infoLocation != info) 2613 return B_OK; 2614 2615 *infoLocation = info->next; 2616 2617 threadLocker.Unlock(); 2618 2619 // surrender sync reference 2620 put_select_sync(info->sync); 2621 2622 return B_OK; 2623 } 2624 2625 2626 int32 2627 thread_max_threads(void) 2628 { 2629 return sMaxThreads; 2630 } 2631 2632 2633 int32 2634 thread_used_threads(void) 2635 { 2636 InterruptsSpinLocker threadHashLocker(sThreadHashLock); 2637 return sUsedThreads; 2638 } 2639 2640 2641 /*! Returns a user-readable string for a thread state. 2642 Only for use in the kernel debugger. 2643 */ 2644 const char* 2645 thread_state_to_text(Thread* thread, int32 state) 2646 { 2647 return state_to_text(thread, state); 2648 } 2649 2650 2651 int32 2652 thread_get_io_priority(thread_id id) 2653 { 2654 Thread* thread = Thread::GetAndLock(id); 2655 if (thread == NULL) 2656 return B_BAD_THREAD_ID; 2657 BReference<Thread> threadReference(thread, true); 2658 ThreadLocker threadLocker(thread, true); 2659 2660 int32 priority = thread->io_priority; 2661 if (priority < 0) { 2662 // negative I/O priority means using the (CPU) priority 2663 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 2664 priority = thread->priority; 2665 } 2666 2667 return priority; 2668 } 2669 2670 2671 void 2672 thread_set_io_priority(int32 priority) 2673 { 2674 Thread* thread = thread_get_current_thread(); 2675 ThreadLocker threadLocker(thread); 2676 2677 thread->io_priority = priority; 2678 } 2679 2680 2681 status_t 2682 thread_init(kernel_args *args) 2683 { 2684 TRACE(("thread_init: entry\n")); 2685 2686 // create the thread hash table 2687 new(&sThreadHash) ThreadHashTable(); 2688 if (sThreadHash.Init(128) != B_OK) 2689 panic("thread_init(): failed to init thread hash table!"); 2690 2691 // create the thread structure object cache 2692 sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL, 2693 NULL, NULL); 2694 // Note: The x86 port requires 16 byte alignment of thread structures. 
2695 if (sThreadCache == NULL) 2696 panic("thread_init(): failed to allocate thread object cache!"); 2697 2698 if (arch_thread_init(args) < B_OK) 2699 panic("arch_thread_init() failed!\n"); 2700 2701 // skip all thread IDs including B_SYSTEM_TEAM, which is reserved 2702 sNextThreadID = B_SYSTEM_TEAM + 1; 2703 2704 // create an idle thread for each cpu 2705 for (uint32 i = 0; i < args->num_cpus; i++) { 2706 Thread *thread; 2707 area_info info; 2708 char name[64]; 2709 2710 sprintf(name, "idle thread %" B_PRIu32, i + 1); 2711 thread = new(&sIdleThreads[i]) Thread(name, 2712 i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]); 2713 if (thread == NULL || thread->Init(true) != B_OK) { 2714 panic("error creating idle thread struct\n"); 2715 return B_NO_MEMORY; 2716 } 2717 2718 gCPU[i].running_thread = thread; 2719 2720 thread->team = team_get_kernel_team(); 2721 thread->priority = thread->next_priority = B_IDLE_PRIORITY; 2722 thread->state = B_THREAD_RUNNING; 2723 thread->next_state = B_THREAD_READY; 2724 sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1); 2725 thread->kernel_stack_area = find_area(name); 2726 2727 if (get_area_info(thread->kernel_stack_area, &info) != B_OK) 2728 panic("error finding idle kstack area\n"); 2729 2730 thread->kernel_stack_base = (addr_t)info.address; 2731 thread->kernel_stack_top = thread->kernel_stack_base + info.size; 2732 2733 thread->visible = true; 2734 insert_thread_into_team(thread->team, thread); 2735 } 2736 sUsedThreads = args->num_cpus; 2737 2738 // init the notification service 2739 new(&sNotificationService) ThreadNotificationService(); 2740 2741 sNotificationService.Register(); 2742 2743 // start the undertaker thread 2744 new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>(); 2745 sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries"); 2746 2747 thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker", 2748 B_DISPLAY_PRIORITY, NULL); 2749 if (undertakerThread < 0) 2750 panic("Failed to create undertaker thread!"); 2751 resume_thread(undertakerThread); 2752 2753 // set up some debugger commands 2754 add_debugger_command_etc("threads", &dump_thread_list, "List all threads", 2755 "[ <team> ]\n" 2756 "Prints a list of all existing threads, or, if a team ID is given,\n" 2757 "all threads of the specified team.\n" 2758 " <team> - The ID of the team whose threads shall be listed.\n", 0); 2759 add_debugger_command_etc("ready", &dump_thread_list, 2760 "List all ready threads", 2761 "\n" 2762 "Prints a list of all threads in ready state.\n", 0); 2763 add_debugger_command_etc("running", &dump_thread_list, 2764 "List all running threads", 2765 "\n" 2766 "Prints a list of all threads in running state.\n", 0); 2767 add_debugger_command_etc("waiting", &dump_thread_list, 2768 "List all waiting threads (optionally for a specific semaphore)", 2769 "[ <sem> ]\n" 2770 "Prints a list of all threads in waiting state. If a semaphore is\n" 2771 "specified, only the threads waiting on that semaphore are listed.\n" 2772 " <sem> - ID of the semaphore.\n", 0); 2773 add_debugger_command_etc("realtime", &dump_thread_list, 2774 "List all realtime threads", 2775 "\n" 2776 "Prints a list of all threads with realtime priority.\n", 0); 2777 add_debugger_command_etc("thread", &dump_thread_info, 2778 "Dump info about a particular thread", 2779 "[ -s ] ( <id> | <address> | <name> )*\n" 2780 "Prints information about the specified thread. 
If no argument is\n" 2781 "given the current thread is selected.\n" 2782 " -s - Print info in compact table form (like \"threads\").\n" 2783 " <id> - The ID of the thread.\n" 2784 " <address> - The address of the thread structure.\n" 2785 " <name> - The thread's name.\n", 0); 2786 add_debugger_command_etc("calling", &dump_thread_list, 2787 "Show all threads that have a specific address in their call chain", 2788 "{ <symbol-pattern> | <start> <end> }\n", 0); 2789 add_debugger_command_etc("unreal", &make_thread_unreal, 2790 "Set realtime priority threads to normal priority", 2791 "[ <id> ]\n" 2792 "Sets the priority of all realtime threads or, if given, the one\n" 2793 "with the specified ID to \"normal\" priority.\n" 2794 " <id> - The ID of the thread.\n", 0); 2795 add_debugger_command_etc("suspend", &make_thread_suspended, 2796 "Suspend a thread", 2797 "[ <id> ]\n" 2798 "Suspends the thread with the given ID. If no ID argument is given\n" 2799 "the current thread is selected.\n" 2800 " <id> - The ID of the thread.\n", 0); 2801 add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread", 2802 "<id>\n" 2803 "Resumes the specified thread, if it is currently suspended.\n" 2804 " <id> - The ID of the thread.\n", 0); 2805 add_debugger_command_etc("drop", &drop_into_debugger, 2806 "Drop a thread into the userland debugger", 2807 "<id>\n" 2808 "Drops the specified (userland) thread into the userland debugger\n" 2809 "after leaving the kernel debugger.\n" 2810 " <id> - The ID of the thread.\n", 0); 2811 add_debugger_command_etc("priority", &set_thread_prio, 2812 "Set a thread's priority", 2813 "<priority> [ <id> ]\n" 2814 "Sets the priority of the thread with the specified ID to the given\n" 2815 "priority. If no thread ID is given, the current thread is selected.\n" 2816 " <priority> - The thread's new priority (0 - 120)\n" 2817 " <id> - The ID of the thread.\n", 0); 2818 2819 return B_OK; 2820 } 2821 2822 2823 status_t 2824 thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum) 2825 { 2826 // set up the cpu pointer in the not yet initialized per-cpu idle thread 2827 // so that get_current_cpu and friends will work, which is crucial for 2828 // a lot of low level routines 2829 sIdleThreads[cpuNum].cpu = &gCPU[cpuNum]; 2830 arch_thread_set_current_thread(&sIdleThreads[cpuNum]); 2831 return B_OK; 2832 } 2833 2834 2835 // #pragma mark - thread blocking API 2836 2837 2838 static status_t 2839 thread_block_timeout(timer* timer) 2840 { 2841 // The timer has been installed with B_TIMER_ACQUIRE_SCHEDULER_LOCK, so 2842 // we're holding the scheduler lock already. This makes things comfortably 2843 // easy. 2844 2845 Thread* thread = (Thread*)timer->user_data; 2846 thread_unblock_locked(thread, B_TIMED_OUT); 2847 2848 return B_HANDLED_INTERRUPT; 2849 } 2850 2851 2852 /*! Blocks the current thread. 2853 2854 The function acquires the scheduler lock and calls thread_block_locked(). 2855 See there for more information. 2856 */ 2857 status_t 2858 thread_block() 2859 { 2860 InterruptsSpinLocker _(gSchedulerLock); 2861 return thread_block_locked(thread_get_current_thread()); 2862 } 2863 2864 2865 /*! Blocks the current thread with a timeout. 2866 2867 Acquires the scheduler lock and calls thread_block_with_timeout_locked(). 2868 See there for more information. 
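	A minimal usage sketch (hedged; the wait object string and the one second
	timeout are illustrative, and the locking done by real callers such as
	_user_block_thread() is omitted): the waiter first announces the wait via
	thread_prepare_to_block() and is later woken either by
	thread_unblock_locked() or by the timeout.
	\code
	Thread* thread = thread_get_current_thread();
	thread_prepare_to_block(thread, B_CAN_INTERRUPT,
		THREAD_BLOCK_TYPE_OTHER, "example wait");

	status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 1000000);
	if (error == B_TIMED_OUT) {
		// no one called thread_unblock_locked() within one second
	}
	\endcode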
2869 */ 2870 status_t 2871 thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout) 2872 { 2873 InterruptsSpinLocker _(gSchedulerLock); 2874 return thread_block_with_timeout_locked(timeoutFlags, timeout); 2875 } 2876 2877 2878 /*! Blocks the current thread with a timeout. 2879 2880 The thread is blocked until someone else unblocks it or the specified timeout 2881 occurs. Must be called after a call to thread_prepare_to_block(). If the 2882 thread has already been unblocked after the previous call to 2883 thread_prepare_to_block(), this function will return immediately. See 2884 thread_prepare_to_block() for more details. 2885 2886 The caller must hold the scheduler lock. 2887 2888 2889 \param timeoutFlags The standard timeout flags: 2890 - \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait. 2891 - \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when 2892 the timeout shall occur. 2893 - \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT 2894 is specified, too. Specifies that \a timeout is a real time, not a 2895 system time. 2896 If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT is 2897 specified, an infinite timeout is implied and the function behaves like 2898 thread_block_locked(). 2899 \return The error code passed to the unblocking function. thread_interrupt() 2900 uses \c B_INTERRUPTED. If the timeout occurred, \c B_TIMED_OUT is 2901 returned. By convention \c B_OK means that the wait was successful while 2902 another error code indicates a failure (what that means depends on the 2903 client code). 2904 */ 2905 status_t 2906 thread_block_with_timeout_locked(uint32 timeoutFlags, bigtime_t timeout) 2907 { 2908 Thread* thread = thread_get_current_thread(); 2909 2910 if (thread->wait.status != 1) 2911 return thread->wait.status; 2912 2913 bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) 2914 && timeout != B_INFINITE_TIMEOUT; 2915 2916 if (useTimer) { 2917 // Timer flags: absolute/relative + "acquire scheduler lock". The latter 2918 // avoids nasty race conditions and deadlock problems that could 2919 // otherwise occur between our cancel_timer() and a concurrently 2920 // executing thread_block_timeout(). 2921 uint32 timerFlags; 2922 if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) { 2923 timerFlags = B_ONE_SHOT_RELATIVE_TIMER; 2924 } else { 2925 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER; 2926 if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0) 2927 timerFlags |= B_TIMER_REAL_TIME_BASE; 2928 } 2929 timerFlags |= B_TIMER_ACQUIRE_SCHEDULER_LOCK; 2930 2931 // install the timer 2932 thread->wait.unblock_timer.user_data = thread; 2933 add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout, 2934 timerFlags); 2935 } 2936 2937 // block 2938 status_t error = thread_block_locked(thread); 2939 2940 // cancel timer, if it didn't fire 2941 if (error != B_TIMED_OUT && useTimer) 2942 cancel_timer(&thread->wait.unblock_timer); 2943 2944 return error; 2945 } 2946 2947 2948 /*! Unblocks a userland-blocked thread. 2949 The caller must not hold any locks.
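	A hedged sketch of a call site (the thread ID is illustrative); this is
	essentially what _user_unblock_thread() below does:
	\code
	// someThreadID: a thread previously blocked via _user_block_thread()
	status_t error = user_unblock_thread(someThreadID, B_OK);
	if (error == B_OK)
		scheduler_reschedule_if_necessary();
	\endcode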
2950 */ 2951 static status_t 2952 user_unblock_thread(thread_id threadID, status_t status) 2953 { 2954 // get the thread 2955 Thread* thread = Thread::GetAndLock(threadID); 2956 if (thread == NULL) 2957 return B_BAD_THREAD_ID; 2958 BReference<Thread> threadReference(thread, true); 2959 ThreadLocker threadLocker(thread, true); 2960 2961 if (thread->user_thread == NULL) 2962 return B_NOT_ALLOWED; 2963 2964 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 2965 2966 if (thread->user_thread->wait_status > 0) { 2967 thread->user_thread->wait_status = status; 2968 thread_unblock_locked(thread, status); 2969 } 2970 2971 return B_OK; 2972 } 2973 2974 2975 // #pragma mark - public kernel API 2976 2977 2978 void 2979 exit_thread(status_t returnValue) 2980 { 2981 Thread *thread = thread_get_current_thread(); 2982 Team* team = thread->team; 2983 2984 thread->exit.status = returnValue; 2985 2986 // if called from a kernel thread, we don't deliver the signal, 2987 // we just exit directly to keep the user space behaviour of 2988 // this function 2989 if (team != team_get_kernel_team()) { 2990 // If this is its main thread, set the team's exit status. 2991 if (thread == team->main_thread) { 2992 TeamLocker teamLocker(team); 2993 2994 if (!team->exit.initialized) { 2995 team->exit.reason = CLD_EXITED; 2996 team->exit.signal = 0; 2997 team->exit.signaling_user = 0; 2998 team->exit.status = returnValue; 2999 team->exit.initialized = true; 3000 } 3001 3002 teamLocker.Unlock(); 3003 } 3004 3005 Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id); 3006 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE); 3007 } else 3008 thread_exit(); 3009 } 3010 3011 3012 status_t 3013 kill_thread(thread_id id) 3014 { 3015 if (id <= 0) 3016 return B_BAD_VALUE; 3017 3018 Thread* currentThread = thread_get_current_thread(); 3019 3020 Signal signal(SIGKILLTHR, SI_USER, B_OK, currentThread->team->id); 3021 return send_signal_to_thread_id(id, signal, 0); 3022 } 3023 3024 3025 status_t 3026 send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize) 3027 { 3028 return send_data_etc(thread, code, buffer, bufferSize, 0); 3029 } 3030 3031 3032 int32 3033 receive_data(thread_id *sender, void *buffer, size_t bufferSize) 3034 { 3035 return receive_data_etc(sender, buffer, bufferSize, 0); 3036 } 3037 3038 3039 bool 3040 has_data(thread_id thread) 3041 { 3042 // TODO: The thread argument is ignored. 3043 int32 count; 3044 3045 if (get_sem_count(thread_get_current_thread()->msg.read_sem, 3046 &count) != B_OK) 3047 return false; 3048 3049 return count == 0 ? 
false : true; 3050 } 3051 3052 3053 status_t 3054 _get_thread_info(thread_id id, thread_info *info, size_t size) 3055 { 3056 if (info == NULL || size != sizeof(thread_info) || id < B_OK) 3057 return B_BAD_VALUE; 3058 3059 // get the thread 3060 Thread* thread = Thread::GetAndLock(id); 3061 if (thread == NULL) 3062 return B_BAD_THREAD_ID; 3063 BReference<Thread> threadReference(thread, true); 3064 ThreadLocker threadLocker(thread, true); 3065 3066 // fill the info -- also requires the scheduler lock to be held 3067 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3068 3069 fill_thread_info(thread, info, size); 3070 3071 return B_OK; 3072 } 3073 3074 3075 status_t 3076 _get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info, 3077 size_t size) 3078 { 3079 if (info == NULL || size != sizeof(thread_info) || teamID < 0) 3080 return B_BAD_VALUE; 3081 3082 int32 lastID = *_cookie; 3083 3084 // get the team 3085 Team* team = Team::GetAndLock(teamID); 3086 if (team == NULL) 3087 return B_BAD_VALUE; 3088 BReference<Team> teamReference(team, true); 3089 TeamLocker teamLocker(team, true); 3090 3091 Thread* thread = NULL; 3092 3093 if (lastID == 0) { 3094 // We start with the main thread 3095 thread = team->main_thread; 3096 } else { 3097 // Find the one thread with an ID greater than ours (as long as the IDs 3098 // don't wrap they are always sorted from highest to lowest). 3099 // TODO: That is broken not only when the IDs wrap, but also for the 3100 // kernel team, to which threads are added when they are dying. 3101 for (Thread* next = team->thread_list; next != NULL; 3102 next = next->team_next) { 3103 if (next->id <= lastID) 3104 break; 3105 3106 thread = next; 3107 } 3108 } 3109 3110 if (thread == NULL) 3111 return B_BAD_VALUE; 3112 3113 lastID = thread->id; 3114 *_cookie = lastID; 3115 3116 ThreadLocker threadLocker(thread); 3117 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3118 3119 fill_thread_info(thread, info, size); 3120 3121 return B_OK; 3122 } 3123 3124 3125 thread_id 3126 find_thread(const char* name) 3127 { 3128 if (name == NULL) 3129 return thread_get_current_thread_id(); 3130 3131 InterruptsSpinLocker threadHashLocker(sThreadHashLock); 3132 3133 // TODO: Scanning the whole hash with the thread hash lock held isn't 3134 // exactly cheap -- although this function is probably used very rarely. 
3135 3136 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator(); 3137 Thread* thread = it.Next();) { 3138 if (!thread->visible) 3139 continue; 3140 3141 if (strcmp(thread->name, name) == 0) 3142 return thread->id; 3143 } 3144 3145 return B_NAME_NOT_FOUND; 3146 } 3147 3148 3149 status_t 3150 rename_thread(thread_id id, const char* name) 3151 { 3152 if (name == NULL) 3153 return B_BAD_VALUE; 3154 3155 // get the thread 3156 Thread* thread = Thread::GetAndLock(id); 3157 if (thread == NULL) 3158 return B_BAD_THREAD_ID; 3159 BReference<Thread> threadReference(thread, true); 3160 ThreadLocker threadLocker(thread, true); 3161 3162 // check whether the operation is allowed 3163 if (thread->team != thread_get_current_thread()->team) 3164 return B_NOT_ALLOWED; 3165 3166 strlcpy(thread->name, name, B_OS_NAME_LENGTH); 3167 3168 team_id teamID = thread->team->id; 3169 3170 threadLocker.Unlock(); 3171 3172 // notify listeners 3173 sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id); 3174 // don't pass the thread structure, as it's unsafe, if it isn't ours 3175 3176 return B_OK; 3177 } 3178 3179 3180 status_t 3181 set_thread_priority(thread_id id, int32 priority) 3182 { 3183 int32 oldPriority; 3184 3185 // make sure the passed in priority is within bounds 3186 if (priority > THREAD_MAX_SET_PRIORITY) 3187 priority = THREAD_MAX_SET_PRIORITY; 3188 if (priority < THREAD_MIN_SET_PRIORITY) 3189 priority = THREAD_MIN_SET_PRIORITY; 3190 3191 // get the thread 3192 Thread* thread = Thread::GetAndLock(id); 3193 if (thread == NULL) 3194 return B_BAD_THREAD_ID; 3195 BReference<Thread> threadReference(thread, true); 3196 ThreadLocker threadLocker(thread, true); 3197 3198 // check whether the change is allowed 3199 if (thread_is_idle_thread(thread)) 3200 return B_NOT_ALLOWED; 3201 3202 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3203 3204 if (thread == thread_get_current_thread()) { 3205 // It's ourself, so we know we aren't in the run queue, and we can 3206 // manipulate our structure directly. 3207 oldPriority = thread->priority; 3208 thread->priority = thread->next_priority = priority; 3209 } else { 3210 oldPriority = thread->priority; 3211 scheduler_set_thread_priority(thread, priority); 3212 } 3213 3214 return oldPriority; 3215 } 3216 3217 3218 status_t 3219 snooze_etc(bigtime_t timeout, int timebase, uint32 flags) 3220 { 3221 return common_snooze_etc(timeout, timebase, flags, NULL); 3222 } 3223 3224 3225 /*! snooze() for internal kernel use only; doesn't interrupt on signals. */ 3226 status_t 3227 snooze(bigtime_t timeout) 3228 { 3229 return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT); 3230 } 3231 3232 3233 /*! snooze_until() for internal kernel use only; doesn't interrupt on 3234 signals. 
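	A minimal sketch (the 500 ms interval is illustrative): sleep until an
	absolute point in time on the system time base.
	\code
	snooze_until(system_time() + 500000, B_SYSTEM_TIMEBASE);
	\endcode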
3235 */ 3236 status_t 3237 snooze_until(bigtime_t timeout, int timebase) 3238 { 3239 return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT); 3240 } 3241 3242 3243 status_t 3244 wait_for_thread(thread_id thread, status_t *_returnCode) 3245 { 3246 return wait_for_thread_etc(thread, 0, 0, _returnCode); 3247 } 3248 3249 3250 status_t 3251 suspend_thread(thread_id id) 3252 { 3253 if (id <= 0) 3254 return B_BAD_VALUE; 3255 3256 Thread* currentThread = thread_get_current_thread(); 3257 3258 Signal signal(SIGSTOP, SI_USER, B_OK, currentThread->team->id); 3259 return send_signal_to_thread_id(id, signal, 0); 3260 } 3261 3262 3263 status_t 3264 resume_thread(thread_id id) 3265 { 3266 if (id <= 0) 3267 return B_BAD_VALUE; 3268 3269 Thread* currentThread = thread_get_current_thread(); 3270 3271 // Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains 3272 // compatibility to BeOS which documents the combination of suspend_thread() 3273 // and resume_thread() to interrupt threads waiting on semaphores. 3274 Signal signal(SIGNAL_CONTINUE_THREAD, SI_USER, B_OK, 3275 currentThread->team->id); 3276 return send_signal_to_thread_id(id, signal, 0); 3277 } 3278 3279 3280 thread_id 3281 spawn_kernel_thread(thread_func function, const char *name, int32 priority, 3282 void *arg) 3283 { 3284 return thread_create_thread( 3285 ThreadCreationAttributes(function, name, priority, arg), 3286 true); 3287 } 3288 3289 3290 int 3291 getrlimit(int resource, struct rlimit * rlp) 3292 { 3293 status_t error = common_getrlimit(resource, rlp); 3294 if (error != B_OK) { 3295 errno = error; 3296 return -1; 3297 } 3298 3299 return 0; 3300 } 3301 3302 3303 int 3304 setrlimit(int resource, const struct rlimit * rlp) 3305 { 3306 status_t error = common_setrlimit(resource, rlp); 3307 if (error != B_OK) { 3308 errno = error; 3309 return -1; 3310 } 3311 3312 return 0; 3313 } 3314 3315 3316 // #pragma mark - syscalls 3317 3318 3319 void 3320 _user_exit_thread(status_t returnValue) 3321 { 3322 exit_thread(returnValue); 3323 } 3324 3325 3326 status_t 3327 _user_kill_thread(thread_id thread) 3328 { 3329 // TODO: Don't allow kernel threads to be killed! 3330 return kill_thread(thread); 3331 } 3332 3333 3334 status_t 3335 _user_cancel_thread(thread_id threadID, void (*cancelFunction)(int)) 3336 { 3337 // check the cancel function 3338 if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction)) 3339 return B_BAD_VALUE; 3340 3341 // get and lock the thread 3342 Thread* thread = Thread::GetAndLock(threadID); 3343 if (thread == NULL) 3344 return B_BAD_THREAD_ID; 3345 BReference<Thread> threadReference(thread, true); 3346 ThreadLocker threadLocker(thread, true); 3347 3348 // only threads of the same team can be canceled 3349 if (thread->team != thread_get_current_thread()->team) 3350 return B_NOT_ALLOWED; 3351 3352 // set the cancel function 3353 thread->cancel_function = cancelFunction; 3354 3355 // send the cancellation signal to the thread 3356 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3357 return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0); 3358 } 3359 3360 3361 status_t 3362 _user_resume_thread(thread_id thread) 3363 { 3364 // TODO: Don't allow kernel threads to be resumed! 3365 return resume_thread(thread); 3366 } 3367 3368 3369 status_t 3370 _user_suspend_thread(thread_id thread) 3371 { 3372 // TODO: Don't allow kernel threads to be suspended! 
3373 return suspend_thread(thread); 3374 } 3375 3376 3377 status_t 3378 _user_rename_thread(thread_id thread, const char *userName) 3379 { 3380 char name[B_OS_NAME_LENGTH]; 3381 3382 if (!IS_USER_ADDRESS(userName) 3383 || userName == NULL 3384 || user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK) 3385 return B_BAD_ADDRESS; 3386 3387 // TODO: Don't allow kernel threads to be renamed! 3388 return rename_thread(thread, name); 3389 } 3390 3391 3392 int32 3393 _user_set_thread_priority(thread_id thread, int32 newPriority) 3394 { 3395 // TODO: Don't allow setting priority of kernel threads! 3396 return set_thread_priority(thread, newPriority); 3397 } 3398 3399 3400 thread_id 3401 _user_spawn_thread(thread_creation_attributes* userAttributes) 3402 { 3403 // copy the userland structure to the kernel 3404 char nameBuffer[B_OS_NAME_LENGTH]; 3405 ThreadCreationAttributes attributes; 3406 status_t error = attributes.InitFromUserAttributes(userAttributes, 3407 nameBuffer); 3408 if (error != B_OK) 3409 return error; 3410 3411 // create the thread 3412 thread_id threadID = thread_create_thread(attributes, false); 3413 3414 if (threadID >= 0) 3415 user_debug_thread_created(threadID); 3416 3417 return threadID; 3418 } 3419 3420 3421 status_t 3422 _user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags, 3423 bigtime_t* userRemainingTime) 3424 { 3425 // We need to store more syscall restart parameters than usual and need a 3426 // somewhat different handling. Hence we can't use 3427 // syscall_restart_handle_timeout_pre() but do the job ourselves. 3428 struct restart_parameters { 3429 bigtime_t timeout; 3430 clockid_t timebase; 3431 uint32 flags; 3432 }; 3433 3434 Thread* thread = thread_get_current_thread(); 3435 3436 if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) { 3437 // The syscall was restarted. Fetch the parameters from the stored 3438 // restart parameters. 3439 restart_parameters* restartParameters 3440 = (restart_parameters*)thread->syscall_restart.parameters; 3441 timeout = restartParameters->timeout; 3442 timebase = restartParameters->timebase; 3443 flags = restartParameters->flags; 3444 } else { 3445 // convert relative timeouts to absolute ones 3446 if ((flags & B_RELATIVE_TIMEOUT) != 0) { 3447 // not restarted yet and the flags indicate a relative timeout 3448 3449 // Make sure we use the system time base, so real-time clock changes 3450 // won't affect our wait. 3451 flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE; 3452 if (timebase == CLOCK_REALTIME) 3453 timebase = CLOCK_MONOTONIC; 3454 3455 // get the current time and make the timeout absolute 3456 bigtime_t now; 3457 status_t error = user_timer_get_clock(timebase, now); 3458 if (error != B_OK) 3459 return error; 3460 3461 timeout += now; 3462 3463 // deal with overflow 3464 if (timeout < 0) 3465 timeout = B_INFINITE_TIMEOUT; 3466 3467 flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT; 3468 } else 3469 flags |= B_ABSOLUTE_TIMEOUT; 3470 } 3471 3472 // snooze 3473 bigtime_t remainingTime; 3474 status_t error = common_snooze_etc(timeout, timebase, 3475 flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION, 3476 userRemainingTime != NULL ? &remainingTime : NULL); 3477 3478 // If interrupted, copy the remaining time back to userland and prepare the 3479 // syscall restart. 
3480 if (error == B_INTERRUPTED) { 3481 if (userRemainingTime != NULL 3482 && (!IS_USER_ADDRESS(userRemainingTime) 3483 || user_memcpy(userRemainingTime, &remainingTime, 3484 sizeof(remainingTime)) != B_OK)) { 3485 return B_BAD_ADDRESS; 3486 } 3487 3488 // store the normalized values in the restart parameters 3489 restart_parameters* restartParameters 3490 = (restart_parameters*)thread->syscall_restart.parameters; 3491 restartParameters->timeout = timeout; 3492 restartParameters->timebase = timebase; 3493 restartParameters->flags = flags; 3494 3495 // restart the syscall, if possible 3496 atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL); 3497 } 3498 3499 return error; 3500 } 3501 3502 3503 void 3504 _user_thread_yield(void) 3505 { 3506 thread_yield(true); 3507 } 3508 3509 3510 status_t 3511 _user_get_thread_info(thread_id id, thread_info *userInfo) 3512 { 3513 thread_info info; 3514 status_t status; 3515 3516 if (!IS_USER_ADDRESS(userInfo)) 3517 return B_BAD_ADDRESS; 3518 3519 status = _get_thread_info(id, &info, sizeof(thread_info)); 3520 3521 if (status >= B_OK 3522 && user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3523 return B_BAD_ADDRESS; 3524 3525 return status; 3526 } 3527 3528 3529 status_t 3530 _user_get_next_thread_info(team_id team, int32 *userCookie, 3531 thread_info *userInfo) 3532 { 3533 status_t status; 3534 thread_info info; 3535 int32 cookie; 3536 3537 if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo) 3538 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK) 3539 return B_BAD_ADDRESS; 3540 3541 status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info)); 3542 if (status < B_OK) 3543 return status; 3544 3545 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK 3546 || user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK) 3547 return B_BAD_ADDRESS; 3548 3549 return status; 3550 } 3551 3552 3553 thread_id 3554 _user_find_thread(const char *userName) 3555 { 3556 char name[B_OS_NAME_LENGTH]; 3557 3558 if (userName == NULL) 3559 return find_thread(NULL); 3560 3561 if (!IS_USER_ADDRESS(userName) 3562 || user_strlcpy(name, userName, sizeof(name)) < B_OK) 3563 return B_BAD_ADDRESS; 3564 3565 return find_thread(name); 3566 } 3567 3568 3569 status_t 3570 _user_wait_for_thread(thread_id id, status_t *userReturnCode) 3571 { 3572 status_t returnCode; 3573 status_t status; 3574 3575 if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode)) 3576 return B_BAD_ADDRESS; 3577 3578 status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode); 3579 3580 if (status == B_OK && userReturnCode != NULL 3581 && user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) { 3582 return B_BAD_ADDRESS; 3583 } 3584 3585 return syscall_restart_handle_post(status); 3586 } 3587 3588 3589 bool 3590 _user_has_data(thread_id thread) 3591 { 3592 return has_data(thread); 3593 } 3594 3595 3596 status_t 3597 _user_send_data(thread_id thread, int32 code, const void *buffer, 3598 size_t bufferSize) 3599 { 3600 if (!IS_USER_ADDRESS(buffer)) 3601 return B_BAD_ADDRESS; 3602 3603 return send_data_etc(thread, code, buffer, bufferSize, 3604 B_KILL_CAN_INTERRUPT); 3605 // supports userland buffers 3606 } 3607 3608 3609 status_t 3610 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize) 3611 { 3612 thread_id sender; 3613 status_t code; 3614 3615 if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL) 3616 || !IS_USER_ADDRESS(buffer)) 3617 return B_BAD_ADDRESS; 3618 3619 code = receive_data_etc(&sender, buffer, bufferSize, 
B_KILL_CAN_INTERRUPT); 3620 // supports userland buffers 3621 3622 if (_userSender != NULL) 3623 if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK) 3624 return B_BAD_ADDRESS; 3625 3626 return code; 3627 } 3628 3629 3630 status_t 3631 _user_block_thread(uint32 flags, bigtime_t timeout) 3632 { 3633 syscall_restart_handle_timeout_pre(flags, timeout); 3634 flags |= B_CAN_INTERRUPT; 3635 3636 Thread* thread = thread_get_current_thread(); 3637 ThreadLocker threadLocker(thread); 3638 3639 // check, if already done 3640 if (thread->user_thread->wait_status <= 0) 3641 return thread->user_thread->wait_status; 3642 3643 // nope, so wait 3644 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user"); 3645 3646 threadLocker.Unlock(); 3647 InterruptsSpinLocker schedulerLocker(gSchedulerLock); 3648 3649 status_t status = thread_block_with_timeout_locked(flags, timeout); 3650 3651 schedulerLocker.Unlock(); 3652 threadLocker.Lock(); 3653 3654 // Interruptions or timeouts can race with other threads unblocking us. 3655 // Favor a wake-up by another thread, i.e. if someone changed the wait 3656 // status, use that. 3657 status_t oldStatus = thread->user_thread->wait_status; 3658 if (oldStatus > 0) 3659 thread->user_thread->wait_status = status; 3660 else 3661 status = oldStatus; 3662 3663 threadLocker.Unlock(); 3664 3665 return syscall_restart_handle_timeout_post(status, timeout); 3666 } 3667 3668 3669 status_t 3670 _user_unblock_thread(thread_id threadID, status_t status) 3671 { 3672 status_t error = user_unblock_thread(threadID, status); 3673 3674 if (error == B_OK) 3675 scheduler_reschedule_if_necessary(); 3676 3677 return error; 3678 } 3679 3680 3681 status_t 3682 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status) 3683 { 3684 enum { 3685 MAX_USER_THREADS_TO_UNBLOCK = 128 3686 }; 3687 3688 if (userThreads == NULL || !IS_USER_ADDRESS(userThreads)) 3689 return B_BAD_ADDRESS; 3690 if (count > MAX_USER_THREADS_TO_UNBLOCK) 3691 return B_BAD_VALUE; 3692 3693 thread_id threads[MAX_USER_THREADS_TO_UNBLOCK]; 3694 if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK) 3695 return B_BAD_ADDRESS; 3696 3697 for (uint32 i = 0; i < count; i++) 3698 user_unblock_thread(threads[i], status); 3699 3700 scheduler_reschedule_if_necessary(); 3701 3702 return B_OK; 3703 } 3704 3705 3706 // TODO: the following two functions don't belong here 3707 3708 3709 int 3710 _user_getrlimit(int resource, struct rlimit *urlp) 3711 { 3712 struct rlimit rl; 3713 int ret; 3714 3715 if (urlp == NULL) 3716 return EINVAL; 3717 3718 if (!IS_USER_ADDRESS(urlp)) 3719 return B_BAD_ADDRESS; 3720 3721 ret = common_getrlimit(resource, &rl); 3722 3723 if (ret == 0) { 3724 ret = user_memcpy(urlp, &rl, sizeof(struct rlimit)); 3725 if (ret < 0) 3726 return ret; 3727 3728 return 0; 3729 } 3730 3731 return ret; 3732 } 3733 3734 3735 int 3736 _user_setrlimit(int resource, const struct rlimit *userResourceLimit) 3737 { 3738 struct rlimit resourceLimit; 3739 3740 if (userResourceLimit == NULL) 3741 return EINVAL; 3742 3743 if (!IS_USER_ADDRESS(userResourceLimit) 3744 || user_memcpy(&resourceLimit, userResourceLimit, 3745 sizeof(struct rlimit)) < B_OK) 3746 return B_BAD_ADDRESS; 3747 3748 return common_setrlimit(resource, &resourceLimit); 3749 } 3750