/*
 * Copyright 2013-2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2009, Rene Gollent, rene@gollent.com.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! The thread scheduler */


#include <OS.h>

#include <AutoDeleter.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <kscheduler.h>
#include <listeners.h>
#include <load_tracking.h>
#include <scheduler_defs.h>
#include <smp.h>
#include <timer.h>
#include <util/Random.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"


namespace Scheduler {


class ThreadEnqueuer : public ThreadProcessing {
public:
	void		operator()(ThreadData* thread);
};

scheduler_mode gCurrentModeID;
scheduler_mode_operations* gCurrentMode;

bool gSingleCore;
bool gTrackCoreLoad;
bool gTrackCPULoad;

}	// namespace Scheduler

using namespace Scheduler;


static bool sSchedulerEnabled;

SchedulerListenerList gSchedulerListeners;
spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;

static scheduler_mode_operations* sSchedulerModes[] = {
	&gSchedulerLowLatencyMode,
	&gSchedulerPowerSavingMode,
};

// Since the CPU IDs used internally by the kernel bear no relation to the
// actual CPU topology, the following arrays are used to efficiently get the
// core and the package the CPU in question belongs to.
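// (For example, two SMT logical CPUs sharing a core end up with the same
// value in sCPUToCore; the actual IDs depend on the reported CPU topology.)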
static int32* sCPUToCore;
static int32* sCPUToPackage;


static void enqueue(Thread* thread, bool newOne);


void
ThreadEnqueuer::operator()(ThreadData* thread)
{
	enqueue(thread->GetThread(), false);
}


void
scheduler_dump_thread_data(Thread* thread)
{
	thread->scheduler_data->Dump();
}


static void
enqueue(Thread* thread, bool newOne)
{
	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;

	int32 threadPriority = threadData->GetEffectivePriority();
	T(EnqueueThread(thread, threadPriority));

	CPUEntry* targetCPU = NULL;
	CoreEntry* targetCore = NULL;
	if (thread->pinned_to_cpu > 0) {
		ASSERT(thread->previous_cpu != NULL);
		ASSERT(threadData->Core() != NULL);
		targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
	} else if (gSingleCore) {
		targetCore = &gCoreEntries[0];
	} else if (threadData->Core() != NULL
		&& (!newOne || !threadData->HasCacheExpired())) {
		targetCore = threadData->Rebalance();
	}

	const bool rescheduleNeeded
		= threadData->ChooseCoreAndCPU(targetCore, targetCPU);

	TRACE("enqueueing thread %ld with priority %ld on CPU %ld (core %ld)\n",
		thread->id, threadPriority, targetCPU->ID(), targetCore->ID());

	bool wasRunQueueEmpty = false;
	threadData->Enqueue(wasRunQueueEmpty);

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
		thread);

	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
	if (threadPriority > heapPriority
		|| (threadPriority == heapPriority && rescheduleNeeded)
		|| wasRunQueueEmpty) {

		if (targetCPU->ID() == smp_get_current_cpu()) {
			gCPU[targetCPU->ID()].invoke_scheduler = true;
		} else {
			smp_send_ici(targetCPU->ID(), SMP_MSG_RESCHEDULE, 0, 0, 0,
				NULL, SMP_MSG_FLAG_ASYNC);
		}
	}
}


/*!	Enqueues the thread into the run queue.
	Note: thread lock must be held when entering this function
*/
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	SchedulerModeLocker _;

	TRACE("enqueueing new thread %ld with static priority %ld\n", thread->id,
		thread->priority);

	ThreadData* threadData = thread->scheduler_data;

	if (threadData->ShouldCancelPenalty())
		threadData->CancelPenalty();

	enqueue(thread, true);
}


/*!	Sets the priority of a thread.
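	The thread's priority penalty is cancelled. If the thread is currently in
	the run queue, it is removed and re-enqueued according to the new
	priority. Returns the previous priority.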
*/
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
	ASSERT(are_interrupts_enabled());

	InterruptsSpinLocker _(thread->scheduler_lock);
	SchedulerModeLocker modeLocker;

	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;
	int32 oldPriority = thread->priority;

	TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
		thread->id, priority, oldPriority, threadData->GetEffectivePriority());

	thread->priority = priority;
	threadData->CancelPenalty();

	if (priority == oldPriority)
		return oldPriority;

	if (thread->state != B_THREAD_READY) {
		if (thread->state == B_THREAD_RUNNING) {
			ASSERT(threadData->Core() != NULL);

			ASSERT(thread->cpu != NULL);
			CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];

			CoreCPUHeapLocker _(threadData->Core());
			cpu->UpdatePriority(priority);
		}

		return oldPriority;
	}

	// The thread is in the run queue. We need to remove it and re-insert it
	// at a new position.

	T(RemoveThread(thread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
		thread);

	if (threadData->Dequeue())
		enqueue(thread, true);

	return oldPriority;
}


void
scheduler_reschedule_ici()
{
	// This function is called as a result of an incoming ICI.
	// Make sure the reschedule() is invoked.
	get_cpu_struct()->invoke_scheduler = true;
}


static inline void
stop_cpu_timers(Thread* fromThread, Thread* toThread)
{
	SpinLocker teamLocker(&fromThread->team->time_lock);
	SpinLocker threadLocker(&fromThread->time_lock);

	if (fromThread->HasActiveCPUTimeUserTimers()
		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_stop_cpu_timers(fromThread, toThread);
	}
}


static inline void
continue_cpu_timers(Thread* thread, cpu_ent* cpu)
{
	SpinLocker teamLocker(&thread->team->time_lock);
	SpinLocker threadLocker(&thread->time_lock);

	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
	}
}


static void
thread_resumes(Thread* thread)
{
	cpu_ent* cpu = thread->cpu;

	release_spinlock(&cpu->previous_thread->scheduler_lock);

	// continue CPU time based user timers
	continue_cpu_timers(thread, cpu);

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);
}


void
scheduler_new_thread_entry(Thread* thread)
{
	thread_resumes(thread);

	SpinLocker locker(thread->time_lock);
	thread->last_time = system_time();
}


/*!	Switches the currently running thread.
	This is a service function for scheduler implementations.

	\param fromThread The currently running thread.
	\param toThread The thread to switch to. Must be different from
		\a fromThread.
*/
static inline void
switch_thread(Thread* fromThread, Thread* toThread)
{
	// notify the user debugger code
	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_unscheduled(fromThread);

	// stop CPU time based user timers
	stop_cpu_timers(fromThread, toThread);

	// update CPU and Thread structures and perform the context switch
	cpu_ent* cpu = fromThread->cpu;
	toThread->previous_cpu = toThread->cpu = cpu;
	fromThread->cpu = NULL;
	cpu->running_thread = toThread;
	cpu->previous_thread = fromThread;

	arch_thread_set_current_thread(toThread);
	arch_thread_context_switch(fromThread, toThread);

	// The use of fromThread below looks weird, but is correct. fromThread had
	// been unscheduled earlier, but is back now. For a thread scheduled the
	// first time the same is done in thread.cpp:common_thread_entry().
	thread_resumes(fromThread);
}


static void
reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	int32 thisCPU = smp_get_current_cpu();
	gCPU[thisCPU].invoke_scheduler = false;

	CPUEntry* cpu = CPUEntry::GetCPU(thisCPU);
	CoreEntry* core = CoreEntry::GetCore(thisCPU);

	Thread* oldThread = thread_get_current_thread();
	ThreadData* oldThreadData = oldThread->scheduler_data;

	oldThreadData->StopCPUTime();

	SchedulerModeLocker modeLocker;

	TRACE("reschedule(): cpu %ld, current thread = %ld\n", thisCPU,
		oldThread->id);

	oldThread->state = nextState;

	// return time spent in interrupts
	oldThreadData->SetStolenInterruptTime(gCPU[thisCPU].interrupt_time);

	bool enqueueOldThread = false;
	bool putOldThreadAtBack = false;
	switch (nextState) {
		case B_THREAD_RUNNING:
		case B_THREAD_READY:
			enqueueOldThread = true;

			if (!oldThreadData->IsIdle()) {
				oldThreadData->Continues();
				if (oldThreadData->HasQuantumEnded(oldThread->cpu->preempted,
						oldThread->has_yielded)) {
					TRACE("enqueueing thread %ld into run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = true;
				} else {
					TRACE("putting thread %ld back in run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = false;
				}
			}

			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			oldThreadData->Dies();
			break;
		default:
			oldThreadData->GoesAway();
			TRACE("not enqueueing thread %ld into run queue next_state = %ld\n",
				oldThread->id, nextState);
			break;
	}

	oldThread->has_yielded = false;

	// select thread with the biggest priority and enqueue back the old thread
	ThreadData* nextThreadData;
	if (gCPU[thisCPU].disabled) {
		if (!oldThreadData->IsIdle()) {
			putOldThreadAtBack = oldThread->pinned_to_cpu == 0;
			oldThreadData->UnassignCore(true);

			CPURunQueueLocker cpuLocker(cpu);
			nextThreadData = cpu->PeekIdleThread();
			cpu->Remove(nextThreadData);
		} else
			nextThreadData = oldThreadData;
	} else {
		nextThreadData
			= cpu->ChooseNextThread(enqueueOldThread ?
				oldThreadData : NULL, putOldThreadAtBack);

		// update CPU heap
		CoreCPUHeapLocker cpuLocker(core);
		cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
	}

	Thread* nextThread = nextThreadData->GetThread();
	ASSERT(!gCPU[thisCPU].disabled || nextThreadData->IsIdle());

	if (nextThread != oldThread) {
		if (enqueueOldThread) {
			if (putOldThreadAtBack)
				enqueue(oldThread, false);
			else
				oldThreadData->PutBack();
		}

		acquire_spinlock(&nextThread->scheduler_lock);
	}

	TRACE("reschedule(): cpu %ld, next thread = %ld\n", thisCPU,
		nextThread->id);

	T(ScheduleThread(nextThread, oldThread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
		oldThread, nextThread);

	ASSERT(nextThreadData->Core() == core);
	nextThread->state = B_THREAD_RUNNING;
	nextThreadData->StartCPUTime();

	// track CPU activity
	cpu->TrackActivity(oldThreadData, nextThreadData);

	if (nextThread != oldThread || oldThread->cpu->preempted) {
		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);

		oldThread->cpu->preempted = false;
		if (!nextThreadData->IsIdle())
			nextThreadData->Continues();
		else
			gCurrentMode->rebalance_irqs(true);
		nextThreadData->StartQuantum();

		modeLocker.Unlock();

		SCHEDULER_EXIT_FUNCTION();

		if (nextThread != oldThread)
			switch_thread(oldThread, nextThread);
	}
}


/*!	Runs the scheduler.
	Note: expects thread spinlock to be held
*/
void
scheduler_reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	if (!sSchedulerEnabled) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && nextState != B_THREAD_READY)
			panic("scheduler_reschedule_no_op() called in non-ready thread");
		return;
	}

	reschedule(nextState);
}


status_t
scheduler_on_thread_create(Thread* thread, bool idleThread)
{
	thread->scheduler_data = new(std::nothrow) ThreadData(thread);
	if (thread->scheduler_data == NULL)
		return B_NO_MEMORY;
	return B_OK;
}


void
scheduler_on_thread_init(Thread* thread)
{
	ASSERT(thread->scheduler_data != NULL);

	if (thread_is_idle_thread(thread)) {
		static int32 sIdleThreadsID;
		int32 cpuID = atomic_add(&sIdleThreadsID, 1);

		thread->previous_cpu = &gCPU[cpuID];
		thread->pinned_to_cpu = 1;

		thread->scheduler_data->Init(CoreEntry::GetCore(cpuID));
	} else
		thread->scheduler_data->Init();
}


void
scheduler_on_thread_destroy(Thread* thread)
{
	delete thread->scheduler_data;
}


/*!	This starts the scheduler. Must be run in the context of the initial idle
	thread. Interrupts must be disabled and will be disabled when returning.
*/
void
scheduler_start()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	SCHEDULER_ENTER_FUNCTION();

	reschedule(B_THREAD_READY);
}


status_t
scheduler_set_operation_mode(scheduler_mode mode)
{
	if (mode != SCHEDULER_MODE_LOW_LATENCY
		&& mode != SCHEDULER_MODE_POWER_SAVING) {
		return B_BAD_VALUE;
	}

	dprintf("scheduler: switching to %s mode\n", sSchedulerModes[mode]->name);

	InterruptsBigSchedulerLocker _;

	gCurrentModeID = mode;
	gCurrentMode = sSchedulerModes[mode];
	gCurrentMode->switch_to_mode();

	ThreadData::ComputeQuantumLengths();

	return B_OK;
}


void
scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("scheduler_set_cpu_enabled: called with interrupts enabled");
#endif

	dprintf("scheduler: %s CPU %" B_PRId32 "\n",
		enabled ? "enabling" : "disabling", cpuID);

	InterruptsBigSchedulerLocker _;

	gCurrentMode->set_cpu_enabled(cpuID, enabled);

	CPUEntry* cpu = &gCPUEntries[cpuID];
	CoreEntry* core = cpu->Core();

	ASSERT(core->CPUCount() >= 0);
	if (enabled)
		cpu->Start();
	else {
		cpu->UpdatePriority(B_IDLE_PRIORITY);

		ThreadEnqueuer enqueuer;
		core->RemoveCPU(cpu, enqueuer);
	}

	gCPU[cpuID].disabled = !enabled;

	if (!enabled) {
		cpu->Stop();

		// don't wait until the thread quantum ends
		if (smp_get_current_cpu() != cpuID) {
			smp_send_ici(cpuID, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
				SMP_MSG_FLAG_ASYNC);
		}
	}
}


static void
traverse_topology_tree(const cpu_topology_node* node, int packageID, int coreID)
{
	switch (node->level) {
		case CPU_TOPOLOGY_SMT:
			sCPUToCore[node->id] = coreID;
			sCPUToPackage[node->id] = packageID;
			return;

		case CPU_TOPOLOGY_CORE:
			coreID = node->id;
			break;

		case CPU_TOPOLOGY_PACKAGE:
			packageID = node->id;
			break;

		default:
			break;
	}

	for (int32 i = 0; i < node->children_count; i++)
		traverse_topology_tree(node->children[i], packageID, coreID);
}


static status_t
build_topology_mappings(int32& cpuCount, int32& coreCount, int32& packageCount)
{
	cpuCount = smp_get_num_cpus();

	sCPUToCore = new(std::nothrow) int32[cpuCount];
	if (sCPUToCore == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToCoreDeleter(sCPUToCore);

	sCPUToPackage = new(std::nothrow) int32[cpuCount];
	if (sCPUToPackage == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToPackageDeleter(sCPUToPackage);

	coreCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0)
			coreCount++;
	}

	packageCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0
			&& gCPU[i].topology_id[CPU_TOPOLOGY_CORE] == 0) {
			packageCount++;
		}
	}

	const cpu_topology_node* root = get_cpu_topology();
	traverse_topology_tree(root, 0, 0);

	cpuToCoreDeleter.Detach();
	cpuToPackageDeleter.Detach();
	return B_OK;
}


static status_t
init()
{
	// create logical processor to core and package mappings
	int32 cpuCount, coreCount, packageCount;
	status_t result = build_topology_mappings(cpuCount, coreCount,
		packageCount);
	if (result != B_OK)
		return result;

	// disable parts of the scheduler logic that are not needed
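	// (e.g. with only one core there is no inter-core load balancing to do)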
	gSingleCore = coreCount == 1;
	scheduler_update_policy();

	gCoreCount = coreCount;
	gPackageCount = packageCount;

	gCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (gCPUEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CPUEntry> cpuEntriesDeleter(gCPUEntries);

	gCoreEntries = new(std::nothrow) CoreEntry[coreCount];
	if (gCoreEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CoreEntry> coreEntriesDeleter(gCoreEntries);

	gPackageEntries = new(std::nothrow) PackageEntry[packageCount];
	if (gPackageEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<PackageEntry> packageEntriesDeleter(gPackageEntries);

	new(&gCoreLoadHeap) CoreLoadHeap(coreCount);
	new(&gCoreHighLoadHeap) CoreLoadHeap(coreCount);

	new(&gIdlePackageList) IdlePackageList;

	for (int32 i = 0; i < cpuCount; i++) {
		CoreEntry* core = &gCoreEntries[sCPUToCore[i]];
		PackageEntry* package = &gPackageEntries[sCPUToPackage[i]];

		package->Init(sCPUToPackage[i]);
		core->Init(sCPUToCore[i], package);
		gCPUEntries[i].Init(i, core);

		core->AddCPU(&gCPUEntries[i]);
	}

	packageEntriesDeleter.Detach();
	coreEntriesDeleter.Detach();
	cpuEntriesDeleter.Detach();

	return B_OK;
}


void
scheduler_init()
{
	int32 cpuCount = smp_get_num_cpus();
	dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
		" cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
		gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
	Profiling::Profiler::Initialize();
#endif

	status_t result = init();
	if (result != B_OK)
		panic("scheduler_init: failed to initialize scheduler\n");

	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);

	init_debug_commands();

#if SCHEDULER_TRACING
	add_debugger_command_etc("scheduler", &cmd_scheduler,
		"Analyze scheduler tracing information",
		"<thread>\n"
		"Analyzes scheduler tracing information for a given thread.\n"
		"  <thread>  - ID of the thread.\n", 0);
#endif
}


void
scheduler_enable_scheduling()
{
	sSchedulerEnabled = true;
}


void
scheduler_update_policy()
{
	gTrackCPULoad = increase_cpu_performance(0) == B_OK;
	gTrackCoreLoad = !gSingleCore || gTrackCPULoad;
	dprintf("scheduler switches: single core: %s, cpu load tracking: %s,"
		" core load tracking: %s\n", gSingleCore ? "true" : "false",
		gTrackCPULoad ? "true" : "false",
		gTrackCoreLoad ? "true" : "false");
}


// #pragma mark - SchedulerListener


SchedulerListener::~SchedulerListener()
{
}


// #pragma mark - kernel private


/*!	Add the given scheduler listener. Thread lock must be held.
*/
void
scheduler_add_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Add(listener);
}


/*!	Remove the given scheduler listener. Thread lock must be held.
*/
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Remove(listener);
}


// #pragma mark - Syscalls


bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	// get the thread
	Thread* thread;
	if (id < 0) {
		thread = thread_get_current_thread();
		thread->AcquireReference();
	} else {
		thread = Thread::Get(id);
		if (thread == NULL)
			return 0;
	}
	BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
	InterruptsLocker _;
#endif

	ThreadData* threadData = thread->scheduler_data;
	CoreEntry* core = threadData->Core();
	if (core == NULL)
		core = &gCoreEntries[get_random<int32>() % gCoreCount];

	int32 threadCount = core->ThreadCount();
	if (core->CPUCount() > 0)
		threadCount /= core->CPUCount();

	if (threadData->GetEffectivePriority() > 0) {
		threadCount -= threadCount * THREAD_MAX_SET_PRIORITY
			/ threadData->GetEffectivePriority();
	}

	return std::min(std::max(threadCount * gCurrentMode->base_quantum,
			gCurrentMode->minimal_quantum),
		gCurrentMode->maximum_latency);
}


status_t
_user_set_scheduler_mode(int32 mode)
{
	scheduler_mode schedulerMode = static_cast<scheduler_mode>(mode);
	status_t error = scheduler_set_operation_mode(schedulerMode);
	if (error == B_OK)
		cpu_set_scheduler_mode(schedulerMode);
	return error;
}


int32
_user_get_scheduler_mode()
{
	return gCurrentModeID;
}