/*
 * Copyright 2013-2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2009, Rene Gollent, rene@gollent.com.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! The thread scheduler */


#include <OS.h>

#include <AutoDeleter.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <kscheduler.h>
#include <listeners.h>
#include <load_tracking.h>
#include <scheduler_defs.h>
#include <smp.h>
#include <timer.h>
#include <util/Random.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"


namespace Scheduler {


class ThreadEnqueuer : public ThreadProcessing {
public:
	void operator()(ThreadData* thread);
};

scheduler_mode gCurrentModeID;
scheduler_mode_operations* gCurrentMode;

bool gSingleCore;
bool gTrackCoreLoad;
bool gTrackCPULoad;

}	// namespace Scheduler

using namespace Scheduler;


static bool sSchedulerEnabled;

SchedulerListenerList gSchedulerListeners;
spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;

static scheduler_mode_operations* sSchedulerModes[] = {
	&gSchedulerLowLatencyMode,
	&gSchedulerPowerSavingMode,
};

// Since CPU IDs used internally by the kernel bear no relation to the actual
// CPU topology, the following arrays are used to efficiently get the core
// and the package the CPU in question belongs to.
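// (Both arrays are indexed by logical CPU ID: sCPUToCore[i] is used as an
// index into gCoreEntries and sCPUToPackage[i] as an index into
// gPackageEntries; both are filled in by build_topology_mappings() below.)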
static int32* sCPUToCore;
static int32* sCPUToPackage;


static void enqueue(Thread* thread, bool newOne);


void
ThreadEnqueuer::operator()(ThreadData* thread)
{
	enqueue(thread->GetThread(), false);
}


void
scheduler_dump_thread_data(Thread* thread)
{
	thread->scheduler_data->Dump();
}


static void
enqueue(Thread* thread, bool newOne)
{
	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;

	int32 threadPriority = threadData->GetEffectivePriority();
	T(EnqueueThread(thread, threadPriority));

	CPUEntry* targetCPU = NULL;
	CoreEntry* targetCore = NULL;
	if (thread->pinned_to_cpu > 0) {
		ASSERT(thread->previous_cpu != NULL);
		ASSERT(threadData->Core() != NULL);
		targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
	} else if (gSingleCore) {
		targetCore = &gCoreEntries[0];
	} else if (threadData->Core() != NULL
		&& (!newOne || !threadData->HasCacheExpired())) {
		targetCore = threadData->Rebalance();
	}

	const bool rescheduleNeeded = threadData->ChooseCoreAndCPU(targetCore, targetCPU);

	TRACE("enqueueing thread %" B_PRId32 " with priority %" B_PRId32
		" on CPU %" B_PRId32 " (core %" B_PRId32 ")\n",
		thread->id, threadPriority, targetCPU->ID(), targetCore->ID());

	bool wasRunQueueEmpty = false;
	threadData->Enqueue(wasRunQueueEmpty);

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
		thread);

	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
	if (threadPriority > heapPriority
		|| (threadPriority == heapPriority && rescheduleNeeded)
		|| wasRunQueueEmpty) {

		if (targetCPU->ID() == smp_get_current_cpu()) {
			gCPU[targetCPU->ID()].invoke_scheduler = true;
		} else {
			smp_send_ici(targetCPU->ID(), SMP_MSG_RESCHEDULE, 0, 0, 0,
				NULL, SMP_MSG_FLAG_ASYNC);
		}
	}
}


/*!	Enqueues the thread into the run queue.
	Note: thread lock must be held when entering this function
*/
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	SchedulerModeLocker _;

	TRACE("enqueueing new thread %" B_PRId32 " with static priority %" B_PRId32 "\n",
		thread->id, thread->priority);

	ThreadData* threadData = thread->scheduler_data;

	if (threadData->ShouldCancelPenalty())
		threadData->CancelPenalty();

	enqueue(thread, true);
}


/*!	Sets the priority of a thread.
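	Returns the previous static priority of the thread.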
*/
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
	ASSERT(are_interrupts_enabled());

	InterruptsSpinLocker _(thread->scheduler_lock);
	SchedulerModeLocker modeLocker;

	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;
	int32 oldPriority = thread->priority;

	TRACE("changing thread %" B_PRId32 " priority to %" B_PRId32
		" (old: %" B_PRId32 ", effective: %" B_PRId32 ")\n",
		thread->id, priority, oldPriority, threadData->GetEffectivePriority());

	thread->priority = priority;
	threadData->CancelPenalty();

	if (priority == oldPriority)
		return oldPriority;

	if (thread->state != B_THREAD_READY) {
		if (thread->state == B_THREAD_RUNNING) {
			ASSERT(threadData->Core() != NULL);

			ASSERT(thread->cpu != NULL);
			CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];

			CoreCPUHeapLocker _(threadData->Core());
			cpu->UpdatePriority(priority);
		}

		return oldPriority;
	}

	// The thread is in the run queue. We need to remove it and re-insert it at
	// a new position.

	T(RemoveThread(thread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
		thread);

	if (threadData->Dequeue())
		enqueue(thread, true);

	return oldPriority;
}


void
scheduler_reschedule_ici()
{
	// This function is called as a result of an incoming ICI.
	// Make sure the reschedule() is invoked.
	get_cpu_struct()->invoke_scheduler = true;
}


static inline void
stop_cpu_timers(Thread* fromThread, Thread* toThread)
{
	SpinLocker teamLocker(&fromThread->team->time_lock);
	SpinLocker threadLocker(&fromThread->time_lock);

	if (fromThread->HasActiveCPUTimeUserTimers()
		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_stop_cpu_timers(fromThread, toThread);
	}
}


static inline void
continue_cpu_timers(Thread* thread, cpu_ent* cpu)
{
	SpinLocker teamLocker(&thread->team->time_lock);
	SpinLocker threadLocker(&thread->time_lock);

	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
	}
}


static void
thread_resumes(Thread* thread)
{
	cpu_ent* cpu = thread->cpu;

	release_spinlock(&cpu->previous_thread->scheduler_lock);

	// continue CPU time based user timers
	continue_cpu_timers(thread, cpu);

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);
}


void
scheduler_new_thread_entry(Thread* thread)
{
	thread_resumes(thread);

	SpinLocker locker(thread->time_lock);
	thread->last_time = system_time();
}


/*!	Switches the currently running thread.
	This is a service function for scheduler implementations.

	\param fromThread The currently running thread.
	\param toThread The thread to switch to. Must be different from
		\a fromThread.
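	Note: interrupts must be disabled when this function is called.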
*/
static inline void
switch_thread(Thread* fromThread, Thread* toThread)
{
	// notify the user debugger code
	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_unscheduled(fromThread);

	// stop CPU time based user timers
	stop_cpu_timers(fromThread, toThread);

	// update CPU and Thread structures and perform the context switch
	cpu_ent* cpu = fromThread->cpu;
	toThread->previous_cpu = toThread->cpu = cpu;
	fromThread->cpu = NULL;
	cpu->running_thread = toThread;
	cpu->previous_thread = fromThread;

	arch_thread_set_current_thread(toThread);
	arch_thread_context_switch(fromThread, toThread);

	// The use of fromThread below looks weird, but is correct. fromThread had
	// been unscheduled earlier, but is back now. For a thread scheduled the
	// first time the same is done in thread.cpp:common_thread_entry().
	thread_resumes(fromThread);
}


static void
reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	int32 thisCPU = smp_get_current_cpu();
	gCPU[thisCPU].invoke_scheduler = false;

	CPUEntry* cpu = CPUEntry::GetCPU(thisCPU);
	CoreEntry* core = CoreEntry::GetCore(thisCPU);

	Thread* oldThread = thread_get_current_thread();
	ThreadData* oldThreadData = oldThread->scheduler_data;

	oldThreadData->StopCPUTime();

	SchedulerModeLocker modeLocker;

	TRACE("reschedule(): cpu %" B_PRId32 ", current thread = %" B_PRId32 "\n",
		thisCPU, oldThread->id);

	oldThread->state = nextState;

	// return time spent in interrupts
	oldThreadData->SetStolenInterruptTime(gCPU[thisCPU].interrupt_time);

	bool enqueueOldThread = false;
	bool putOldThreadAtBack = false;
	switch (nextState) {
		case B_THREAD_RUNNING:
		case B_THREAD_READY:
			enqueueOldThread = true;

			if (!oldThreadData->IsIdle() && oldThreadData->GetCPUMask().GetBit(thisCPU)) {
				oldThreadData->Continues();
				if (oldThreadData->HasQuantumEnded(oldThread->cpu->preempted,
						oldThread->has_yielded)) {
					TRACE("enqueueing thread %ld into run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = true;
				} else {
					TRACE("putting thread %ld back in run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = false;
				}
			}

			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			oldThreadData->Dies();
			break;
		default:
			oldThreadData->GoesAway();
			TRACE("not enqueueing thread %ld into run queue next_state = %ld\n",
				oldThread->id, nextState);
			break;
	}

	oldThread->has_yielded = false;

	// select the thread with the highest priority and enqueue back the old thread
	ThreadData* nextThreadData;
	if (gCPU[thisCPU].disabled) {
		if (!oldThreadData->IsIdle()) {
			if (oldThread->pinned_to_cpu == 0) {
				putOldThreadAtBack = true;
				oldThreadData->UnassignCore(true);
			} else {
				putOldThreadAtBack = false;
			}

			CPURunQueueLocker cpuLocker(cpu);
			nextThreadData = cpu->PeekIdleThread();
			cpu->Remove(nextThreadData);
		} else
			nextThreadData = oldThreadData;
	} else {
		CPUSet mask = oldThreadData->GetCPUMask();
		if (mask.IsEmpty())
			mask.SetAll();
		bool oldThreadShouldMigrate = !mask.GetBit(thisCPU);
		if (oldThreadShouldMigrate)
			enqueueOldThread = false;
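		// Ask this CPU for the highest priority runnable thread. The old
		// thread is only handed in for possible re-enqueueing when it stays
		// ready and is not about to migrate to another core; otherwise NULL
		// is passed.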
		nextThreadData
			= cpu->ChooseNextThread(enqueueOldThread ? oldThreadData : NULL,
				putOldThreadAtBack);
		if (oldThreadShouldMigrate) {
			enqueue(oldThread, true);
			// replace with the idle thread, if no other thread could be found
			if (oldThreadData == nextThreadData)
				nextThreadData = cpu->PeekIdleThread();
		}

		// update CPU heap
		CoreCPUHeapLocker cpuLocker(core);
		cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
	}

	Thread* nextThread = nextThreadData->GetThread();
	ASSERT(!gCPU[thisCPU].disabled || nextThreadData->IsIdle());

	if (nextThread != oldThread) {
		if (enqueueOldThread) {
			if (putOldThreadAtBack)
				enqueue(oldThread, false);
			else
				oldThreadData->PutBack();
		}

		acquire_spinlock(&nextThread->scheduler_lock);
	}

	TRACE("reschedule(): cpu %" B_PRId32 ", next thread = %" B_PRId32 "\n",
		thisCPU, nextThread->id);

	T(ScheduleThread(nextThread, oldThread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
		oldThread, nextThread);

	ASSERT(nextThreadData->Core() == core);
	nextThread->state = B_THREAD_RUNNING;
	nextThreadData->StartCPUTime();

	// track CPU activity
	cpu->TrackActivity(oldThreadData, nextThreadData);

	if (nextThread != oldThread || oldThread->cpu->preempted) {
		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);

		oldThread->cpu->preempted = false;
		if (!nextThreadData->IsIdle())
			nextThreadData->Continues();
		else
			gCurrentMode->rebalance_irqs(true);
		nextThreadData->StartQuantum();

		modeLocker.Unlock();

		SCHEDULER_EXIT_FUNCTION();

		if (nextThread != oldThread)
			switch_thread(oldThread, nextThread);
	}
}


/*!	Runs the scheduler.
	Note: expects thread spinlock to be held
*/
void
scheduler_reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	if (!sSchedulerEnabled) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && nextState != B_THREAD_READY)
			panic("scheduler_reschedule_no_op() called in non-ready thread");
		return;
	}

	reschedule(nextState);
}


status_t
scheduler_on_thread_create(Thread* thread, bool idleThread)
{
	thread->scheduler_data = new(std::nothrow) ThreadData(thread);
	if (thread->scheduler_data == NULL)
		return B_NO_MEMORY;
	return B_OK;
}


void
scheduler_on_thread_init(Thread* thread)
{
	ASSERT(thread->scheduler_data != NULL);

	if (thread_is_idle_thread(thread)) {
		static int32 sIdleThreadsID;
		int32 cpuID = atomic_add(&sIdleThreadsID, 1);

		thread->previous_cpu = &gCPU[cpuID];
		thread->pinned_to_cpu = 1;

		thread->scheduler_data->Init(CoreEntry::GetCore(cpuID));
	} else
		thread->scheduler_data->Init();
}


void
scheduler_on_thread_destroy(Thread* thread)
{
	delete thread->scheduler_data;
}


/*!	This starts the scheduler. Must be run in the context of the initial idle
	thread. Interrupts must be disabled and will be disabled when returning.
*/
void
scheduler_start()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	SCHEDULER_ENTER_FUNCTION();

	reschedule(B_THREAD_READY);
}


status_t
scheduler_set_operation_mode(scheduler_mode mode)
{
	if (mode != SCHEDULER_MODE_LOW_LATENCY
		&& mode != SCHEDULER_MODE_POWER_SAVING) {
		return B_BAD_VALUE;
	}

	dprintf("scheduler: switching to %s mode\n", sSchedulerModes[mode]->name);

	InterruptsBigSchedulerLocker _;

	gCurrentModeID = mode;
	gCurrentMode = sSchedulerModes[mode];
	gCurrentMode->switch_to_mode();

	ThreadData::ComputeQuantumLengths();

	return B_OK;
}


void
scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("scheduler_set_cpu_enabled: called with interrupts enabled");
#endif

	dprintf("scheduler: %s CPU %" B_PRId32 "\n",
		enabled ? "enabling" : "disabling", cpuID);

	InterruptsBigSchedulerLocker _;

	gCurrentMode->set_cpu_enabled(cpuID, enabled);

	CPUEntry* cpu = &gCPUEntries[cpuID];
	CoreEntry* core = cpu->Core();

	ASSERT(core->CPUCount() >= 0);
	if (enabled)
		cpu->Start();
	else {
		cpu->UpdatePriority(B_IDLE_PRIORITY);

		ThreadEnqueuer enqueuer;
		core->RemoveCPU(cpu, enqueuer);
	}

	gCPU[cpuID].disabled = !enabled;
	if (enabled)
		gCPUEnabled.SetBitAtomic(cpuID);
	else
		gCPUEnabled.ClearBitAtomic(cpuID);

	if (!enabled) {
		cpu->Stop();

		// don't wait until the thread quantum ends
		if (smp_get_current_cpu() != cpuID) {
			smp_send_ici(cpuID, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
				SMP_MSG_FLAG_ASYNC);
		}
	}
}


static void
traverse_topology_tree(const cpu_topology_node* node, int packageID, int coreID)
{
	switch (node->level) {
		case CPU_TOPOLOGY_SMT:
			sCPUToCore[node->id] = coreID;
			sCPUToPackage[node->id] = packageID;
			return;

		case CPU_TOPOLOGY_CORE:
			coreID = node->id;
			break;

		case CPU_TOPOLOGY_PACKAGE:
			packageID = node->id;
			break;

		default:
			break;
	}

	for (int32 i = 0; i < node->children_count; i++)
		traverse_topology_tree(node->children[i], packageID, coreID);
}


static status_t
build_topology_mappings(int32& cpuCount, int32& coreCount, int32& packageCount)
{
	cpuCount = smp_get_num_cpus();

	sCPUToCore = new(std::nothrow) int32[cpuCount];
	if (sCPUToCore == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToCoreDeleter(sCPUToCore);

	sCPUToPackage = new(std::nothrow) int32[cpuCount];
	if (sCPUToPackage == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToPackageDeleter(sCPUToPackage);

	coreCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0)
			coreCount++;
	}

	packageCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0
			&& gCPU[i].topology_id[CPU_TOPOLOGY_CORE] == 0) {
			packageCount++;
		}
	}

	const cpu_topology_node* root = get_cpu_topology();
	traverse_topology_tree(root, 0, 0);

	cpuToCoreDeleter.Detach();
	cpuToPackageDeleter.Detach();
	return B_OK;
}


static status_t
init()
{
	// create logical processor to core and package mappings
	int32 cpuCount, coreCount, packageCount;
	status_t result = build_topology_mappings(cpuCount, coreCount,
		packageCount);
	if (result != B_OK)
		return result;

	// disable parts of the scheduler logic that are not needed
	gSingleCore = coreCount == 1;
	scheduler_update_policy();

	gCoreCount = coreCount;
	gPackageCount = packageCount;

	gCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (gCPUEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CPUEntry> cpuEntriesDeleter(gCPUEntries);

	gCoreEntries = new(std::nothrow) CoreEntry[coreCount];
	if (gCoreEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CoreEntry> coreEntriesDeleter(gCoreEntries);

	gPackageEntries = new(std::nothrow) PackageEntry[packageCount];
	if (gPackageEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<PackageEntry> packageEntriesDeleter(gPackageEntries);

	new(&gCoreLoadHeap) CoreLoadHeap(coreCount);
	new(&gCoreHighLoadHeap) CoreLoadHeap(coreCount);

	new(&gIdlePackageList) IdlePackageList;

	for (int32 i = 0; i < cpuCount; i++) {
		CoreEntry* core = &gCoreEntries[sCPUToCore[i]];
		PackageEntry* package = &gPackageEntries[sCPUToPackage[i]];

		package->Init(sCPUToPackage[i]);
		core->Init(sCPUToCore[i], package);
		gCPUEntries[i].Init(i, core);

		core->AddCPU(&gCPUEntries[i]);
	}

	packageEntriesDeleter.Detach();
	coreEntriesDeleter.Detach();
	cpuEntriesDeleter.Detach();

	return B_OK;
}


void
scheduler_init()
{
	int32 cpuCount = smp_get_num_cpus();
	dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
		" cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
		gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
	Profiling::Profiler::Initialize();
#endif

	status_t result = init();
	if (result != B_OK)
		panic("scheduler_init: failed to initialize scheduler\n");

	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);

	init_debug_commands();

#if SCHEDULER_TRACING
	add_debugger_command_etc("scheduler", &cmd_scheduler,
		"Analyze scheduler tracing information",
		"<thread>\n"
		"Analyzes scheduler tracing information for a given thread.\n"
		"  <thread>  - ID of the thread.\n", 0);
#endif
}


void
scheduler_enable_scheduling()
{
	sSchedulerEnabled = true;
}


void
scheduler_update_policy()
{
	gTrackCPULoad = increase_cpu_performance(0) == B_OK;
	gTrackCoreLoad = !gSingleCore || gTrackCPULoad;
	dprintf("scheduler switches: single core: %s, cpu load tracking: %s,"
		" core load tracking: %s\n", gSingleCore ? "true" : "false",
		gTrackCPULoad ? "true" : "false",
		gTrackCoreLoad ? "true" : "false");
}


// #pragma mark - SchedulerListener


SchedulerListener::~SchedulerListener()
{
}


// #pragma mark - kernel private


/*!	Add the given scheduler listener. Thread lock must be held.
*/
void
scheduler_add_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Add(listener);
}


/*!	Remove the given scheduler listener. Thread lock must be held.
*/
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Remove(listener);
}


// #pragma mark - Syscalls


bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	// get the thread
	Thread* thread;
	if (id < 0) {
		thread = thread_get_current_thread();
		thread->AcquireReference();
	} else {
		thread = Thread::Get(id);
		if (thread == NULL)
			return 0;
	}
	BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
	InterruptsLocker _;
#endif

	ThreadData* threadData = thread->scheduler_data;
	CoreEntry* core = threadData->Core();
	if (core == NULL)
		core = &gCoreEntries[get_random<int32>() % gCoreCount];

	int32 threadCount = core->ThreadCount();
	if (core->CPUCount() > 0)
		threadCount /= core->CPUCount();

	if (threadData->GetEffectivePriority() > 0) {
		threadCount -= threadCount * THREAD_MAX_SET_PRIORITY
			/ threadData->GetEffectivePriority();
	}

	return std::min(std::max(threadCount * gCurrentMode->base_quantum,
			gCurrentMode->minimal_quantum),
		gCurrentMode->maximum_latency);
}


status_t
_user_set_scheduler_mode(int32 mode)
{
	scheduler_mode schedulerMode = static_cast<scheduler_mode>(mode);
	status_t error = scheduler_set_operation_mode(schedulerMode);
	if (error == B_OK)
		cpu_set_scheduler_mode(schedulerMode);
	return error;
}


int32
_user_get_scheduler_mode()
{
	return gCurrentModeID;
}