1 /*
2 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3 * Distributed under the terms of the MIT License.
4 */
5
6
7 #include "scheduler_cpu.h"
8
9 #include <util/AutoLock.h>
10
11 #include <algorithm>
12
13 #include "scheduler_thread.h"
14
15
namespace Scheduler {


// Per-CPU scheduler state; one entry per logical CPU, indexed by CPU number.
CPUEntry* gCPUEntries;

// Per-core state plus the two load heaps used to pick a core: cores whose
// load key is at most kHighLoad sit in gCoreLoadHeap, the rest in
// gCoreHighLoadHeap. gCoreHeapsLock protects both heaps.
CoreEntry* gCoreEntries;
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

// Per-package state and the list of packages whose cores are all idle,
// guarded by gIdlePackageLock.
PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


} // namespace Scheduler

using namespace Scheduler;
36
37
// Static helpers used only by the debugger commands at the bottom of this
// file; declared as a class so they can be befriended for access to private
// run queues and heap internals.
class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpCoreLoadHeapEntry(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);

private:
	// Accumulator passed through thread_map() to sum the load of all
	// threads currently assigned to fCore.
	struct CoreThreadsData {
			CoreEntry*	fCore;
			int32		fLoad;
	};

	static	void		_AnalyzeCoreThreads(Thread* thread, void* data);
};
53
54
// Scratch heaps used by the Dump() debugger commands: entries are drained
// into these while printing, then moved back so the real heaps are left
// unchanged. Sized in init_debug_commands().
static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;
57
58
59 void
Dump() const60 ThreadRunQueue::Dump() const
61 {
62 ThreadRunQueue::ConstIterator iterator = GetConstIterator();
63 if (!iterator.HasNext())
64 kprintf("Run queue is empty.\n");
65 else {
66 kprintf("thread id priority penalty name\n");
67 while (iterator.HasNext()) {
68 ThreadData* threadData = iterator.Next();
69 Thread* thread = threadData->GetThread();
70
71 kprintf("%p %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
72 thread, thread->id, thread->priority,
73 thread->priority - threadData->GetEffectivePriority(),
74 thread->name);
75 }
76 }
77 }
78
79
// Construct an entry in an idle, unloaded state. The entry is not usable
// until Init() assigns it a CPU number and core, and Start() attaches it.
CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0),
	fUpdateLoadEvent(false)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}
90
91
// Bind this entry to logical CPU \a id and its owning \a core.
void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}
98
99
// Bring the CPU online for scheduling: reset its load measurement and
// register it with its core (which also puts it into the core's CPU heap).
void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}
106
107
// Take the CPU out of service by migrating all IRQs assigned to it away.
void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// get rid of irqs
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		// The lock must be dropped around assign_io_interrupt_to_cpu();
		// re-fetch the list head afterwards since the list may have changed.
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}
127
128
// Enqueue \a thread at the front of this CPU's pinned run queue for the
// given \a priority. Caller must hold the CPU run queue lock.
void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}
135
136
// Enqueue \a thread at the back of this CPU's pinned run queue for the
// given \a priority. Caller must hold the CPU run queue lock.
void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}
143
144
// Remove \a thread from this CPU's pinned run queue and mark it dequeued.
// The thread must currently be enqueued.
void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}
153
154
// Return the highest-priority thread waiting in this core's shared run
// queue without removing it, or NULL if the queue is empty.
ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}
161
162
// Return the highest-priority thread pinned to this CPU without removing
// it, or NULL if the pinned run queue is empty.
ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}
169
170
// Return this CPU's idle thread (head of the B_IDLE_PRIORITY queue level).
ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}
177
178
// Record that this CPU is now running a thread of the given effective
// \a priority: update its key in the core's CPU priority heap and notify
// the core when the CPU enters or leaves the idle state.
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	// An idle -> non-idle (or reverse) transition changes the core's idle
	// CPU bookkeeping.
	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}
196
197
198 void
ComputeLoad()199 CPUEntry::ComputeLoad()
200 {
201 SCHEDULER_ENTER_FUNCTION();
202
203 ASSERT(gTrackCPULoad);
204 ASSERT(!gCPU[fCPUNumber].disabled);
205 ASSERT(fCPUNumber == smp_get_current_cpu());
206
207 int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
208 system_time());
209 if (oldLoad < 0)
210 return;
211
212 if (fLoad > kVeryHighLoad)
213 gCurrentMode->rebalance_irqs(false);
214 }
215
216
// Pick the thread this CPU should run next, choosing among the old thread
// (unless it must go to the back of its queue), the best thread pinned to
// this CPU and the best thread in the core's shared run queue. The chosen
// thread is removed from its queue; \a oldThread is never enqueued here.
// Lock order: CPU run queue lock before core run queue lock.
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	if (sharedThread == NULL && pinnedThread == NULL && oldThread == NULL)
		return NULL;

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	// Keep running the old thread if it beats both candidates, or ties
	// while it is allowed to stay at the front of its queue.
	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	// The core queue is no longer needed; release its lock before touching
	// only CPU-local state.
	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}
257
258
// Account the CPU time the outgoing thread consumed, update load tracking
// and performance levels, and prime the accounting baseline for the
// incoming thread. Called on this CPU during a context switch.
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		// Time used since the thread was switched in, kernel plus user.
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		// Baselines for the next accounting round.
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}
296
297
// Arm the per-CPU quantum timer for \a thread: a reschedule event when the
// thread's quantum expires, or — for the idle thread with core load
// tracking enabled — a periodic load-update event instead.
void
CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
{
	cpu_ent* cpu = &gCPU[ID()];

	// If the old timer fired (preemption) it is already gone; otherwise,
	// or when a load-update event is pending, cancel it first.
	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;

	if (!thread->IsIdle()) {
		bigtime_t quantum = thread->GetQuantumLeft();
		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
			B_ONE_SHOT_RELATIVE_TIMER);
	} else if (gTrackCoreLoad) {
		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
			kLoadMeasureInterval * 2, B_ONE_SHOT_RELATIVE_TIMER);
		fUpdateLoadEvent = true;
	}
}
317
318
// Request a CPU performance (frequency) level proportional to the current
// load: below kTargetLoad performance is decreased, above it increased.
// A disabled CPU is always dropped to the minimum level.
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	// Use the larger of the incoming thread's load and the core's load.
	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT_PRINT(load >= 0 && load <= kMaxLoad, "load is out of range %"
		B_PRId32 " (max of %" B_PRId32 " %" B_PRId32 ")", load,
		threadData->GetLoad(), fCore->GetLoad());

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}
349
350
351 /* static */ int32
_RescheduleEvent(timer *)352 CPUEntry::_RescheduleEvent(timer* /* unused */)
353 {
354 get_cpu_struct()->invoke_scheduler = true;
355 get_cpu_struct()->preempted = true;
356 return B_HANDLED_INTERRUPT;
357 }
358
359
360 /* static */ int32
_UpdateLoadEvent(timer *)361 CPUEntry::_UpdateLoadEvent(timer* /* unused */)
362 {
363 CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
364 CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
365 return B_HANDLED_INTERRUPT;
366 }
367
368
// Heap of CPUs keyed by the priority of the thread each is running,
// pre-sized for \a cpuCount entries.
CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}
374
375
376 void
Dump()377 CPUPriorityHeap::Dump()
378 {
379 kprintf("cpu priority load\n");
380 CPUEntry* entry = PeekRoot();
381 while (entry) {
382 int32 cpu = entry->ID();
383 int32 key = GetKey(entry);
384 kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
385 entry->GetLoad() / 10);
386
387 RemoveRoot();
388 sDebugCPUHeap.Insert(entry, key);
389
390 entry = PeekRoot();
391 }
392
393 entry = sDebugCPUHeap.PeekRoot();
394 while (entry) {
395 int32 key = GetKey(entry);
396 sDebugCPUHeap.RemoveRoot();
397 Insert(entry, key);
398 entry = sDebugCPUHeap.PeekRoot();
399 }
400 }
401
402
// Construct a core entry with no CPUs, no threads and zero load; Init()
// must still assign its ID and package.
CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fCurrentLoad(0),
	fLoadMeasurementEpoch(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
	B_INITIALIZE_RW_SPINLOCK(&fLoadLock);
}
420
421
// Bind this core to its \a id and owning \a package.
void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}
428
429
// Enqueue \a thread at the front of the core's shared run queue for the
// given \a priority and bump the thread count.
void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}
438
439
// Enqueue \a thread at the back of the core's shared run queue for the
// given \a priority and bump the thread count.
void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}
448
449
// Remove a non-idle, currently enqueued \a thread from the core's shared
// run queue, marking it dequeued and decrementing the thread count.
void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!thread->IsIdle());

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}
463
464
// Attach \a cpu to this core. The CPU starts out idle. If it is the first
// CPU, the core is (re)enabled: its load is reset, it is inserted into the
// low-load heap and registered as an idle core with its package.
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fCurrentLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}
	fCPUSet.SetBit(cpu->ID());

	// New CPUs enter the priority heap as idle.
	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}
485
486
// Detach \a cpu from this core. If it was the last CPU the core is
// disabled: its threads are unassigned, it is removed from whichever load
// heap holds it and from the package's idle list, and all queued threads
// are handed to \a threadPostProcessing for re-dispatch elsewhere.
void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	fCPUSet.ClearBit(cpu->ID());
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		// Force the key to -1 so this core becomes the heap minimum and
		// can be removed via RemoveMinimum().
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	// Same -1 trick to pull the CPU out of the core's priority heap.
	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}
532
533
// Refresh this core's load measurement and its position in the global load
// heaps. When a full measurement interval has passed the epoch advances and
// fLoad is replaced by the load accumulated during the interval; otherwise
// (with \a forceUpdate) only the heap key is refreshed. Cores crossing
// kHighLoad move to the high-load heap; dropping below kMediumLoad moves
// them back (hysteresis between the two thresholds avoids ping-ponging).
void
CoreEntry::_UpdateLoad(bool forceUpdate)
{
	SCHEDULER_ENTER_FUNCTION();

	// Nothing to do for a disabled core.
	if (fCPUCount <= 0)
		return;

	bigtime_t now = system_time();
	bool intervalEnded = now >= kLoadMeasureInterval + fLastLoadUpdate;
	bool intervalSkipped = now >= kLoadMeasureInterval * 2 + fLastLoadUpdate;

	if (!intervalEnded && !forceUpdate)
		return;

	WriteSpinLocker coreLocker(gCoreHeapsLock);

	int32 newKey;
	if (intervalEnded) {
		WriteSpinLocker locker(fLoadLock);

		// If we missed an entire interval the old average is stale; use
		// the current load alone.
		newKey = intervalSkipped ? fCurrentLoad : GetLoad();

		ASSERT(fCurrentLoad >= 0);
		ASSERT(fLoad >= fCurrentLoad);

		fLoad = fCurrentLoad;
		fLoadMeasurementEpoch++;
		fLastLoadUpdate = now;
	} else
		newKey = GetLoad();

	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			// Migrate from the low-load heap to the high-load heap.
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			// Migrate back to the low-load heap.
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		// Between the thresholds: stay in the current heap.
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}
603
604
605 /* static */ void
_UnassignThread(Thread * thread,void * data)606 CoreEntry::_UnassignThread(Thread* thread, void* data)
607 {
608 CoreEntry* core = static_cast<CoreEntry*>(data);
609 ThreadData* threadData = thread->scheduler_data;
610
611 if (threadData->Core() == core && thread->pinned_to_cpu == 0)
612 threadData->UnassignCore();
613 }
614
615
// Min-max heap of cores keyed by load, pre-sized for \a coreCount entries.
CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}
621
622
623 void
Dump()624 CoreLoadHeap::Dump()
625 {
626 CoreEntry* entry = PeekMinimum();
627 while (entry) {
628 int32 key = GetKey(entry);
629
630 DebugDumper::DumpCoreLoadHeapEntry(entry);
631
632 RemoveMinimum();
633 sDebugCoreHeap.Insert(entry, key);
634
635 entry = PeekMinimum();
636 }
637
638 entry = sDebugCoreHeap.PeekMinimum();
639 while (entry) {
640 int32 key = GetKey(entry);
641 sDebugCoreHeap.RemoveMinimum();
642 Insert(entry, key);
643 entry = sDebugCoreHeap.PeekMinimum();
644 }
645 }
646
647
// Construct a package entry with no cores; Init() assigns its ID.
PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}
655
656
// Assign this package its \a id.
void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}
662
663
// Register \a core as an (enabled, idle) core of this package. The first
// registered core also makes the package a member of the global idle
// package list.
void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}
674
675
// Unregister \a core from this package's idle bookkeeping; when the last
// core goes away the package leaves the global idle package list.
void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}
686
687
// Print \a cpu's pinned run queue, but only if it contains at least one
// non-idle thread (the idle thread alone is not worth printing).
/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}
699
700
// Print \a core's shared run queue.
/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}
706
707
// Print one core's load-heap line: average and current load, the summed
// load of threads assigned to the core, thread count and measurement epoch.
/* static */ void
DebugDumper::DumpCoreLoadHeapEntry(CoreEntry* entry)
{
	// Sum the load of all threads currently assigned to this core.
	CoreThreadsData threadsData;
	threadsData.fCore = entry;
	threadsData.fLoad = 0;
	thread_map(DebugDumper::_AnalyzeCoreThreads, &threadsData);

	kprintf("%4" B_PRId32 " %11" B_PRId32 "%% %11" B_PRId32 "%% %11" B_PRId32
		"%% %7" B_PRId32 " %5" B_PRIu32 "\n", entry->ID(), entry->fLoad / 10,
		entry->fCurrentLoad / 10, threadsData.fLoad, entry->ThreadCount(),
		entry->fLoadMeasurementEpoch);
}
721
722
// Print one line for \a package: its ID followed by the IDs of its idle
// cores (in reverse list order), or "-" if none are idle.
/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}
740
741
742 /* static */ void
_AnalyzeCoreThreads(Thread * thread,void * data)743 DebugDumper::_AnalyzeCoreThreads(Thread* thread, void* data)
744 {
745 CoreThreadsData* threadsData = static_cast<CoreThreadsData*>(data);
746 if (thread->scheduler_data->Core() == threadsData->fCore)
747 threadsData->fLoad += thread->scheduler_data->GetLoad();
748 }
749
750
751 static int
dump_run_queue(int,char **)752 dump_run_queue(int /* argc */, char** /* argv */)
753 {
754 int32 cpuCount = smp_get_num_cpus();
755 int32 coreCount = gCoreCount;
756
757 for (int32 i = 0; i < coreCount; i++) {
758 kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
759 DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
760 }
761
762 for (int32 i = 0; i < cpuCount; i++)
763 DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);
764
765 return 0;
766 }
767
768
// Debugger command "cpu_heap": print both global core load heaps, then the
// per-core CPU priority heap of every multi-CPU core.
static int
dump_cpu_heap(int /* argc */, char** /* argv */)
{
	kprintf("core average_load current_load threads_load threads epoch\n");
	gCoreLoadHeap.Dump();
	kprintf("\n");
	gCoreHighLoadHeap.Dump();

	for (int32 i = 0; i < gCoreCount; i++) {
		// A single-CPU core's heap is trivial; skip it.
		if (gCoreEntries[i].CPUCount() < 2)
			continue;

		kprintf("\nCore %" B_PRId32 " heap:\n", i);
		gCoreEntries[i].CPUHeap()->Dump();
	}

	return 0;
}
787
788
789 static int
dump_idle_cores(int,char **)790 dump_idle_cores(int /* argc */, char** /* argv */)
791 {
792 kprintf("Idle packages:\n");
793 IdlePackageList::ReverseIterator idleIterator
794 = gIdlePackageList.GetReverseIterator();
795
796 if (idleIterator.HasNext()) {
797 kprintf("package cores\n");
798
799 while (idleIterator.HasNext())
800 DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
801 } else
802 kprintf("No idle packages.\n");
803
804 return 0;
805 }
806
807
// Set up the scratch heaps and register the scheduler debugger commands.
// The heap-related commands are only useful (and only registered) on
// multi-core systems.
void Scheduler::init_debug_commands()
{
	// Placement-construct the static scratch heaps now that the CPU count
	// is known.
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}
823
824