xref: /haiku/src/system/kernel/scheduler/scheduler_cpu.cpp (revision 5e96d7d537fbec23bad4ae9b4c8e7b02e769f0c6)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_cpu.h"

#include <util/AutoLock.h>

#include <algorithm>

#include "scheduler_thread.h"


namespace Scheduler {


CPUEntry* gCPUEntries;

CoreEntry* gCoreEntries;
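// Enabled cores live in one of two load heaps: gCoreLoadHeap for cores below
// the high-load threshold and gCoreHighLoadHeap for the rest (see
// CoreEntry::_UpdateLoad() for the migration logic). Both heaps are protected
// by gCoreHeapsLock.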
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


}	// namespace Scheduler

using namespace Scheduler;


class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpCoreLoadHeapEntry(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);

private:
	struct CoreThreadsData {
			CoreEntry*	fCore;
			int32		fLoad;
	};

	static	void		_AnalyzeCoreThreads(Thread* thread, void* data);
};


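// Scratch heaps for the Dump() debugger commands: entries are popped from the
// real heap for printing and parked here so the heap can be rebuilt afterwards.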
static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;


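// Prints one line per queued thread; "penalty" is the difference between the
// thread's static priority and its current effective priority.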
void
ThreadRunQueue::Dump() const
{
	ThreadRunQueue::ConstIterator iterator = GetConstIterator();
	if (!iterator.HasNext())
		kprintf("Run queue is empty.\n");
	else {
		kprintf("thread      id      priority penalty  name\n");
		while (iterator.HasNext()) {
			ThreadData* threadData = iterator.Next();
			Thread* thread = threadData->GetThread();

			kprintf("%p  %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
				thread, thread->id, thread->priority,
				thread->priority - threadData->GetEffectivePriority(),
				thread->name);
		}
	}
}


CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0),
	fUpdateLoadEvent(false)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}


void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}


void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}


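// Called when this CPU is disabled: hand every IRQ assigned to it back to the
// system (target CPU -1). The irqs_lock is dropped around each
// assign_io_interrupt_to_cpu() call, presumably because the reassignment
// itself needs to lock the affected CPUs, so the list is re-read from its
// head on every iteration.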
void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// get rid of irqs
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}


void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}


void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}


void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}


inline ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


inline ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}


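// Updates this CPU's key in the core's CPU priority heap and notifies the
// core when the CPU transitions into or out of the idle state.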
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}


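// Refreshes this CPU's load estimate; may only be called on the CPU itself.
// When the load is very high, the current scheduler mode is asked to move
// some IRQs elsewhere.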
void
CPUEntry::ComputeLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCPULoad);
	ASSERT(!gCPU[fCPUNumber].disabled);
	ASSERT(fCPUNumber == smp_get_current_cpu());

	int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
			system_time());
	if (oldLoad < 0)
		return;

	if (fLoad > kVeryHighLoad)
		gCurrentMode->rebalance_irqs(false);
}


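// Picks the thread to run next from three candidates: the thread that just
// ran (oldThread), the highest-priority thread pinned to this CPU
// (fRunQueue), and the highest-priority thread in the core's shared queue.
// The old thread wins ties only if putAtBack is false. Note the lock order:
// the CPU run queue lock is acquired before the core run queue lock.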
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}


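// Charges the CPU time consumed by oldThreadData (kernel plus user time since
// it was scheduled) to the CPU, the core and the thread itself, refreshes the
// load and requested performance level when load tracking is enabled, and
// snapshots the next thread's time counters so its slice can be measured the
// same way.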
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}


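// Arms the per-CPU quantum timer: a reschedule event when a real thread's
// quantum expires, or a one-shot load update while the CPU sits in the idle
// thread (so the core's load measurement keeps advancing while nothing runs).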
void
CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
{
	cpu_ent* cpu = &gCPU[ID()];

	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;

	if (!thread->IsIdle()) {
		bigtime_t quantum = thread->GetQuantumLeft();
		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
			B_ONE_SHOT_RELATIVE_TIMER);
	} else if (gTrackCoreLoad) {
		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
			kLoadMeasureInterval, B_ONE_SHOT_RELATIVE_TIMER);
		fUpdateLoadEvent = true;
	}
}


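// Nudges the CPU performance level toward kTargetLoad: below the target
// performance is decreased, above it increased, with a step roughly
// proportional to the distance from the target. A disabled CPU is clamped to
// the minimum performance level.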
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT(load >= 0 && load <= kMaxLoad);

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}


/* static */ int32
CPUEntry::_RescheduleEvent(timer* /* unused */)
{
	get_cpu_struct()->invoke_scheduler = true;
	get_cpu_struct()->preempted = true;
	return B_HANDLED_INTERRUPT;
}


/* static */ int32
CPUEntry::_UpdateLoadEvent(timer* /* unused */)
{
	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
	CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
	return B_HANDLED_INTERRUPT;
}


CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}


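// Heap dumps are destructive: each root is printed and moved to the scratch
// heap, then everything is inserted back. Loads are kept in a 0..kMaxLoad
// range (apparently per mille, hence the division by 10 to print a
// percentage).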
void
CPUPriorityHeap::Dump()
{
	kprintf("cpu priority load\n");
	CPUEntry* entry = PeekRoot();
	while (entry) {
		int32 cpu = entry->ID();
		int32 key = GetKey(entry);
		kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
			entry->GetLoad() / 10);

		RemoveRoot();
		sDebugCPUHeap.Insert(entry, key);

		entry = PeekRoot();
	}

	entry = sDebugCPUHeap.PeekRoot();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCPUHeap.RemoveRoot();
		Insert(entry, key);
		entry = sDebugCPUHeap.PeekRoot();
	}
}


CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fCurrentLoad(0),
	fLoadMeasurementEpoch(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
	B_INITIALIZE_RW_SPINLOCK(&fLoadLock);
}


void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}


void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!thread->IsIdle());

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}


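// Attaches a CPU to this core. The first CPU re-enables the core: its load is
// reset and it is put back into the low-load heap and its package's idle
// list. The CPU itself starts out idle in the core's priority heap.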
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fCurrentLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}

	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}


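// Detaches a CPU; removing the last one disables the core. Heap entries are
// removed by first raising them to the root with ModifyKey(..., -1) — a key
// below any valid load or priority — and then popping the minimum/root.
// Threads left in the core's queue are unassigned and handed over to
// threadPostProcessing.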
void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}


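// Refreshes the core's load estimate at most once per kLoadMeasureInterval
// (unless forced) and moves the core between gCoreLoadHeap and
// gCoreHighLoadHeap. The move uses hysteresis: cores enter the high-load heap
// above kHighLoad but return only once they drop below kMediumLoad.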
void
CoreEntry::_UpdateLoad(bool forceUpdate)
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCPUCount <= 0)
		return;

	bigtime_t now = system_time();
	bool intervalEnded = now >= kLoadMeasureInterval + fLastLoadUpdate;

	if (!intervalEnded && !forceUpdate)
		return;

	WriteSpinLocker coreLocker(gCoreHeapsLock);

	int32 newKey = GetLoad();
	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (intervalEnded) {
		WriteSpinLocker locker(fLoadLock);

		ASSERT(fCurrentLoad >= 0);
		ASSERT(fLoad >= fCurrentLoad);

		fLoad = fCurrentLoad;
		fLoadMeasurementEpoch++;
		fLastLoadUpdate = now;
	}

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}


/* static */ void
CoreEntry::_UnassignThread(Thread* thread, void* data)
{
	CoreEntry* core = static_cast<CoreEntry*>(data);
	ThreadData* threadData = thread->scheduler_data;

	if (threadData->Core() == core && thread->pinned_to_cpu == 0)
		threadData->UnassignCore();
}


CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}


void
CoreLoadHeap::Dump()
{
	CoreEntry* entry = PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);

		DebugDumper::DumpCoreLoadHeapEntry(entry);

		RemoveMinimum();
		sDebugCoreHeap.Insert(entry, key);

		entry = PeekMinimum();
	}

	entry = sDebugCoreHeap.PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCoreHeap.RemoveMinimum();
		Insert(entry, key);
		entry = sDebugCoreHeap.PeekMinimum();
	}
}


PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}


void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}


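// Tracks which cores of this package are idle: the package joins
// gIdlePackageList when its first idle core is added and leaves it again when
// the last one is removed.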
void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}


void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}


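// Prints a CPU's pinned-thread queue, but only if its highest-priority entry
// is something other than the idle thread.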
/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}


/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}


/* static */ void
DebugDumper::DumpCoreLoadHeapEntry(CoreEntry* entry)
{
	CoreThreadsData threadsData;
	threadsData.fCore = entry;
	threadsData.fLoad = 0;
	thread_map(DebugDumper::_AnalyzeCoreThreads, &threadsData);

	kprintf("%4" B_PRId32 " %11" B_PRId32 "%% %11" B_PRId32 "%% %11" B_PRId32
		"%% %7" B_PRId32 " %5" B_PRIu32 "\n", entry->ID(), entry->fLoad / 10,
		entry->fCurrentLoad / 10, threadsData.fLoad, entry->ThreadCount(),
		entry->fLoadMeasurementEpoch);
}


/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}


/* static */ void
DebugDumper::_AnalyzeCoreThreads(Thread* thread, void* data)
{
	CoreThreadsData* threadsData = static_cast<CoreThreadsData*>(data);
	if (thread->scheduler_data->Core() == threadsData->fCore)
		threadsData->fLoad += thread->scheduler_data->GetLoad();
}


static int
dump_run_queue(int /* argc */, char** /* argv */)
{
	int32 cpuCount = smp_get_num_cpus();
	int32 coreCount = gCoreCount;

	for (int32 i = 0; i < coreCount; i++) {
		kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
		DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
	}

	for (int32 i = 0; i < cpuCount; i++)
		DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);

	return 0;
}


static int
dump_cpu_heap(int /* argc */, char** /* argv */)
{
	kprintf("core average_load current_load threads_load threads epoch\n");
	gCoreLoadHeap.Dump();
	kprintf("\n");
	gCoreHighLoadHeap.Dump();

	for (int32 i = 0; i < gCoreCount; i++) {
		if (gCoreEntries[i].CPUCount() < 2)
			continue;

		kprintf("\nCore %" B_PRId32 " heap:\n", i);
		gCoreEntries[i].CPUHeap()->Dump();
	}

	return 0;
}


static int
dump_idle_cores(int /* argc */, char** /* argv */)
{
	kprintf("Idle packages:\n");
	IdlePackageList::ReverseIterator idleIterator
		= gIdlePackageList.GetReverseIterator();

	if (idleIterator.HasNext()) {
		kprintf("package cores\n");

		while (idleIterator.HasNext())
			DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
	} else
		kprintf("No idle packages.\n");

	return 0;
}


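// Registers the scheduler's kernel debugger commands. The scratch heaps are
// (re)constructed in place here because their capacities depend on the number
// of CPUs, which is only known at runtime.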
void
Scheduler::init_debug_commands()
{
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}