/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_cpu.h"

#include <util/AutoLock.h>

#include <algorithm>

#include "scheduler_thread.h"


namespace Scheduler {


CPUEntry* gCPUEntries;

CoreEntry* gCoreEntries;
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


}	// namespace Scheduler

using namespace Scheduler;


class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpCoreLoadHeapEntry(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);

private:
	struct CoreThreadsData {
			CoreEntry*	fCore;
			int32		fLoad;
	};

	static	void		_AnalyzeCoreThreads(Thread* thread, void* data);
};


static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;


void
ThreadRunQueue::Dump() const
{
	ThreadRunQueue::ConstIterator iterator = GetConstIterator();
	if (!iterator.HasNext())
		kprintf("Run queue is empty.\n");
	else {
		kprintf("thread      id      priority penalty  name\n");
		while (iterator.HasNext()) {
			ThreadData* threadData = iterator.Next();
			Thread* thread = threadData->GetThread();

			kprintf("%p  %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
				thread, thread->id, thread->priority,
				thread->priority - threadData->GetEffectivePriority(),
				thread->name);
		}
	}
}


CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0),
	fUpdateLoadEvent(false)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}


void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}


void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}


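/*!	Called when this CPU is being disabled: returns every IRQ currently
	assigned to this CPU back to the system so that it can be routed to
	one of the remaining, still enabled CPUs.
*/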
void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// get rid of irqs
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}


void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}


void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}


void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}


ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}


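/*!	Updates this CPU's key in the core's CPU priority heap and notifies
	the core when the CPU goes idle or wakes up from idle.
*/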
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}


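/*!	Recomputes this CPU's load from the active time measured since the
	last update. If the resulting load is very high, the current
	scheduler mode is asked to rebalance IRQs away from this CPU.
*/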
void
CPUEntry::ComputeLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCPULoad);
	ASSERT(!gCPU[fCPUNumber].disabled);
	ASSERT(fCPUNumber == smp_get_current_cpu());

	int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
			system_time());
	if (oldLoad < 0)
		return;

	if (fLoad > kVeryHighLoad)
		gCurrentMode->rebalance_irqs(false);
}


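/*!	Chooses the next thread to run on this CPU: the highest priority
	candidate among \a oldThread, the CPU's pinned run queue and the
	core's shared run queue. On equal priority \a oldThread is kept
	running unless \a putAtBack is true.
*/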
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}


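/*!	Accounts the CPU time consumed by \a oldThreadData to the CPU, its
	core and the thread itself, updates the CPU load and the requested
	performance level, and records the time stamps needed to later
	account \a nextThreadData.
*/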
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}


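/*!	Arms the per-CPU quantum timer: a reschedule event after \a thread's
	remaining quantum or, for the idle thread, a deferred load update
	event when core load tracking is enabled.
*/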
void
CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
{
	cpu_ent* cpu = &gCPU[ID()];

	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;

	if (!thread->IsIdle()) {
		bigtime_t quantum = thread->GetQuantumLeft();
		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
			B_ONE_SHOT_RELATIVE_TIMER);
	} else if (gTrackCoreLoad) {
		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
			kLoadMeasureInterval * 2, B_ONE_SHOT_RELATIVE_TIMER);
		fUpdateLoadEvent = true;
	}
}


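/*!	Requests a CPU performance level matching the current load:
	performance is decreased while the load is below kTargetLoad and
	increased while it is above.
*/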
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT_PRINT(load >= 0 && load <= kMaxLoad, "load is out of range %"
		B_PRId32 " (max of %" B_PRId32 " %" B_PRId32 ")", load,
		threadData->GetLoad(), fCore->GetLoad());

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}


/* static */ int32
CPUEntry::_RescheduleEvent(timer* /* unused */)
{
	get_cpu_struct()->invoke_scheduler = true;
	get_cpu_struct()->preempted = true;
	return B_HANDLED_INTERRUPT;
}


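/*!	Timer hook armed by StartQuantumTimer() for idle CPUs; touches the
	core load so that the load measurement does not become stale while
	the CPU stays idle.
*/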
/* static */ int32
CPUEntry::_UpdateLoadEvent(timer* /* unused */)
{
	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
	CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
	return B_HANDLED_INTERRUPT;
}


CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}


void
CPUPriorityHeap::Dump()
{
	kprintf("cpu priority load\n");
	CPUEntry* entry = PeekRoot();
	while (entry) {
		int32 cpu = entry->ID();
		int32 key = GetKey(entry);
		kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
			entry->GetLoad() / 10);

		RemoveRoot();
		sDebugCPUHeap.Insert(entry, key);

		entry = PeekRoot();
	}

	entry = sDebugCPUHeap.PeekRoot();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCPUHeap.RemoveRoot();
		Insert(entry, key);
		entry = sDebugCPUHeap.PeekRoot();
	}
}


CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fCurrentLoad(0),
	fLoadMeasurementEpoch(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
	B_INITIALIZE_RW_SPINLOCK(&fLoadLock);
}


void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}


void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!thread->IsIdle());

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}


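/*!	Adds \a cpu to this core. The first CPU reenables the core: its load
	is reset and the core is inserted back into the load heap and its
	package's idle core list.
*/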
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fCurrentLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}

	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}


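/*!	Removes \a cpu from this core. Removing the last CPU disables the
	core: all of its threads are unassigned and handed to
	\a threadPostProcessing, and the core leaves its load heap and its
	package's idle core list.
*/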
void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}


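/*!	Updates the core's key in the load heaps, moving the core between
	gCoreLoadHeap and gCoreHighLoadHeap when its load rises above
	kHighLoad or falls below kMediumLoad. A new load measurement epoch
	is started once a full kLoadMeasureInterval has passed.
*/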
void
CoreEntry::_UpdateLoad(bool forceUpdate)
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCPUCount <= 0)
		return;

	bigtime_t now = system_time();
	bool intervalEnded = now >= kLoadMeasureInterval + fLastLoadUpdate;
	bool intervalSkipped = now >= kLoadMeasureInterval * 2 + fLastLoadUpdate;

	if (!intervalEnded && !forceUpdate)
		return;

	WriteSpinLocker coreLocker(gCoreHeapsLock);

	int32 newKey;
	if (intervalEnded) {
		WriteSpinLocker locker(fLoadLock);

		newKey = intervalSkipped ? fCurrentLoad : GetLoad();

		ASSERT(fCurrentLoad >= 0);
		ASSERT(fLoad >= fCurrentLoad);

		fLoad = fCurrentLoad;
		fLoadMeasurementEpoch++;
		fLastLoadUpdate = now;
	} else
		newKey = GetLoad();

	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}


/* static */ void
CoreEntry::_UnassignThread(Thread* thread, void* data)
{
	CoreEntry* core = static_cast<CoreEntry*>(data);
	ThreadData* threadData = thread->scheduler_data;

	if (threadData->Core() == core && thread->pinned_to_cpu == 0)
		threadData->UnassignCore();
}


CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}


void
CoreLoadHeap::Dump()
{
	CoreEntry* entry = PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);

		DebugDumper::DumpCoreLoadHeapEntry(entry);

		RemoveMinimum();
		sDebugCoreHeap.Insert(entry, key);

		entry = PeekMinimum();
	}

	entry = sDebugCoreHeap.PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCoreHeap.RemoveMinimum();
		Insert(entry, key);
		entry = sDebugCoreHeap.PeekMinimum();
	}
}


PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}


void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}


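/*!	Adds \a core to this package's list of idle cores. Together with its
	first core, the package itself joins the global idle package list.
*/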
void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}


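/*!	Removes \a core from this package's list of idle cores; when the
	last core is removed, the package leaves the global idle package
	list.
*/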
void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}


/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}


/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}


/* static */ void
DebugDumper::DumpCoreLoadHeapEntry(CoreEntry* entry)
{
	CoreThreadsData threadsData;
	threadsData.fCore = entry;
	threadsData.fLoad = 0;
	thread_map(DebugDumper::_AnalyzeCoreThreads, &threadsData);

	kprintf("%4" B_PRId32 " %11" B_PRId32 "%% %11" B_PRId32 "%% %11" B_PRId32
		"%% %7" B_PRId32 " %5" B_PRIu32 "\n", entry->ID(), entry->fLoad / 10,
		entry->fCurrentLoad / 10, threadsData.fLoad, entry->ThreadCount(),
		entry->fLoadMeasurementEpoch);
}


/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}


/* static */ void
DebugDumper::_AnalyzeCoreThreads(Thread* thread, void* data)
{
	CoreThreadsData* threadsData = static_cast<CoreThreadsData*>(data);
	if (thread->scheduler_data->Core() == threadsData->fCore)
		threadsData->fLoad += thread->scheduler_data->GetLoad();
}


static int
dump_run_queue(int /* argc */, char** /* argv */)
{
	int32 cpuCount = smp_get_num_cpus();
	int32 coreCount = gCoreCount;

	for (int32 i = 0; i < coreCount; i++) {
		kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
		DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
	}

	for (int32 i = 0; i < cpuCount; i++)
		DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);

	return 0;
}


static int
dump_cpu_heap(int /* argc */, char** /* argv */)
{
	kprintf("core average_load current_load threads_load threads epoch\n");
	gCoreLoadHeap.Dump();
	kprintf("\n");
	gCoreHighLoadHeap.Dump();

	for (int32 i = 0; i < gCoreCount; i++) {
		if (gCoreEntries[i].CPUCount() < 2)
			continue;

		kprintf("\nCore %" B_PRId32 " heap:\n", i);
		gCoreEntries[i].CPUHeap()->Dump();
	}

	return 0;
}


static int
dump_idle_cores(int /* argc */, char** /* argv */)
{
	kprintf("Idle packages:\n");
	IdlePackageList::ReverseIterator idleIterator
		= gIdlePackageList.GetReverseIterator();

	if (idleIterator.HasNext()) {
		kprintf("package cores\n");

		while (idleIterator.HasNext())
			DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
	} else
		kprintf("No idle packages.\n");

	return 0;
}


void
Scheduler::init_debug_commands()
{
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}