/*
 * haiku/src/system/kernel/scheduler/scheduler_cpu.cpp
 * (revision c075da70b67416e74b505171d06e4c60db8a8a87)
 *
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_cpu.h"

#include <util/AutoLock.h>

#include <algorithm>

#include "scheduler_thread.h"


namespace Scheduler {


CPUEntry* gCPUEntries;

CoreEntry* gCoreEntries;
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


}	// namespace Scheduler

using namespace Scheduler;


class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpCoreLoadHeapEntry(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);

private:
	struct CoreThreadsData {
			CoreEntry*	fCore;
			int32		fLoad;
	};

	static	void		_AnalyzeCoreThreads(Thread* thread, void* data);
};


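// Spare heaps for the debugger commands below: dumping a heap is destructive
// (entries are popped one by one), so each entry is parked here and afterwards
// reinserted into the heap it came from.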
static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;


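// Prints each thread in the run queue along with its current priority
// penalty, i.e. the difference between its static and effective priority.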
void
ThreadRunQueue::Dump() const
{
	ThreadRunQueue::ConstIterator iterator = GetConstIterator();
	if (!iterator.HasNext())
		kprintf("Run queue is empty.\n");
	else {
		kprintf("thread      id      priority penalty  name\n");
		while (iterator.HasNext()) {
			ThreadData* threadData = iterator.Next();
			Thread* thread = threadData->GetThread();

			kprintf("%p  %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
				thread, thread->id, thread->priority,
				thread->priority - threadData->GetEffectivePriority(),
				thread->name);
		}
	}
}


CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0),
	fUpdateLoadEvent(false)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}


void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}


void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}


void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// Reassign all I/O interrupts that currently target this CPU; a target
	// of -1 lets the system pick a new CPU automatically.
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}


void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}


void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}


void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}


inline ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


inline ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}


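// Updates this CPU's key in the core's priority heap and lets the core know
// when the CPU enters or leaves the idle state.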
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}


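// Recomputes this CPU's load from the active time accumulated since the last
// measurement. If the CPU turns out to be very highly loaded, the current
// scheduler mode is asked to rebalance IRQs.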
void
CPUEntry::ComputeLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCPULoad);
	ASSERT(!gCPU[fCPUNumber].disabled);
	ASSERT(fCPUNumber == smp_get_current_cpu());

	int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
			system_time());
	if (oldLoad < 0)
		return;

	if (fLoad > kVeryHighLoad)
		gCurrentMode->rebalance_irqs(false);
}


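// Chooses the next thread to run on this CPU from three candidates: the
// thread that just ran (if any), the best thread pinned to this CPU and the
// best thread in the core's shared run queue. The old thread is kept when it
// has the strictly highest effective priority, or on a tie unless putAtBack
// is true.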
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}


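// Charges the CPU time used by the outgoing thread to the per-CPU and
// per-core statistics as well as to the thread's own load estimate, then
// records the incoming thread's time counters as the baseline for its next
// time slice.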
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}


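// Arms the quantum timer: a reschedule event for an ordinary thread, or, when
// core load tracking is enabled, a load update event for the idle thread so
// that load statistics continue to be refreshed on an otherwise idle CPU.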
void
CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
{
	cpu_ent* cpu = &gCPU[ID()];

	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;

	if (!thread->IsIdle()) {
		bigtime_t quantum = thread->GetQuantumLeft();
		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
			B_ONE_SHOT_RELATIVE_TIMER);
	} else if (gTrackCoreLoad) {
		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
			kLoadMeasureInterval * 2, B_ONE_SHOT_RELATIVE_TIMER);
		fUpdateLoadEvent = true;
	}
}


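// Requests a CPU performance (e.g. frequency) level proportional to how far
// the observed load is from kTargetLoad. A disabled CPU is simply driven
// towards its lowest performance level.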
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT(load >= 0 && load <= kMaxLoad);

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}


/* static */ int32
CPUEntry::_RescheduleEvent(timer* /* unused */)
{
	get_cpu_struct()->invoke_scheduler = true;
	get_cpu_struct()->preempted = true;
	return B_HANDLED_INTERRUPT;
}


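// Fired once a CPU has stayed idle for two load measurement intervals. The
// zero delta passed to ChangeLoad() adds no load; presumably the call is made
// only for its side effect of triggering the core's periodic load update.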
/* static */ int32
CPUEntry::_UpdateLoadEvent(timer* /* unused */)
{
	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
	CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
	return B_HANDLED_INTERRUPT;
}


CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}


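// Prints all CPUs in this heap together with their priority keys and loads.
// The load values appear to be stored in tenths of a percent, hence the
// division by 10 when printing a percentage.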
void
CPUPriorityHeap::Dump()
{
	kprintf("cpu priority load\n");
	CPUEntry* entry = PeekRoot();
	while (entry) {
		int32 cpu = entry->ID();
		int32 key = GetKey(entry);
		kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
			entry->GetLoad() / 10);

		RemoveRoot();
		sDebugCPUHeap.Insert(entry, key);

		entry = PeekRoot();
	}

	entry = sDebugCPUHeap.PeekRoot();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCPUHeap.RemoveRoot();
		Insert(entry, key);
		entry = sDebugCPUHeap.PeekRoot();
	}
}


CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fCurrentLoad(0),
	fLoadMeasurementEpoch(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
	B_INITIALIZE_RW_SPINLOCK(&fLoadLock);
}


void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}


void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!thread->IsIdle());

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}


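// Registers an (idle) CPU with this core. The first CPU to arrive reenables
// the core: its load is reset and it is reinserted into the global core load
// heap and into its package's idle core list.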
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fCurrentLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}

	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}


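// Unregisters a CPU from this core. Removing the last CPU disables the core:
// non-pinned threads are unassigned from it, it is taken out of the global
// load heaps, and every thread still in its run queue is handed over to the
// supplied ThreadProcessing object.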
void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}


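// Recomputes this core's load and moves the core between gCoreLoadHeap and
// gCoreHighLoadHeap when it crosses the kHighLoad/kMediumLoad thresholds.
// fLoadMeasurementEpoch counts the completed measurement intervals.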
void
CoreEntry::_UpdateLoad(bool forceUpdate)
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCPUCount <= 0)
		return;

	bigtime_t now = system_time();
	bool intervalEnded = now >= kLoadMeasureInterval + fLastLoadUpdate;
	bool intervalSkipped = now >= kLoadMeasureInterval * 2 + fLastLoadUpdate;

	if (!intervalEnded && !forceUpdate)
		return;

	WriteSpinLocker coreLocker(gCoreHeapsLock);

	int32 newKey;
	if (intervalEnded) {
		WriteSpinLocker locker(fLoadLock);

		newKey = intervalSkipped ? fCurrentLoad : GetLoad();

		ASSERT(fCurrentLoad >= 0);
		ASSERT(fLoad >= fCurrentLoad);

		fLoad = fCurrentLoad;
		fLoadMeasurementEpoch++;
		fLastLoadUpdate = now;
	} else
		newKey = GetLoad();

	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}


/* static */ void
CoreEntry::_UnassignThread(Thread* thread, void* data)
{
	CoreEntry* core = static_cast<CoreEntry*>(data);
	ThreadData* threadData = thread->scheduler_data;

	if (threadData->Core() == core && thread->pinned_to_cpu == 0)
		threadData->UnassignCore();
}


CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}


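// Prints all cores in this heap; like CPUPriorityHeap::Dump() this empties
// the heap while iterating and rebuilds it from the spare debug heap.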
void
CoreLoadHeap::Dump()
{
	CoreEntry* entry = PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);

		DebugDumper::DumpCoreLoadHeapEntry(entry);

		RemoveMinimum();
		sDebugCoreHeap.Insert(entry, key);

		entry = PeekMinimum();
	}

	entry = sDebugCoreHeap.PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCoreHeap.RemoveMinimum();
		Insert(entry, key);
		entry = sDebugCoreHeap.PeekMinimum();
	}
}


PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}


void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}


void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}


void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}


/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}


/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}


/* static */ void
DebugDumper::DumpCoreLoadHeapEntry(CoreEntry* entry)
{
	CoreThreadsData threadsData;
	threadsData.fCore = entry;
	threadsData.fLoad = 0;
	thread_map(DebugDumper::_AnalyzeCoreThreads, &threadsData);

	kprintf("%4" B_PRId32 " %11" B_PRId32 "%% %11" B_PRId32 "%% %11" B_PRId32
		"%% %7" B_PRId32 " %5" B_PRIu32 "\n", entry->ID(), entry->fLoad / 10,
		entry->fCurrentLoad / 10, threadsData.fLoad, entry->ThreadCount(),
		entry->fLoadMeasurementEpoch);
}


/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}


/* static */ void
DebugDumper::_AnalyzeCoreThreads(Thread* thread, void* data)
{
	CoreThreadsData* threadsData = static_cast<CoreThreadsData*>(data);
	if (thread->scheduler_data->Core() == threadsData->fCore)
		threadsData->fLoad += thread->scheduler_data->GetLoad();
}


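// Debugger command "run_queue": dumps the shared run queue of every core,
// followed by each CPU's pinned run queue (printed only when it holds more
// than the idle thread).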
static int
dump_run_queue(int /* argc */, char** /* argv */)
{
	int32 cpuCount = smp_get_num_cpus();
	int32 coreCount = gCoreCount;

	for (int32 i = 0; i < coreCount; i++) {
		kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
		DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
	}

	for (int32 i = 0; i < cpuCount; i++)
		DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);

	return 0;
}


static int
dump_cpu_heap(int /* argc */, char** /* argv */)
{
	kprintf("core average_load current_load threads_load threads epoch\n");
	gCoreLoadHeap.Dump();
	kprintf("\n");
	gCoreHighLoadHeap.Dump();

	for (int32 i = 0; i < gCoreCount; i++) {
		if (gCoreEntries[i].CPUCount() < 2)
			continue;

		kprintf("\nCore %" B_PRId32 " heap:\n", i);
		gCoreEntries[i].CPUHeap()->Dump();
	}

	return 0;
}


static int
dump_idle_cores(int /* argc */, char** /* argv */)
{
	kprintf("Idle packages:\n");
	IdlePackageList::ReverseIterator idleIterator
		= gIdlePackageList.GetReverseIterator();

	if (idleIterator.HasNext()) {
		kprintf("package cores\n");

		while (idleIterator.HasNext())
			DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
	} else
		kprintf("No idle packages.\n");

	return 0;
}


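// Registers the scheduler related debugger commands; the heap and idle core
// commands are only meaningful, and therefore only installed, on systems
// with more than one core.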
void
Scheduler::init_debug_commands()
{
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}
