xref: /haiku/src/system/kernel/scheduler/scheduler_cpu.cpp (revision 3b07762c548ec4016dea480d1061577cd15ec614)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include "scheduler_cpu.h"
8 
9 #include <util/AutoLock.h>
10 
11 #include <algorithm>
12 
13 #include "scheduler_thread.h"
14 
15 
namespace Scheduler {


// One CPUEntry per logical CPU, indexed by CPU number.
CPUEntry* gCPUEntries;

// One CoreEntry per core. Each enabled core lives in exactly one of the two
// load heaps (normal vs. high load); both heaps are guarded by
// gCoreHeapsLock (see CoreEntry::UpdateLoad()).
CoreEntry* gCoreEntries;
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

// One PackageEntry per physical package; packages with idle cores are linked
// into gIdlePackageList (gIdlePackageLock presumably guards the list — its
// readers are outside this file).
PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


}	// namespace Scheduler
34 
35 using namespace Scheduler;
36 
37 
// Collects the KDL dump helpers in one class so they can reach the private
// run queues of CPUEntry/CoreEntry and the idle core list of PackageEntry
// (they access fRunQueue/fIdleCores below, so DebugDumper is presumably
// declared a friend in scheduler_cpu.h).
class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);
};
44 
45 
// Scratch heaps for the Dump() routines below: dumping drains a heap entry
// by entry into the matching scratch heap and then reinserts everything, so
// the dumped heap is left unchanged.
static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;
48 
49 
50 void
51 ThreadRunQueue::Dump() const
52 {
53 	ThreadRunQueue::ConstIterator iterator = GetConstIterator();
54 	if (!iterator.HasNext())
55 		kprintf("Run queue is empty.\n");
56 	else {
57 		kprintf("thread      id      priority penalty  name\n");
58 		while (iterator.HasNext()) {
59 			ThreadData* threadData = iterator.Next();
60 			Thread* thread = threadData->GetThread();
61 
62 			kprintf("%p  %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
63 				thread, thread->id, thread->priority,
64 				thread->priority - threadData->GetEffectivePriority(),
65 				thread->name);
66 		}
67 	}
68 }
69 
70 
/*!	Creates a CPU entry with no load measured yet and initializes the locks
	guarding the scheduler mode switch and this CPU's run queue.
*/
CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}
80 
81 
/*!	Binds this entry to logical CPU \a id and its home \a core. */
void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}
88 
89 
/*!	Called when this CPU becomes available to the scheduler: resets the load
	estimate and registers the CPU with its core.
*/
void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}
96 
97 
/*!	Called when this CPU is being disabled: reassigns every IRQ currently
	routed to this CPU away from it (target CPU -1).
*/
void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// get rid of irqs
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		// the irqs_lock is dropped around the reassignment call; since the
		// list may change in the meantime, it is re-read from its head on
		// each iteration rather than advanced
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}
117 
118 
/*!	Enqueues \a thread at the head of the \a priority list of this CPU's
	run queue (the queue for threads pinned to this CPU), so it becomes the
	first candidate at that priority.
*/
void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}
125 
126 
/*!	Enqueues \a thread at the tail of the \a priority list of this CPU's
	run queue (the queue for threads pinned to this CPU).
*/
void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}
133 
134 
/*!	Removes \a thread from this CPU's run queue and marks it dequeued.
	The thread must currently be enqueued.
*/
void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}
143 
144 
/*!	Returns the highest effective priority thread in this core's shared run
	queue, or NULL if the queue is empty. The thread is not dequeued.
*/
inline ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}
151 
152 
/*!	Returns the highest effective priority thread in this CPU's own
	(pinned-thread) run queue, or NULL if it is empty. Not dequeued.
*/
inline ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}
159 
160 
/*!	Returns the first thread on the B_IDLE_PRIORITY list of this CPU's run
	queue, i.e. this CPU's idle thread, without dequeuing it.
*/
ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}
167 
168 
/*!	Publishes \a priority (the priority of what this CPU is now running) in
	the core's CPU priority heap and notifies the core when this CPU
	transitions into or out of the idle state. Must not be called for a
	disabled CPU.
*/
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	// crossing B_IDLE_PRIORITY in either direction changes the core's idle
	// CPU bookkeeping
	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}
186 
187 
/*!	Recomputes this CPU's load estimate from the active time accumulated
	since the last measurement, and asks the current scheduler mode to move
	IRQs away if the CPU turns out to be very highly loaded.
	May only be called on this CPU itself, with CPU load tracking enabled
	and the CPU not disabled.
*/
void
CPUEntry::ComputeLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCPULoad);
	ASSERT(!gCPU[fCPUNumber].disabled);
	ASSERT(fCPUNumber == smp_get_current_cpu());

	int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
			system_time());
	// a negative result means compute_load() made no new measurement
	// (presumably the measure interval has not elapsed — see its definition)
	if (oldLoad < 0)
		return;

	if (fLoad > kVeryHighLoad)
		gCurrentMode->rebalance_irqs(false);
}
205 
206 
/*!	Selects the thread this CPU should run next and dequeues it.
	The candidates are \a oldThread (the thread that has just run; may be
	NULL and is assumed not to be enqueued), the best thread pinned to this
	CPU, and the best thread in the core's shared run queue. The highest
	effective priority wins; on a tie with a queued candidate \a oldThread
	keeps running unless \a putAtBack is true.
	\return the thread to run; if it came from a queue it has been removed
		from it, while a returned \a oldThread stays unqueued.
*/
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	// lock order: this CPU's run queue lock first, then the core's
	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	// keep the old thread if it beats (or, unless putAtBack, ties) the best
	// queued candidate
	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	// pinned thread wins (it is also preferred on a priority tie); the core
	// queue lock is no longer needed
	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}
246 
247 
/*!	Performs the accounting for a context switch on this CPU from
	\a oldThreadData to \a nextThreadData: charges the time the old thread
	spent running to the CPU, its core and the thread itself, optionally
	refreshes the load estimate and the requested performance level, and
	snapshots the counters against which the next thread will be measured.
	Idle threads are not charged.
*/
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		// kernel + user time the old thread consumed since it was switched
		// in (last_* were snapshotted at that point, below)
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		// the sequential lock lets readers obtain a consistent active_time
		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		// baseline for the next thread's activity measurement
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}
285 
286 
/*!	Requests a CPU performance (frequency) level matching the current load,
	aiming at kTargetLoad: performance is lowered below the target load and
	raised above it. A disabled CPU is dropped to minimum performance.
*/
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	// base the request on the more loaded of the incoming thread and the
	// whole core
	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT(load >= 0 && load <= kMaxLoad);

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		// NOTE(review): the load delta is scaled by
		// kTargetLoad / kCPUPerformanceScaleMax here (and by
		// (kMaxLoad - kTargetLoad) / kCPUPerformanceScaleMax below).
		// Mapping a load range onto the performance scale would normally
		// multiply by kCPUPerformanceScaleMax and divide by the load
		// range instead — verify the intended units of these constants.
		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}
315 
316 
/*!	Creates a CPU priority heap sized for \a cpuCount entries. */
CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}
322 
323 
324 void
325 CPUPriorityHeap::Dump()
326 {
327 	kprintf("cpu priority load\n");
328 	CPUEntry* entry = PeekRoot();
329 	while (entry) {
330 		int32 cpu = entry->ID();
331 		int32 key = GetKey(entry);
332 		kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
333 			entry->GetLoad() / 10);
334 
335 		RemoveRoot();
336 		sDebugCPUHeap.Insert(entry, key);
337 
338 		entry = PeekRoot();
339 	}
340 
341 	entry = sDebugCPUHeap.PeekRoot();
342 	while (entry) {
343 		int32 key = GetKey(entry);
344 		sDebugCPUHeap.RemoveRoot();
345 		Insert(entry, key);
346 		entry = sDebugCPUHeap.PeekRoot();
347 	}
348 }
349 
350 
/*!	Creates an empty core entry (no CPUs, no threads, no load) and
	initializes the locks guarding its CPU heap, its shared run queue and
	its active time counter.
*/
CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fStarvationCounter(0),
	fStarvationCounterIdle(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
}
367 
368 
/*!	Binds this entry to core \a id and its enclosing \a package. */
void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}
375 
376 
/*!	Enqueues \a thread at the head of the \a priority list of this core's
	shared run queue and bumps the core's thread count.
*/
void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}
385 
386 
/*!	Enqueues \a thread at the tail of the \a priority list of this core's
	shared run queue and bumps the core's thread count.
*/
void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}
395 
396 
/*!	Removes \a thread from this core's shared run queue. The thread must be
	enqueued and must not be an idle thread.
*/
void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	ASSERT(!thread_is_idle_thread(thread->GetThread()));
	// count dequeues of the lowest-priority/CPU-bound threads; presumably
	// readers compare this counter over time to tell whether such threads
	// are making progress (starvation detection) — confirm against the
	// counter's readers in scheduler_thread/scheduler modes
	if (thread->GetEffectivePriority() == B_LOWEST_ACTIVE_PRIORITY
		|| thread->IsCPUBound()) {
		atomic_add(&fStarvationCounter, 1);
	}

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}
414 
415 
/*!	Adds \a delta to this core's load and, at most once every
	kLoadMeasureInterval, republishes the core under its new load key in the
	proper global heap: above kHighLoad it moves to gCoreHighLoadHeap, below
	kMediumLoad back to gCoreLoadHeap, and in the band between the two it
	stays where it is (hysteresis). Does nothing for a disabled core.
*/
void
CoreEntry::UpdateLoad(int32 delta)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);

	// no enabled CPUs — the core is disabled and not in any heap
	if (fCPUCount <= 0)
		return;

	atomic_add(&fLoad, delta);

	bigtime_t now = system_time();
	if (now < kLoadMeasureInterval + fLastLoadUpdate)
		return;
	// don't contend for the heaps lock — if someone else holds it the heap
	// update simply happens on a later call
	if (!try_acquire_write_spinlock(&gCoreHeapsLock))
		return;
	// hand the already-acquired lock to the locker (alreadyLocked == true)
	WriteSpinLocker coreLocker(gCoreHeapsLock, true);

	fLastLoadUpdate = now;

	int32 newKey = GetLoad();
	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			// move to the high load heap; ModifyKey(-1) floats the entry to
			// the heap minimum so it can be removed
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			// dropped below the hysteresis band; move back to the normal
			// load heap
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		// between kMediumLoad and kHighLoad: update the key in whichever
		// heap the core is currently in
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}
475 
476 
/*!	Registers \a cpu with this core; the CPU enters the priority heap as
	idle. If it is the core's first CPU the core has just been (re)enabled:
	its load is reset and it is reinserted into the global load heap and its
	package's idle core list.
*/
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}

	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}
495 
496 
/*!	Unregisters \a cpu from this core. If it was the core's last CPU the
	core is being disabled: every unpinned thread assigned to the core is
	unassigned, the core leaves the global load heaps and its package's idle
	list, and each thread still in its run queue is dequeued and handed to
	\a threadPostProcessing to be put somewhere else.
*/
void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		if (fHighLoad) {
			// ModifyKey(-1) floats the core to the heap minimum so it can
			// be removed
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	// remove the CPU from the priority heap, again via the float-to-top
	// ModifyKey(-1) trick
	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}
541 
542 
543 /* static */ void
544 CoreEntry::_UnassignThread(Thread* thread, void* data)
545 {
546 	CoreEntry* core = static_cast<CoreEntry*>(data);
547 	ThreadData* threadData = thread->scheduler_data;
548 
549 	if (threadData->Core() == core && thread->pinned_to_cpu == 0)
550 		threadData->UnassignCore();
551 }
552 
553 
/*!	Creates a core load heap sized for \a coreCount entries. */
CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}
559 
560 
561 void
562 CoreLoadHeap::Dump()
563 {
564 	CoreEntry* entry = PeekMinimum();
565 	while (entry) {
566 		int32 key = GetKey(entry);
567 
568 		kprintf("%4" B_PRId32 " %3" B_PRId32 "%% %7" B_PRId32 "\n", entry->ID(),
569 			entry->GetLoad() / 10, entry->ThreadCount());
570 
571 		RemoveMinimum();
572 		sDebugCoreHeap.Insert(entry, key);
573 
574 		entry = PeekMinimum();
575 	}
576 
577 	entry = sDebugCoreHeap.PeekMinimum();
578 	while (entry) {
579 		int32 key = GetKey(entry);
580 		sDebugCoreHeap.RemoveMinimum();
581 		Insert(entry, key);
582 		entry = sDebugCoreHeap.PeekMinimum();
583 	}
584 }
585 
586 
/*!	Creates an empty package entry (no cores yet) and initializes the lock
	guarding its core bookkeeping.
*/
PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}
594 
595 
/*!	Assigns this entry its package \a id. */
void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}
601 
602 
/*!	Adds \a core to this package's idle core list, bumping both the core
	and idle-core counters. When the package gains its first core it is
	linked into the global idle package list.
*/
void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}
613 
614 
/*!	Removes \a core from this package's idle core list, decrementing both
	counters. When the last core goes away the package is unlinked from the
	global idle package list.
*/
void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}
625 
626 
/*!	KDL helper: prints \a cpu's run queue, but only when its first entry is
	something other than the CPU's idle thread (only the head of the queue
	is checked).
*/
/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}
638 
639 
/*!	KDL helper: prints \a core's shared run queue. */
/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}
645 
646 
/*!	KDL helper: prints one line with \a package's ID followed by the IDs of
	its idle cores (walked in reverse list order), or "-" if none are idle.
*/
/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			// separator only between entries, not after the last one
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}
664 
665 
666 static int
667 dump_run_queue(int /* argc */, char** /* argv */)
668 {
669 	int32 cpuCount = smp_get_num_cpus();
670 	int32 coreCount = gCoreCount;
671 
672 	for (int32 i = 0; i < coreCount; i++) {
673 		kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
674 		DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
675 	}
676 
677 	for (int32 i = 0; i < cpuCount; i++)
678 		DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);
679 
680 	return 0;
681 }
682 
683 
684 static int
685 dump_cpu_heap(int /* argc */, char** /* argv */)
686 {
687 	kprintf("core load threads\n");
688 	gCoreLoadHeap.Dump();
689 	kprintf("\n");
690 	gCoreHighLoadHeap.Dump();
691 
692 	for (int32 i = 0; i < gCoreCount; i++) {
693 		if (gCoreEntries[i].CPUCount() < 2)
694 			continue;
695 
696 		kprintf("\nCore %" B_PRId32 " heap:\n", i);
697 		gCoreEntries[i].CPUHeap()->Dump();
698 	}
699 
700 	return 0;
701 }
702 
703 
704 static int
705 dump_idle_cores(int /* argc */, char** /* argv */)
706 {
707 	kprintf("Idle packages:\n");
708 	IdlePackageList::ReverseIterator idleIterator
709 		= gIdlePackageList.GetReverseIterator();
710 
711 	if (idleIterator.HasNext()) {
712 		kprintf("package cores\n");
713 
714 		while (idleIterator.HasNext())
715 			DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
716 	} else
717 		kprintf("No idle packages.\n");
718 
719 	return 0;
720 }
721 
722 
/*!	Registers the scheduler's kernel debugger commands. The static scratch
	heaps used by the Dump() routines are re-constructed in place here, now
	that the number of CPUs is known.
*/
void Scheduler::init_debug_commands()
{
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	// NOTE(review): sized with the CPU count rather than the core count;
	// harmless over-allocation, but gCoreCount would be the tighter fit
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		// with a single core the load heaps and the idle package list carry
		// no useful information
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}
738 
739