xref: /haiku/src/system/kernel/scheduler/scheduler.cpp (revision 909af08f4328301fbdef1ffb41f566c3b5bec0c7)
/*
 * Copyright 2013-2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2009, Rene Gollent, rene@gollent.com.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! The thread scheduler */


#include <OS.h>

#include <AutoDeleter.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <kscheduler.h>
#include <listeners.h>
#include <load_tracking.h>
#include <scheduler_defs.h>
#include <smp.h>
#include <timer.h>
#include <util/Random.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"


namespace Scheduler {


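// Used by scheduler_set_cpu_enabled() when a CPU is removed from its core:
// each thread handed to operator() is simply re-enqueued so that it can be
// picked up by another CPU.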
class ThreadEnqueuer : public ThreadProcessing {
public:
	void		operator()(ThreadData* thread);
};

scheduler_mode gCurrentModeID;
scheduler_mode_operations* gCurrentMode;

bool gSingleCore;
bool gTrackCoreLoad;
bool gTrackCPULoad;

}	// namespace Scheduler

using namespace Scheduler;


static bool sSchedulerEnabled;

SchedulerListenerList gSchedulerListeners;
spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;

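// Available scheduler modes, indexed by scheduler_mode
// (see scheduler_set_operation_mode()).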
static scheduler_mode_operations* sSchedulerModes[] = {
	&gSchedulerLowLatencyMode,
	&gSchedulerPowerSavingMode,
};

// Since CPU IDs used internally by the kernel bear no relation to the actual
// CPU topology, the following arrays are used to efficiently get the core
// and the package the CPU in question belongs to.
static int32* sCPUToCore;
static int32* sCPUToPackage;


static void enqueue(Thread* thread, bool newOne);


void
ThreadEnqueuer::operator()(ThreadData* thread)
{
	enqueue(thread->GetThread(), false);
}


void
scheduler_dump_thread_data(Thread* thread)
{
	thread->scheduler_data->Dump();
}


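/*!	Chooses a core and CPU for \a thread, puts it into the corresponding run
	queue and, if the thread should preempt whatever the chosen CPU is
	currently running, requests a reschedule on that CPU.
	\a newOne should be true when the thread has just become ready (as opposed
	to being put back after it was preempted); in that case an expired cache
	affinity makes the scheduler pick a core from scratch instead of merely
	rebalancing from the previous one.
*/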
static void
enqueue(Thread* thread, bool newOne)
{
	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;

	int32 threadPriority = threadData->GetEffectivePriority();
	T(EnqueueThread(thread, threadPriority));

	CPUEntry* targetCPU = NULL;
	CoreEntry* targetCore = NULL;
	if (thread->pinned_to_cpu > 0) {
		ASSERT(thread->previous_cpu != NULL);
		ASSERT(threadData->Core() != NULL);
		targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
	} else if (gSingleCore) {
		targetCore = &gCoreEntries[0];
	} else if (threadData->Core() != NULL
		&& (!newOne || !threadData->HasCacheExpired())) {
		targetCore = threadData->Rebalance();
	}

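	// ChooseCoreAndCPU() fills in whichever of targetCore/targetCPU is still
	// NULL and reports whether the chosen CPU has to reschedule.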
	const bool rescheduleNeeded = threadData->ChooseCoreAndCPU(targetCore, targetCPU);

	TRACE("enqueueing thread %" B_PRId32 " with priority %" B_PRId32 " on CPU %" B_PRId32 " (core %" B_PRId32 ")\n",
		thread->id, threadPriority, targetCPU->ID(), targetCore->ID());

	bool wasRunQueueEmpty = false;
	threadData->Enqueue(wasRunQueueEmpty);

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
		thread);

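	// Request a reschedule on the target CPU if the enqueued thread has a
	// higher priority than what is currently running there, if it ties but a
	// reschedule is needed anyway, or if the run queue was empty.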
	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
	if (threadPriority > heapPriority
		|| (threadPriority == heapPriority && rescheduleNeeded)
		|| wasRunQueueEmpty) {

		if (targetCPU->ID() == smp_get_current_cpu()) {
			gCPU[targetCPU->ID()].invoke_scheduler = true;
		} else {
			smp_send_ici(targetCPU->ID(), SMP_MSG_RESCHEDULE, 0, 0, 0,
				NULL, SMP_MSG_FLAG_ASYNC);
		}
	}
}


/*!	Enqueues the thread into the run queue.
	Note: thread lock must be held when entering this function
*/
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	SchedulerModeLocker _;

	TRACE("enqueueing new thread %" B_PRId32 " with static priority %" B_PRId32 "\n", thread->id,
		thread->priority);

	ThreadData* threadData = thread->scheduler_data;

	if (threadData->ShouldCancelPenalty())
		threadData->CancelPenalty();

	enqueue(thread, true);
}


/*!	Sets the priority of a thread.
*/
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
	ASSERT(are_interrupts_enabled());

	InterruptsSpinLocker _(thread->scheduler_lock);
	SchedulerModeLocker modeLocker;

	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;
	int32 oldPriority = thread->priority;

	TRACE("changing thread %" B_PRId32 " priority to %" B_PRId32 " (old: %" B_PRId32 ", effective: %" B_PRId32 ")\n",
		thread->id, priority, oldPriority, threadData->GetEffectivePriority());

	thread->priority = priority;
	threadData->CancelPenalty();

	if (priority == oldPriority)
		return oldPriority;

	if (thread->state != B_THREAD_READY) {
		if (thread->state == B_THREAD_RUNNING) {
			ASSERT(threadData->Core() != NULL);

			ASSERT(thread->cpu != NULL);
			CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];

			CoreCPUHeapLocker _(threadData->Core());
			cpu->UpdatePriority(priority);
		}

		return oldPriority;
	}

	// The thread is in the run queue. We need to remove it and re-insert it at
	// a new position.

	T(RemoveThread(thread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
		thread);

	if (threadData->Dequeue())
		enqueue(thread, true);

	return oldPriority;
}


void
scheduler_reschedule_ici()
{
	// This function is called as a result of an incoming ICI.
	// Make sure reschedule() is invoked.
	get_cpu_struct()->invoke_scheduler = true;
}


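// CPU time user timers must not keep running while their thread is off-CPU:
// stop_cpu_timers() pauses them for the outgoing thread and
// continue_cpu_timers() resumes them for the incoming one.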
static inline void
stop_cpu_timers(Thread* fromThread, Thread* toThread)
{
	SpinLocker teamLocker(&fromThread->team->time_lock);
	SpinLocker threadLocker(&fromThread->time_lock);

	if (fromThread->HasActiveCPUTimeUserTimers()
		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_stop_cpu_timers(fromThread, toThread);
	}
}


static inline void
continue_cpu_timers(Thread* thread, cpu_ent* cpu)
{
	SpinLocker teamLocker(&thread->team->time_lock);
	SpinLocker threadLocker(&thread->time_lock);

	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
	}
}


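// Called on the CPU a thread continues (or first starts) running on: releases
// the scheduler lock of the previously running thread, resumes CPU time user
// timers and notifies the user debugger.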
static void
thread_resumes(Thread* thread)
{
	cpu_ent* cpu = thread->cpu;

	release_spinlock(&cpu->previous_thread->scheduler_lock);

	// continue CPU time based user timers
	continue_cpu_timers(thread, cpu);

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);
}


void
scheduler_new_thread_entry(Thread* thread)
{
	thread_resumes(thread);

	SpinLocker locker(thread->time_lock);
	thread->last_time = system_time();
}


/*!	Switches the currently running thread.
	This is a service function for scheduler implementations.

	\param fromThread The currently running thread.
	\param toThread The thread to switch to. Must be different from
		\a fromThread.
*/
static inline void
switch_thread(Thread* fromThread, Thread* toThread)
{
	// notify the user debugger code
	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_unscheduled(fromThread);

	// stop CPU time based user timers
	stop_cpu_timers(fromThread, toThread);

	// update CPU and Thread structures and perform the context switch
	cpu_ent* cpu = fromThread->cpu;
	toThread->previous_cpu = toThread->cpu = cpu;
	fromThread->cpu = NULL;
	cpu->running_thread = toThread;
	cpu->previous_thread = fromThread;

	arch_thread_set_current_thread(toThread);
	arch_thread_context_switch(fromThread, toThread);

	// The use of fromThread below looks weird, but is correct. fromThread had
	// been unscheduled earlier, but is back now. For a thread scheduled the
	// first time the same is done in thread.cpp:common_thread_entry().
	thread_resumes(fromThread);
}


static void
reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	int32 thisCPU = smp_get_current_cpu();
	gCPU[thisCPU].invoke_scheduler = false;

	CPUEntry* cpu = CPUEntry::GetCPU(thisCPU);
	CoreEntry* core = CoreEntry::GetCore(thisCPU);

	Thread* oldThread = thread_get_current_thread();
	ThreadData* oldThreadData = oldThread->scheduler_data;

	CPUSet oldThreadMask;
	bool useOldThreadMask, fetchedOldThreadMask = false;

	oldThreadData->StopCPUTime();

	SchedulerModeLocker modeLocker;

	TRACE("reschedule(): cpu %" B_PRId32 ", current thread = %" B_PRId32 "\n", thisCPU,
		oldThread->id);

	oldThread->state = nextState;

	// return time spent in interrupts
	oldThreadData->SetStolenInterruptTime(gCPU[thisCPU].interrupt_time);

	bool enqueueOldThread = false;
	bool putOldThreadAtBack = false;
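	// Decide what happens to the old thread: a thread that is still ready is
	// re-enqueued (at the back of its queue if its quantum has ended), a dying
	// thread is only accounted for, and a waiting thread simply leaves the
	// run queues.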
	switch (nextState) {
		case B_THREAD_RUNNING:
		case B_THREAD_READY:
			enqueueOldThread = true;

			oldThreadMask = oldThreadData->GetCPUMask();
			useOldThreadMask = !oldThreadMask.IsEmpty();
			fetchedOldThreadMask = true;

			if (!oldThreadData->IsIdle() && (!useOldThreadMask || oldThreadMask.GetBit(thisCPU))) {
				oldThreadData->Continues();
				if (oldThreadData->HasQuantumEnded(oldThread->cpu->preempted,
						oldThread->has_yielded)) {
					TRACE("enqueueing thread %" B_PRId32 " into run queue priority ="
						" %" B_PRId32 "\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = true;
				} else {
					TRACE("putting thread %" B_PRId32 " back in run queue priority ="
						" %" B_PRId32 "\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = false;
				}
			}

			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			oldThreadData->Dies();
			break;
		default:
			oldThreadData->GoesAway();
			TRACE("not enqueueing thread %" B_PRId32 " into run queue next_state = %" B_PRId32 "\n",
				oldThread->id, nextState);
			break;
	}

	oldThread->has_yielded = false;

	// select the thread with the highest priority and re-enqueue the old thread
	ThreadData* nextThreadData;
	if (gCPU[thisCPU].disabled) {
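		// This CPU is disabled, so only its idle thread may keep running here.
		// Unless the old thread is pinned to this CPU, send it back through
		// enqueue() so that another core picks it up.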
		if (!oldThreadData->IsIdle()) {
			if (oldThread->pinned_to_cpu == 0) {
				putOldThreadAtBack = true;
				oldThreadData->UnassignCore(true);
			} else {
				putOldThreadAtBack = false;
			}

			CPURunQueueLocker cpuLocker(cpu);
			nextThreadData = cpu->PeekIdleThread();
			cpu->Remove(nextThreadData);
		} else
			nextThreadData = oldThreadData;
	} else {
		if (!fetchedOldThreadMask) {
			oldThreadMask = oldThreadData->GetCPUMask();
			useOldThreadMask = !oldThreadMask.IsEmpty();
			fetchedOldThreadMask = true;
		}
		bool oldThreadShouldMigrate = useOldThreadMask && !oldThreadMask.GetBit(thisCPU);
		if (oldThreadShouldMigrate)
			enqueueOldThread = false;

		nextThreadData
			= cpu->ChooseNextThread(enqueueOldThread ? oldThreadData : NULL,
				putOldThreadAtBack);

		if (oldThreadShouldMigrate) {
			enqueue(oldThread, true);
			// replace with the idle thread, if no other thread could be found
			if (oldThreadData == nextThreadData)
				nextThreadData = cpu->PeekIdleThread();
		}

		// update CPU heap
		CoreCPUHeapLocker cpuLocker(core);
		cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
	}

	Thread* nextThread = nextThreadData->GetThread();
	ASSERT(!gCPU[thisCPU].disabled || nextThreadData->IsIdle());

	if (nextThread != oldThread) {
		if (enqueueOldThread) {
			if (putOldThreadAtBack)
				enqueue(oldThread, false);
			else
				oldThreadData->PutBack();
		}

		acquire_spinlock(&nextThread->scheduler_lock);
	}

	TRACE("reschedule(): cpu %" B_PRId32 ", next thread = %" B_PRId32 "\n", thisCPU,
		nextThread->id);

	T(ScheduleThread(nextThread, oldThread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
		oldThread, nextThread);

	ASSERT(nextThreadData->Core() == core);
	nextThread->state = B_THREAD_RUNNING;
	nextThreadData->StartCPUTime();

	// track CPU activity
	cpu->TrackActivity(oldThreadData, nextThreadData);

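	// A quantum (re)starts if we are actually switching threads or if the old
	// thread was preempted; otherwise the current thread simply keeps its
	// remaining quantum.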
	if (nextThread != oldThread || oldThread->cpu->preempted) {
		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);

		oldThread->cpu->preempted = false;
		if (!nextThreadData->IsIdle())
			nextThreadData->Continues();
		else
			gCurrentMode->rebalance_irqs(true);
		nextThreadData->StartQuantum();

		modeLocker.Unlock();

		SCHEDULER_EXIT_FUNCTION();

		if (nextThread != oldThread)
			switch_thread(oldThread, nextThread);
	}
}


/*!	Runs the scheduler.
	Note: expects thread spinlock to be held
*/
void
scheduler_reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	if (!sSchedulerEnabled) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && nextState != B_THREAD_READY)
			panic("scheduler_reschedule_no_op() called in non-ready thread");
		return;
	}

	reschedule(nextState);
}


status_t
scheduler_on_thread_create(Thread* thread, bool idleThread)
{
	thread->scheduler_data = new(std::nothrow) ThreadData(thread);
	if (thread->scheduler_data == NULL)
		return B_NO_MEMORY;
	return B_OK;
}


void
scheduler_on_thread_init(Thread* thread)
{
	ASSERT(thread->scheduler_data != NULL);

	if (thread_is_idle_thread(thread)) {
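		// Idle threads are initialized one per CPU in CPU order, so a running
		// counter yields the CPU this thread belongs to; pin it there and
		// attach it to that CPU's core.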
		static int32 sIdleThreadsID;
		int32 cpuID = atomic_add(&sIdleThreadsID, 1);

		thread->previous_cpu = &gCPU[cpuID];
		thread->pinned_to_cpu = 1;

		thread->scheduler_data->Init(CoreEntry::GetCore(cpuID));
	} else
		thread->scheduler_data->Init();
}


void
scheduler_on_thread_destroy(Thread* thread)
{
	delete thread->scheduler_data;
}


/*!	This starts the scheduler. Must be run in the context of the initial idle
	thread. Interrupts must be disabled and will be disabled when returning.
*/
void
scheduler_start()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	SCHEDULER_ENTER_FUNCTION();

	reschedule(B_THREAD_READY);
}


status_t
scheduler_set_operation_mode(scheduler_mode mode)
{
	if (mode != SCHEDULER_MODE_LOW_LATENCY
		&& mode != SCHEDULER_MODE_POWER_SAVING) {
		return B_BAD_VALUE;
	}

	dprintf("scheduler: switching to %s mode\n", sSchedulerModes[mode]->name);

	InterruptsBigSchedulerLocker _;

	gCurrentModeID = mode;
	gCurrentMode = sSchedulerModes[mode];
	gCurrentMode->switch_to_mode();

	ThreadData::ComputeQuantumLengths();

	return B_OK;
}


void
scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("scheduler_set_cpu_enabled: called with interrupts enabled");
#endif

	dprintf("scheduler: %s CPU %" B_PRId32 "\n",
		enabled ? "enabling" : "disabling", cpuID);

	InterruptsBigSchedulerLocker _;

	gCurrentMode->set_cpu_enabled(cpuID, enabled);

	CPUEntry* cpu = &gCPUEntries[cpuID];
	CoreEntry* core = cpu->Core();

	ASSERT(core->CPUCount() >= 0);
	if (enabled)
		cpu->Start();
	else {
		cpu->UpdatePriority(B_IDLE_PRIORITY);

		ThreadEnqueuer enqueuer;
		core->RemoveCPU(cpu, enqueuer);
	}

	gCPU[cpuID].disabled = !enabled;
	if (enabled)
		gCPUEnabled.SetBitAtomic(cpuID);
	else
		gCPUEnabled.ClearBitAtomic(cpuID);

	if (!enabled) {
		cpu->Stop();

		// don't wait until the thread quantum ends
		if (smp_get_current_cpu() != cpuID) {
			smp_send_ici(cpuID, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
				SMP_MSG_FLAG_ASYNC);
		}
	}
}


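/*!	Recursively walks the CPU topology tree, remembering the package and core
	IDs seen on the way down and recording them for every SMT leaf, so that
	sCPUToCore and sCPUToPackage can translate a logical CPU ID directly.
*/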
static void
traverse_topology_tree(const cpu_topology_node* node, int packageID, int coreID)
{
	switch (node->level) {
		case CPU_TOPOLOGY_SMT:
			sCPUToCore[node->id] = coreID;
			sCPUToPackage[node->id] = packageID;
			return;

		case CPU_TOPOLOGY_CORE:
			coreID = node->id;
			break;

		case CPU_TOPOLOGY_PACKAGE:
			packageID = node->id;
			break;

		default:
			break;
	}

	for (int32 i = 0; i < node->children_count; i++)
		traverse_topology_tree(node->children[i], packageID, coreID);
}


static status_t
build_topology_mappings(int32& cpuCount, int32& coreCount, int32& packageCount)
{
	cpuCount = smp_get_num_cpus();

	sCPUToCore = new(std::nothrow) int32[cpuCount];
	if (sCPUToCore == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToCoreDeleter(sCPUToCore);

	sCPUToPackage = new(std::nothrow) int32[cpuCount];
	if (sCPUToPackage == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToPackageDeleter(sCPUToPackage);

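	// Each core is counted once via the CPU whose SMT topology ID is 0; a
	// package is counted via the CPU that additionally has core topology ID 0.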
	coreCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0)
			coreCount++;
	}

	packageCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0
			&& gCPU[i].topology_id[CPU_TOPOLOGY_CORE] == 0) {
			packageCount++;
		}
	}

	const cpu_topology_node* root = get_cpu_topology();
	traverse_topology_tree(root, 0, 0);

	cpuToCoreDeleter.Detach();
	cpuToPackageDeleter.Detach();
	return B_OK;
}


static status_t
init()
{
	// create logical processor to core and package mappings
	int32 cpuCount, coreCount, packageCount;
	status_t result = build_topology_mappings(cpuCount, coreCount,
		packageCount);
	if (result != B_OK)
		return result;

	// disable parts of the scheduler logic that are not needed
	gSingleCore = coreCount == 1;
	scheduler_update_policy();

	gCoreCount = coreCount;
	gPackageCount = packageCount;

	gCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (gCPUEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CPUEntry> cpuEntriesDeleter(gCPUEntries);

	gCoreEntries = new(std::nothrow) CoreEntry[coreCount];
	if (gCoreEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CoreEntry> coreEntriesDeleter(gCoreEntries);

	gPackageEntries = new(std::nothrow) PackageEntry[packageCount];
	if (gPackageEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<PackageEntry> packageEntriesDeleter(gPackageEntries);

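	// The load heaps and the idle package list are global objects defined
	// elsewhere; construct them in place now that the core count is known.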
	new(&gCoreLoadHeap) CoreLoadHeap(coreCount);
	new(&gCoreHighLoadHeap) CoreLoadHeap(coreCount);

	new(&gIdlePackageList) IdlePackageList;

	for (int32 i = 0; i < cpuCount; i++) {
		CoreEntry* core = &gCoreEntries[sCPUToCore[i]];
		PackageEntry* package = &gPackageEntries[sCPUToPackage[i]];

		package->Init(sCPUToPackage[i]);
		core->Init(sCPUToCore[i], package);
		gCPUEntries[i].Init(i, core);

		core->AddCPU(&gCPUEntries[i]);
	}

	packageEntriesDeleter.Detach();
	coreEntriesDeleter.Detach();
	cpuEntriesDeleter.Detach();

	return B_OK;
}


void
scheduler_init()
{
	int32 cpuCount = smp_get_num_cpus();
	dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
		" cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
		gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
	Profiling::Profiler::Initialize();
#endif

	status_t result = init();
	if (result != B_OK)
		panic("scheduler_init: failed to initialize scheduler\n");

	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);

	init_debug_commands();

#if SCHEDULER_TRACING
	add_debugger_command_etc("scheduler", &cmd_scheduler,
		"Analyze scheduler tracing information",
		"<thread>\n"
		"Analyzes scheduler tracing information for a given thread.\n"
		"  <thread>  - ID of the thread.\n", 0);
#endif
}


void
scheduler_enable_scheduling()
{
	sSchedulerEnabled = true;
}


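// Decides which load statistics are worth maintaining: CPU load is only
// tracked when CPU performance can actually be adjusted (probed via
// increase_cpu_performance()), core load whenever there is more than one core
// or CPU load is tracked anyway.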
void
scheduler_update_policy()
{
	gTrackCPULoad = increase_cpu_performance(0) == B_OK;
	gTrackCoreLoad = !gSingleCore || gTrackCPULoad;
	dprintf("scheduler switches: single core: %s, cpu load tracking: %s,"
		" core load tracking: %s\n", gSingleCore ? "true" : "false",
		gTrackCPULoad ? "true" : "false",
		gTrackCoreLoad ? "true" : "false");
}


// #pragma mark - SchedulerListener


SchedulerListener::~SchedulerListener()
{
}


// #pragma mark - kernel private


/*!	Add the given scheduler listener. Thread lock must be held.
*/
void
scheduler_add_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Add(listener);
}


/*!	Remove the given scheduler listener. Thread lock must be held.
*/
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Remove(listener);
}


// #pragma mark - Syscalls


bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	// get the thread
	Thread* thread;
	if (id < 0) {
		thread = thread_get_current_thread();
		thread->AcquireReference();
	} else {
		thread = Thread::Get(id);
		if (thread == NULL)
			return 0;
	}
	BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
	InterruptsLocker _;
#endif

	ThreadData* threadData = thread->scheduler_data;
	CoreEntry* core = threadData->Core();
	if (core == NULL)
		core = &gCoreEntries[get_random<int32>() % gCoreCount];

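	// Rough estimate: latency grows with the number of ready threads per CPU
	// on the thread's core, shrinks for high-priority threads and is clamped
	// between the current mode's minimal quantum and maximum latency.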
	int32 threadCount = core->ThreadCount();
	if (core->CPUCount() > 0)
		threadCount /= core->CPUCount();

	if (threadData->GetEffectivePriority() > 0) {
		threadCount -= threadCount * THREAD_MAX_SET_PRIORITY
				/ threadData->GetEffectivePriority();
	}

	return std::min(std::max(threadCount * gCurrentMode->base_quantum,
			gCurrentMode->minimal_quantum),
		gCurrentMode->maximum_latency);
}


status_t
_user_set_scheduler_mode(int32 mode)
{
	scheduler_mode schedulerMode = static_cast<scheduler_mode>(mode);
	status_t error = scheduler_set_operation_mode(schedulerMode);
	if (error == B_OK)
		cpu_set_scheduler_mode(schedulerMode);
	return error;
}


int32
_user_get_scheduler_mode()
{
	return gCurrentModeID;
}
