xref: /haiku/src/system/kernel/scheduler/scheduler.cpp (revision 4c8e85b316c35a9161f5a1c50ad70bc91c83a76f)
/*
 * Copyright 2013-2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2009, Rene Gollent, rene@gollent.com.
 * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*! The thread scheduler */


#include <OS.h>

#include <AutoDeleter.h>
#include <cpu.h>
#include <debug.h>
#include <int.h>
#include <kernel.h>
#include <kscheduler.h>
#include <listeners.h>
#include <load_tracking.h>
#include <scheduler_defs.h>
#include <smp.h>
#include <timer.h>
#include <util/Random.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"
#include "scheduler_tracing.h"


namespace Scheduler {


class ThreadEnqueuer : public ThreadProcessing {
public:
	void		operator()(ThreadData* thread);
};

scheduler_mode gCurrentModeID;
scheduler_mode_operations* gCurrentMode;

bool gSingleCore;
bool gTrackCoreLoad;
bool gTrackCPULoad;

}	// namespace Scheduler

using namespace Scheduler;


static bool sSchedulerEnabled;

SchedulerListenerList gSchedulerListeners;
spinlock gSchedulerListenersLock = B_SPINLOCK_INITIALIZER;

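// The available scheduler modes. scheduler_set_operation_mode() below uses
// the scheduler_mode value itself as an index into this array, so the order
// here must match the enum: SCHEDULER_MODE_LOW_LATENCY first,
// SCHEDULER_MODE_POWER_SAVING second.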
static scheduler_mode_operations* sSchedulerModes[] = {
	&gSchedulerLowLatencyMode,
	&gSchedulerPowerSavingMode,
};

// Since the CPU IDs used internally by the kernel bear no relation to the
// actual CPU topology, the following arrays are used to efficiently get the
// core and the package that a given CPU belongs to.
static int32* sCPUToCore;
static int32* sCPUToPackage;


static void enqueue(Thread* thread, bool newOne);


void
ThreadEnqueuer::operator()(ThreadData* thread)
{
	enqueue(thread->GetThread(), false);
}


void
scheduler_dump_thread_data(Thread* thread)
{
	thread->scheduler_data->Dump();
}


static void
enqueue(Thread* thread, bool newOne)
{
	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;

	int32 threadPriority = threadData->GetEffectivePriority();
	T(EnqueueThread(thread, threadPriority));

	CPUEntry* targetCPU = NULL;
	CoreEntry* targetCore = NULL;
	if (thread->pinned_to_cpu > 0) {
		ASSERT(thread->previous_cpu != NULL);
		ASSERT(threadData->Core() != NULL);
		targetCPU = &gCPUEntries[thread->previous_cpu->cpu_num];
	} else if (gSingleCore)
		targetCore = &gCoreEntries[0];
	else if (threadData->Core() != NULL
		&& (!newOne || !threadData->HasCacheExpired())) {
		targetCore = threadData->Rebalance();
	}

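	// The branches above only compute hints: a pinned thread has its CPU
	// fixed, a single-core system always uses core 0, and a thread whose
	// cache is still warm stays on its previous core. ChooseCoreAndCPU()
	// fills in whichever of targetCore/targetCPU is still NULL and returns
	// whether the chosen CPU has to reschedule.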
	bool rescheduleNeeded = threadData->ChooseCoreAndCPU(targetCore, targetCPU);

	TRACE("enqueueing thread %ld with priority %ld on CPU %ld (core %ld)\n",
		thread->id, threadPriority, targetCPU->ID(), targetCore->ID());

	threadData->Enqueue();

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadEnqueuedInRunQueue,
		thread);

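	// Kick the target CPU if the new thread should preempt the thread it is
	// currently accounted as running (its key in the CPU priority heap): the
	// local CPU only needs invoke_scheduler set, a remote CPU is sent a
	// reschedule ICI.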
	int32 heapPriority = CPUPriorityHeap::GetKey(targetCPU);
	if (threadPriority > heapPriority
		|| (threadPriority == heapPriority && rescheduleNeeded)) {

		if (targetCPU->ID() == smp_get_current_cpu())
			gCPU[targetCPU->ID()].invoke_scheduler = true;
		else {
			smp_send_ici(targetCPU->ID(), SMP_MSG_RESCHEDULE, 0, 0, 0,
				NULL, SMP_MSG_FLAG_ASYNC);
		}
	}
}


/*!	Enqueues the thread into the run queue.
	Note: thread lock must be held when entering this function
*/
void
scheduler_enqueue_in_run_queue(Thread *thread)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	SchedulerModeLocker _;

	TRACE("enqueueing new thread %ld with static priority %ld\n", thread->id,
		thread->priority);

	ThreadData* threadData = thread->scheduler_data;

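	// Waking up may forgive a priority penalty the thread accumulated
	// earlier; the exact policy is implemented by ThreadData (see
	// ShouldCancelPenalty() and CancelPenalty()).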
	if (threadData->ShouldCancelPenalty())
		threadData->CancelPenalty();

	enqueue(thread, true);
}


/*!	Sets the priority of a thread.
*/
int32
scheduler_set_thread_priority(Thread *thread, int32 priority)
{
	ASSERT(are_interrupts_enabled());

	InterruptsSpinLocker _(thread->scheduler_lock);
	SchedulerModeLocker modeLocker;

	SCHEDULER_ENTER_FUNCTION();

	ThreadData* threadData = thread->scheduler_data;
	int32 oldPriority = thread->priority;

	TRACE("changing thread %ld priority to %ld (old: %ld, effective: %ld)\n",
		thread->id, priority, oldPriority, threadData->GetEffectivePriority());

	thread->priority = priority;
	threadData->CancelPenalty();

	if (priority == oldPriority)
		return oldPriority;

	if (thread->state != B_THREAD_READY) {
		if (thread->state == B_THREAD_RUNNING) {
			ASSERT(threadData->Core() != NULL);

			ASSERT(thread->cpu != NULL);
			CPUEntry* cpu = &gCPUEntries[thread->cpu->cpu_num];

			CoreCPUHeapLocker _(threadData->Core());
			cpu->UpdatePriority(priority);
		}

		return oldPriority;
	}

	// The thread is in the run queue. We need to remove it and re-insert it at
	// a new position.

	T(RemoveThread(thread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadRemovedFromRunQueue,
		thread);

	if (threadData->Dequeue())
		enqueue(thread, true);

	return oldPriority;
}


void
scheduler_reschedule_ici()
{
	// This function is called as a result of an incoming ICI.
	// Make sure reschedule() is invoked.
	get_cpu_struct()->invoke_scheduler = true;
}


static inline void
stop_cpu_timers(Thread* fromThread, Thread* toThread)
{
	SpinLocker teamLocker(&fromThread->team->time_lock);
	SpinLocker threadLocker(&fromThread->time_lock);

	if (fromThread->HasActiveCPUTimeUserTimers()
		|| fromThread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_stop_cpu_timers(fromThread, toThread);
	}
}


static inline void
continue_cpu_timers(Thread* thread, cpu_ent* cpu)
{
	SpinLocker teamLocker(&thread->team->time_lock);
	SpinLocker threadLocker(&thread->time_lock);

	if (thread->HasActiveCPUTimeUserTimers()
		|| thread->team->HasActiveCPUTimeUserTimers()) {
		user_timer_continue_cpu_timers(thread, cpu->previous_thread);
	}
}


static void
thread_resumes(Thread* thread)
{
	cpu_ent* cpu = thread->cpu;

	release_spinlock(&cpu->previous_thread->scheduler_lock);

	// continue CPU time based user timers
	continue_cpu_timers(thread, cpu);

	// notify the user debugger code
	if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_scheduled(thread);
}


void
scheduler_new_thread_entry(Thread* thread)
{
	thread_resumes(thread);

	SpinLocker locker(thread->time_lock);
	thread->last_time = system_time();
}


/*!	Switches the currently running thread.
	This is a service function for scheduler implementations.

	\param fromThread The currently running thread.
	\param toThread The thread to switch to. Must be different from
		\a fromThread.
*/
static inline void
switch_thread(Thread* fromThread, Thread* toThread)
{
	// notify the user debugger code
	if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
		user_debug_thread_unscheduled(fromThread);

	// stop CPU time based user timers
	stop_cpu_timers(fromThread, toThread);

	// update CPU and Thread structures and perform the context switch
	cpu_ent* cpu = fromThread->cpu;
	toThread->previous_cpu = toThread->cpu = cpu;
	fromThread->cpu = NULL;
	cpu->running_thread = toThread;
	cpu->previous_thread = fromThread;

	arch_thread_set_current_thread(toThread);
	arch_thread_context_switch(fromThread, toThread);

	// The use of fromThread below looks weird, but is correct. fromThread had
	// been unscheduled earlier, but is back now. For a thread scheduled the
	// first time the same is done in thread.cpp:common_thread_entry().
	thread_resumes(fromThread);
}


static void
reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	int32 thisCPU = smp_get_current_cpu();

	CPUEntry* cpu = CPUEntry::GetCPU(thisCPU);
	CoreEntry* core = CoreEntry::GetCore(thisCPU);

	Thread* oldThread = thread_get_current_thread();
	ThreadData* oldThreadData = oldThread->scheduler_data;

	oldThreadData->StopCPUTime();

	SchedulerModeLocker modeLocker;

	TRACE("reschedule(): cpu %ld, current thread = %ld\n", thisCPU,
		oldThread->id);

	oldThread->state = nextState;

	// Interrupt handlers that ran during the quantum effectively stole time
	// from the thread; record the CPU's interrupt time so the quantum
	// accounting can compensate for it.
	oldThreadData->SetStolenInterruptTime(gCPU[thisCPU].interrupt_time);

	bool enqueueOldThread = false;
	bool putOldThreadAtBack = false;
	switch (nextState) {
		case B_THREAD_RUNNING:
		case B_THREAD_READY:
			enqueueOldThread = true;

			if (!oldThreadData->IsIdle()) {
				oldThreadData->Continues();
				if (oldThreadData->HasQuantumEnded(oldThread->cpu->preempted,
						oldThread->has_yielded)) {
					TRACE("enqueueing thread %ld into run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = true;
				} else {
					TRACE("putting thread %ld back in run queue priority ="
						" %ld\n", oldThread->id,
						oldThreadData->GetEffectivePriority());
					putOldThreadAtBack = false;
				}
			}

			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			oldThreadData->Dies();
			break;
		default:
			oldThreadData->GoesAway();
			TRACE("not enqueueing thread %ld into run queue next_state = %ld\n",
				oldThread->id, nextState);
			break;
	}

	oldThread->has_yielded = false;

	// select the thread with the highest priority and re-enqueue the old one
	ThreadData* nextThreadData;
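	// A disabled CPU may only run its idle thread. A non-idle old thread
	// loses its core assignment here and is re-enqueued below, so it ends up
	// on one of the still-enabled CPUs.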
	if (gCPU[thisCPU].disabled) {
		if (!oldThreadData->IsIdle()) {
			putOldThreadAtBack = oldThread->pinned_to_cpu == 0;
			oldThreadData->UnassignCore(true);

			CPURunQueueLocker cpuLocker(cpu);
			nextThreadData = cpu->PeekIdleThread();
			cpu->Remove(nextThreadData);
		} else
			nextThreadData = oldThreadData;
	} else {
		nextThreadData
			= cpu->ChooseNextThread(enqueueOldThread ? oldThreadData : NULL,
				putOldThreadAtBack);

		// update CPU heap
		CoreCPUHeapLocker cpuLocker(core);
		cpu->UpdatePriority(nextThreadData->GetEffectivePriority());
	}

	Thread* nextThread = nextThreadData->GetThread();
	ASSERT(!gCPU[thisCPU].disabled || nextThreadData->IsIdle());

	if (nextThread != oldThread) {
		if (enqueueOldThread) {
			if (putOldThreadAtBack)
				enqueue(oldThread, false);
			else
				oldThreadData->PutBack();
		}

		acquire_spinlock(&nextThread->scheduler_lock);
	}

	TRACE("reschedule(): cpu %ld, next thread = %ld\n", thisCPU,
		nextThread->id);

	T(ScheduleThread(nextThread, oldThread));

	// notify listeners
	NotifySchedulerListeners(&SchedulerListener::ThreadScheduled,
		oldThread, nextThread);

	ASSERT(nextThreadData->Core() == core);
	nextThread->state = B_THREAD_RUNNING;
	nextThreadData->StartCPUTime();

	// track CPU activity
	cpu->TrackActivity(oldThreadData, nextThreadData);

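	// Start a new quantum whenever a different thread takes over, and also
	// when the old thread continues after having been preempted mid-quantum.
	// An idle next thread additionally triggers an IRQ rebalance in the
	// current scheduler mode.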
	if (nextThread != oldThread || oldThread->cpu->preempted) {
		cpu->StartQuantumTimer(nextThreadData, oldThread->cpu->preempted);

		oldThread->cpu->preempted = false;
		if (!nextThreadData->IsIdle())
			nextThreadData->Continues();
		else
			gCurrentMode->rebalance_irqs(true);
		nextThreadData->StartQuantum();

		modeLocker.Unlock();

		SCHEDULER_EXIT_FUNCTION();

		if (nextThread != oldThread)
			switch_thread(oldThread, nextThread);
	}
}


/*!	Runs the scheduler.
	Note: expects thread spinlock to be held
*/
void
scheduler_reschedule(int32 nextState)
{
	ASSERT(!are_interrupts_enabled());
	SCHEDULER_ENTER_FUNCTION();

	if (!sSchedulerEnabled) {
		Thread* thread = thread_get_current_thread();
		if (thread != NULL && nextState != B_THREAD_READY)
			panic("scheduler_reschedule() called in non-ready thread before "
				"the scheduler was enabled");
		return;
	}

	reschedule(nextState);
}


status_t
scheduler_on_thread_create(Thread* thread, bool idleThread)
{
	thread->scheduler_data = new(std::nothrow) ThreadData(thread);
	if (thread->scheduler_data == NULL)
		return B_NO_MEMORY;
	return B_OK;
}


void
scheduler_on_thread_init(Thread* thread)
{
	ASSERT(thread->scheduler_data != NULL);

	if (thread_is_idle_thread(thread)) {
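		// This relies on the idle threads being initialized in ascending CPU
		// order, exactly one per CPU; the static counter then yields the ID
		// of the CPU each idle thread is pinned to.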
		static int32 sIdleThreadsID;
		int32 cpuID = atomic_add(&sIdleThreadsID, 1);

		thread->previous_cpu = &gCPU[cpuID];
		thread->pinned_to_cpu = 1;

		thread->scheduler_data->Init(CoreEntry::GetCore(cpuID));
	} else
		thread->scheduler_data->Init();
}


void
scheduler_on_thread_destroy(Thread* thread)
{
	delete thread->scheduler_data;
}


/*!	This starts the scheduler. Must be run in the context of the initial idle
	thread. Interrupts must be disabled and will be disabled when returning.
*/
void
scheduler_start()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	SCHEDULER_ENTER_FUNCTION();

	reschedule(B_THREAD_READY);
}


status_t
scheduler_set_operation_mode(scheduler_mode mode)
{
	if (mode != SCHEDULER_MODE_LOW_LATENCY
		&& mode != SCHEDULER_MODE_POWER_SAVING) {
		return B_BAD_VALUE;
	}

	dprintf("scheduler: switching to %s mode\n", sSchedulerModes[mode]->name);

	InterruptsBigSchedulerLocker _;

	gCurrentModeID = mode;
	gCurrentMode = sSchedulerModes[mode];
	gCurrentMode->switch_to_mode();

	ThreadData::ComputeQuantumLengths();

	return B_OK;
}
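
// A kernel-side caller would switch modes like this (sketch; userland goes
// through the _user_set_scheduler_mode() syscall below, which additionally
// notifies the CPU code via cpu_set_scheduler_mode()):
//
//	status_t status
//		= scheduler_set_operation_mode(SCHEDULER_MODE_POWER_SAVING);
//	if (status != B_OK)
//		dprintf("scheduler: mode switch failed\n");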


void
scheduler_set_cpu_enabled(int32 cpuID, bool enabled)
{
#if KDEBUG
	if (are_interrupts_enabled())
		panic("scheduler_set_cpu_enabled: called with interrupts enabled");
#endif

	dprintf("scheduler: %s CPU %" B_PRId32 "\n",
		enabled ? "enabling" : "disabling", cpuID);

	InterruptsBigSchedulerLocker _;

	gCurrentMode->set_cpu_enabled(cpuID, enabled);

	CPUEntry* cpu = &gCPUEntries[cpuID];
	CoreEntry* core = cpu->Core();

	ASSERT(core->CPUCount() >= 0);
	if (enabled)
		cpu->Start();
	else {
		cpu->UpdatePriority(B_IDLE_PRIORITY);

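		// Removing this CPU from its core migrates the threads still in its
		// run queue: RemoveCPU() hands them to the ThreadEnqueuer, whose
		// operator() simply re-enqueues each thread so that it gets a new
		// core and CPU assigned.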
		ThreadEnqueuer enqueuer;
		core->RemoveCPU(cpu, enqueuer);
	}

	gCPU[cpuID].disabled = !enabled;

	if (!enabled) {
		cpu->Stop();

		// don't wait until the thread quantum ends
		if (smp_get_current_cpu() != cpuID) {
			smp_send_ici(cpuID, SMP_MSG_RESCHEDULE, 0, 0, 0, NULL,
				SMP_MSG_FLAG_ASYNC);
		}
	}
}


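// Recursively walks the CPU topology tree. Package and core nodes update the
// IDs carried down the recursion; at each SMT leaf the enclosing core and
// package are recorded for that logical CPU.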
static void
traverse_topology_tree(const cpu_topology_node* node, int packageID, int coreID)
{
	switch (node->level) {
		case CPU_TOPOLOGY_SMT:
			sCPUToCore[node->id] = coreID;
			sCPUToPackage[node->id] = packageID;
			return;

		case CPU_TOPOLOGY_CORE:
			coreID = node->id;
			break;

		case CPU_TOPOLOGY_PACKAGE:
			packageID = node->id;
			break;

		default:
			break;
	}

	for (int32 i = 0; i < node->children_count; i++)
		traverse_topology_tree(node->children[i], packageID, coreID);
}


static status_t
build_topology_mappings(int32& cpuCount, int32& coreCount, int32& packageCount)
{
	cpuCount = smp_get_num_cpus();

	sCPUToCore = new(std::nothrow) int32[cpuCount];
	if (sCPUToCore == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToCoreDeleter(sCPUToCore);

	sCPUToPackage = new(std::nothrow) int32[cpuCount];
	if (sCPUToPackage == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<int32> cpuToPackageDeleter(sCPUToPackage);

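	// Every core contains exactly one logical CPU with SMT ID 0, and every
	// package exactly one logical CPU with both SMT ID 0 and core ID 0, so
	// counting these CPUs yields the core and package counts.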
	coreCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0)
			coreCount++;
	}

	packageCount = 0;
	for (int32 i = 0; i < cpuCount; i++) {
		if (gCPU[i].topology_id[CPU_TOPOLOGY_SMT] == 0
			&& gCPU[i].topology_id[CPU_TOPOLOGY_CORE] == 0) {
			packageCount++;
		}
	}

	const cpu_topology_node* root = get_cpu_topology();
	traverse_topology_tree(root, 0, 0);

	cpuToCoreDeleter.Detach();
	cpuToPackageDeleter.Detach();
	return B_OK;
}


static status_t
init()
{
	// create logical processor to core and package mappings
	int32 cpuCount, coreCount, packageCount;
	status_t result = build_topology_mappings(cpuCount, coreCount,
		packageCount);
	if (result != B_OK)
		return result;

	// disable parts of the scheduler logic that are not needed
	gSingleCore = coreCount == 1;
	scheduler_update_policy();

	gCoreCount = coreCount;
	gPackageCount = packageCount;

	gCPUEntries = new(std::nothrow) CPUEntry[cpuCount];
	if (gCPUEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CPUEntry> cpuEntriesDeleter(gCPUEntries);

	gCoreEntries = new(std::nothrow) CoreEntry[coreCount];
	if (gCoreEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<CoreEntry> coreEntriesDeleter(gCoreEntries);

	gPackageEntries = new(std::nothrow) PackageEntry[packageCount];
	if (gPackageEntries == NULL)
		return B_NO_MEMORY;
	ArrayDeleter<PackageEntry> packageEntriesDeleter(gPackageEntries);

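	// Construct the global load heaps and the idle package list in place;
	// the heaps take the core count, which is only known at this point, as
	// their capacity.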
	new(&gCoreLoadHeap) CoreLoadHeap(coreCount);
	new(&gCoreHighLoadHeap) CoreLoadHeap(coreCount);

	new(&gIdlePackageList) IdlePackageList;

	for (int32 i = 0; i < cpuCount; i++) {
		CoreEntry* core = &gCoreEntries[sCPUToCore[i]];
		PackageEntry* package = &gPackageEntries[sCPUToPackage[i]];

		package->Init(sCPUToPackage[i]);
		core->Init(sCPUToCore[i], package);
		gCPUEntries[i].Init(i, core);

		core->AddCPU(&gCPUEntries[i]);
	}

	packageEntriesDeleter.Detach();
	coreEntriesDeleter.Detach();
	cpuEntriesDeleter.Detach();

	return B_OK;
}


void
scheduler_init()
{
	int32 cpuCount = smp_get_num_cpus();
	dprintf("scheduler_init: found %" B_PRId32 " logical cpu%s and %" B_PRId32
		" cache level%s\n", cpuCount, cpuCount != 1 ? "s" : "",
		gCPUCacheLevelCount, gCPUCacheLevelCount != 1 ? "s" : "");

#ifdef SCHEDULER_PROFILING
	Profiling::Profiler::Initialize();
#endif

	status_t result = init();
	if (result != B_OK)
		panic("scheduler_init: failed to initialize scheduler\n");

	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);

	init_debug_commands();

#if SCHEDULER_TRACING
	add_debugger_command_etc("scheduler", &cmd_scheduler,
		"Analyze scheduler tracing information",
		"<thread>\n"
		"Analyzes scheduler tracing information for a given thread.\n"
		"  <thread>  - ID of the thread.\n", 0);
#endif
}


void
scheduler_enable_scheduling()
{
	sSchedulerEnabled = true;
}


void
scheduler_update_policy()
{
	gTrackCPULoad = increase_cpu_performance(0) == B_OK;
	gTrackCoreLoad = !gSingleCore || gTrackCPULoad;
	dprintf("scheduler switches: single core: %s, cpu load tracking: %s,"
		" core load tracking: %s\n", gSingleCore ? "true" : "false",
		gTrackCPULoad ? "true" : "false",
		gTrackCoreLoad ? "true" : "false");
}


// #pragma mark - SchedulerListener


SchedulerListener::~SchedulerListener()
{
}


// #pragma mark - kernel private


/*!	Adds the given scheduler listener. Acquires the scheduler listeners lock
	internally; callers do not need to hold the thread lock.
*/
void
scheduler_add_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Add(listener);
}
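
// Sketch of a hypothetical listener: the hooks fired from this file are
// ThreadEnqueuedInRunQueue(), ThreadRemovedFromRunQueue() and
// ThreadScheduled() (see the NotifySchedulerListeners() calls above). A
// context switch counter could look like this, with any remaining hooks
// overridden as SchedulerListener requires:
//
//	struct SwitchCounter : SchedulerListener {
//		int32 fCount;
//
//		virtual void ThreadScheduled(Thread* oldThread, Thread* newThread)
//		{
//			atomic_add(&fCount, 1);
//		}
//	};
//
//	scheduler_add_listener(new(std::nothrow) SwitchCounter);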


/*!	Removes the given scheduler listener. Like scheduler_add_listener(), this
	acquires the scheduler listeners lock internally.
*/
void
scheduler_remove_listener(struct SchedulerListener* listener)
{
	InterruptsSpinLocker _(gSchedulerListenersLock);
	gSchedulerListeners.Remove(listener);
}


// #pragma mark - Syscalls


bigtime_t
_user_estimate_max_scheduling_latency(thread_id id)
{
	syscall_64_bit_return_value();

	// get the thread
	Thread* thread;
	if (id < 0) {
		thread = thread_get_current_thread();
		thread->AcquireReference();
	} else {
		thread = Thread::Get(id);
		if (thread == NULL)
			return 0;
	}
	BReference<Thread> threadReference(thread, true);

#ifdef SCHEDULER_PROFILING
	InterruptsLocker _;
#endif

	ThreadData* threadData = thread->scheduler_data;
	CoreEntry* core = threadData->Core();
	if (core == NULL)
		core = &gCoreEntries[get_random<int32>() % gCoreCount];

	int32 threadCount = core->ThreadCount();
	if (core->CPUCount() > 0)
		threadCount /= core->CPUCount();

	if (threadData->GetEffectivePriority() > 0) {
		threadCount -= threadCount * THREAD_MAX_SET_PRIORITY
				/ threadData->GetEffectivePriority();
	}

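	// Rough upper bound: assume every other thread on the core runs a full
	// base quantum before this one gets its turn, with the count scaled down
	// for high-priority threads above; clamp the result between the mode's
	// minimal quantum and its maximum latency.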
	return std::min(std::max(threadCount * gCurrentMode->base_quantum,
			gCurrentMode->minimal_quantum),
		gCurrentMode->maximum_latency);
}


status_t
_user_set_scheduler_mode(int32 mode)
{
	scheduler_mode schedulerMode = static_cast<scheduler_mode>(mode);
	status_t error = scheduler_set_operation_mode(schedulerMode);
	if (error == B_OK)
		cpu_set_scheduler_mode(schedulerMode);
	return error;
}


int32
_user_get_scheduler_mode()
{
	return gCurrentModeID;
}