// Source: /haiku/src/system/kernel/scheduler/low_latency.cpp (revision 909af08f4328301fbdef1ffb41f566c3b5bec0c7)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
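// Low latency scheduler mode: wakes idle cores aggressively when threads
// become ready and rebalances eagerly toward the least loaded core, trading
// power efficiency for responsiveness.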


#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"


using namespace Scheduler;


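// How much active core time (in µs, i.e. 100ms) may pass while a thread
// sleeps before its cache footprint on that core is considered lost.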
const bigtime_t kCacheExpire = 100000;


static void
switch_to_mode()
{
	// No mode-specific state to set up when switching to low latency.
}


static void
set_cpu_enabled(int32 /* cpu */, bool /* enabled */)
{
	// No mode-specific reaction to CPUs being enabled or disabled.
}


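// Returns whether the thread's working set has likely been evicted from its
// old core's caches: true once the core has accumulated more than
// kCacheExpire of active time since the thread went to sleep.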
static bool
has_cache_expired(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();
	if (threadData->WentSleepActive() == 0)
		return false;
	CoreEntry* core = threadData->Core();
	bigtime_t activeTime = core->GetActiveTime();
	return activeTime - threadData->WentSleepActive() > kCacheExpire;
}


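// Chooses a core for a thread that has no usable home core: prefer an idle
// core (from the most recently idled package, or failing that the most idle
// one), otherwise fall back to the least loaded core overall. A non-empty
// CPU mask on the thread restricts every candidate.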
static CoreEntry*
choose_core(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	// wake new package
	PackageEntry* package = gIdlePackageList.Last();
	if (package == NULL) {
		// wake new core
		package = PackageEntry::GetMostIdlePackage();
	}

	int32 index = 0;
	CPUSet mask = threadData->GetCPUMask();
	const bool useMask = !mask.IsEmpty();

	CoreEntry* core = NULL;
	if (package != NULL) {
		do {
			core = package->GetIdleCore(index++);
		} while (useMask && core != NULL && !core->CPUMask().Matches(mask));
	}
	if (core == NULL) {
		ReadSpinLocker coreLocker(gCoreHeapsLock);
		index = 0;
		// no idle cores, use least occupied core
		do {
			core = gCoreLoadHeap.PeekMinimum(index++);
		} while (useMask && core != NULL && !core->CPUMask().Matches(mask));
		if (core == NULL) {
			index = 0;
			do {
				core = gCoreHighLoadHeap.PeekMinimum(index++);
			} while (useMask && core != NULL && !core->CPUMask().Matches(mask));
		}
	}

	ASSERT(core != NULL);
	return core;
}


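// Decides whether the given thread should stay on its current core or migrate
// to the least loaded one. Migration requires the load gap to exceed the
// kLoadDifference hysteresis margin, and moving the thread's share of load
// must narrow that gap rather than merely invert it.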
static CoreEntry*
rebalance(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = threadData->Core();
	ASSERT(core != NULL);

	// Get the least loaded core.
	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CPUSet mask = threadData->GetCPUMask();
	const bool useMask = !mask.IsEmpty();

	int32 index = 0;
	CoreEntry* other;
	do {
		other = gCoreLoadHeap.PeekMinimum(index++);
		// Sanity check: a core in the load heap must have CPUs attached.
		if (other != NULL && useMask && other->CPUMask().IsEmpty())
			panic("other->CPUMask().IsEmpty()\n");
	} while (useMask && other != NULL && !other->CPUMask().Matches(mask));

	if (other == NULL) {
		index = 0;
		do {
			other = gCoreHighLoadHeap.PeekMinimum(index++);
		} while (useMask && other != NULL && !other->CPUMask().Matches(mask));
	}
	coreLocker.Unlock();
	ASSERT(other != NULL);

	// Check if the least loaded core is significantly less loaded than
	// the current one.
	int32 coreLoad = core->GetLoad();
	int32 otherLoad = other->GetLoad();
	if (other == core || otherLoad + kLoadDifference >= coreLoad)
		return core;

	// Check whether migrating the current thread would bring both core
	// loads closer to the average.
	int32 difference = coreLoad - otherLoad - kLoadDifference;
	ASSERT(difference > 0);

	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
	return difference >= threadLoad ? other : core;
}


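// If this CPU is busy, hands its heaviest IRQ over to the least loaded core,
// provided the local IRQ load is non-trivial and the target core is more than
// kLoadDifference less loaded than the current one.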
static void
rebalance_irqs(bool idle)
{
	SCHEDULER_ENTER_FUNCTION();

	if (idle)
		return;

	cpu_ent* cpu = get_cpu_struct();
	SpinLocker locker(cpu->irqs_lock);

	irq_assignment* chosen = NULL;
	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);

	int32 totalLoad = 0;
	while (irq != NULL) {
		if (chosen == NULL || chosen->load < irq->load)
			chosen = irq;
		totalLoad += irq->load;
		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
	}

	locker.Unlock();

	if (chosen == NULL || totalLoad < kLowLoad)
		return;

	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
	if (other == NULL)
		other = gCoreHighLoadHeap.PeekMinimum();
	coreLocker.Unlock();

	// Assert before dereferencing: one of the heaps must have yielded a core.
	ASSERT(other != NULL);
	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();

	CoreEntry* core = CoreEntry::GetCore(cpu->cpu_num);
	if (other == core)
		return;
	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
		return;

	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
}


// Tuning parameters and hook table for this mode (field meanings follow the
// scheduler_mode_operations definition in scheduler_modes.h).
scheduler_mode_operations gSchedulerLowLatencyMode = {
	"low latency",

	1000,		// base quantum, in µs
	100,		// minimal quantum, in µs
	{ 2, 5 },	// quantum multipliers

	5000,		// maximum latency, in µs

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};
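// A minimal sketch of how this table is expected to be engaged, assuming the
// mode-switching entry point declared in the scheduler headers (the exact
// call below is an assumption for illustration, not part of this file):
//
//	scheduler_set_operation_mode(SCHEDULER_MODE_LOW_LATENCY);
//
// Afterwards the scheduler dispatches through the hooks above, e.g. calling
// choose_core() when a woken thread needs a core and rebalance() when
// deciding whether a running thread should migrate.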