/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include <util/atomic.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"
#include "scheduler_thread.h"


using namespace Scheduler;

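// Interval (in microseconds, i.e. 100 ms) after which a sleeping thread is
// assumed to have lost any useful state in its previous core's caches.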
const bigtime_t kCacheExpire = 100000;

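// The core that small (low load) tasks are packed onto, so that the remaining
// cores can stay idle. NULL when no such core has been designated yet.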
static CoreEntry* sSmallTaskCore;


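// Called when the scheduler switches to this mode: forget any previously
// designated small task core so that it is chosen anew.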
static void
switch_to_mode()
{
	sSmallTaskCore = NULL;
}


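// Called when a CPU is enabled or disabled. Disabling a CPU may take the
// small task core offline, so the designation is simply dropped;
// choose_small_task_core() will pick a new one on demand.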
static void
set_cpu_enabled(int32 cpu, bool enabled)
{
	if (!enabled)
		sSmallTaskCore = NULL;
}


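// Returns whether the thread has been asleep longer than kCacheExpire, in
// which case its affinity to its previous core's caches is considered lost.
// A WentSleep() of 0 means the thread has not gone to sleep at all, so its
// cache state is treated as still valid.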
static bool
has_cache_expired(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();
	if (threadData->WentSleep() == 0)
		return false;
	return system_time() - threadData->WentSleep() > kCacheExpire;
}


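// Returns the core that small tasks should be packed onto, designating one if
// necessary. The candidate is the maximum of gCoreLoadHeap, i.e. the busiest
// core that has not yet crossed the high load threshold;
// atomic_pointer_test_and_set() ensures concurrent callers agree on a single
// designation.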
static CoreEntry*
choose_small_task_core()
{
	SCHEDULER_ENTER_FUNCTION();

	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CoreEntry* core = gCoreLoadHeap.PeekMaximum();
	if (core == NULL)
		return sSmallTaskCore;

	CoreEntry* smallTaskCore
		= atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
	if (smallTaskCore == NULL)
		return core;
	return smallTaskCore;
}


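// Returns an idle core, preferring one in the least idle package so that
// fully idle packages can remain in their deep sleep states. May return NULL
// if no idle core exists.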
static CoreEntry*
choose_idle_core()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* package = PackageEntry::GetLeastIdlePackage();

	if (package == NULL)
		package = gIdlePackageList.Last();

	if (package != NULL)
		return package->GetIdleCore();
	return NULL;
}


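// Chooses a core for a thread that has no valid previous core, respecting the
// thread's CPU mask if one is set: first try to pack it onto the small task
// core, then fall back to the least loaded already-woken core, then an idle
// core, and finally the least loaded of the high load cores.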
static CoreEntry*
choose_core(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = NULL;

	CPUSet mask = threadData->GetCPUMask();
	const bool useMask = !mask.IsEmpty();

	// try to pack all threads on one core
	core = choose_small_task_core();
	if (core != NULL && (useMask && !core->CPUMask().Matches(mask)))
		core = NULL;

	if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
		ReadSpinLocker coreLocker(gCoreHeapsLock);

		// run immediately on already woken core
		int32 index = 0;
		do {
			core = gCoreLoadHeap.PeekMinimum(index++);
		} while (useMask && core != NULL && !core->CPUMask().Matches(mask));
		if (core == NULL) {
			coreLocker.Unlock();

			core = choose_idle_core();
			// choose_idle_core() may return NULL, so guard the dereference
			if (useMask && core != NULL && !core->CPUMask().Matches(mask))
				core = NULL;

			if (core == NULL) {
				coreLocker.Lock();
				index = 0;
				do {
					core = gCoreHighLoadHeap.PeekMinimum(index++);
				} while (useMask && core != NULL
					&& !core->CPUMask().Matches(mask));
			}
		}
	}

	ASSERT(core != NULL);
	return core;
}


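// Decides whether the thread should stay on its current core or migrate.
// Threads are moved off an overloaded core only when that noticeably reduces
// the imbalance, while threads on a lightly loaded core are consolidated onto
// the small task core so that the core they leave can go idle.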
static CoreEntry*
rebalance(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);

	CPUSet mask = threadData->GetCPUMask();
	const bool useMask = !mask.IsEmpty();

	CoreEntry* core = threadData->Core();

	int32 coreLoad = core->GetLoad();
	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
	if (coreLoad > kHighLoad) {
		if (sSmallTaskCore == core) {
			sSmallTaskCore = NULL;
			CoreEntry* smallTaskCore = choose_small_task_core();

			if (threadLoad > coreLoad / 3 || smallTaskCore == NULL
				|| (useMask && !smallTaskCore->CPUMask().Matches(mask))) {
				return core;
			}
			return coreLoad > kVeryHighLoad ? smallTaskCore : core;
		}

		if (threadLoad >= coreLoad / 2)
			return core;

		ReadSpinLocker coreLocker(gCoreHeapsLock);
		CoreEntry* other;
		int32 index = 0;
		do {
			other = gCoreLoadHeap.PeekMaximum(index++);
		} while (useMask && other != NULL && !other->CPUMask().Matches(mask));
		if (other == NULL) {
			index = 0;
			do {
				other = gCoreHighLoadHeap.PeekMinimum(index++);
			} while (useMask && other != NULL
				&& !other->CPUMask().Matches(mask));
		}
		coreLocker.Unlock();
		ASSERT(other != NULL);

		int32 coreNewLoad = coreLoad - threadLoad;
		int32 otherNewLoad = other->GetLoad() + threadLoad;
		return coreNewLoad - otherNewLoad >= kLoadDifference / 2 ? other : core;
	}

	if (coreLoad >= kMediumLoad)
		return core;

	CoreEntry* smallTaskCore = choose_small_task_core();
	if (smallTaskCore == NULL
		|| (useMask && !smallTaskCore->CPUMask().Matches(mask)))
		return core;
	return smallTaskCore->GetLoad() + threadLoad < kHighLoad
		? smallTaskCore : core;
}


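// Moves all IRQs assigned to the current (idle) CPU over to the small task
// core, so that interrupt handling does not keep otherwise idle cores awake.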
static inline void
pack_irqs()
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
	if (smallTaskCore == NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
		return;

	SpinLocker locker(cpu->irqs_lock);
	while (list_get_first_item(&cpu->irqs) != NULL) {
		irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
		locker.Unlock();

		int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();

		if (newCPU != cpu->cpu_num)
			assign_io_interrupt_to_cpu(irq->irq, newCPU);

		locker.Lock();
	}
}


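// On an idle CPU, IRQs are packed onto the small task core (see pack_irqs()).
// Otherwise, if no small task core is designated, the busiest IRQ of this CPU
// is moved to the least loaded core, provided its load is significant and the
// load difference between the cores makes the migration worthwhile.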
static void
rebalance_irqs(bool idle)
{
	SCHEDULER_ENTER_FUNCTION();

	if (idle && sSmallTaskCore != NULL) {
		pack_irqs();
		return;
	}

	if (idle || sSmallTaskCore != NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	SpinLocker locker(cpu->irqs_lock);

	irq_assignment* chosen = NULL;
	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);

	while (irq != NULL) {
		if (chosen == NULL || chosen->load < irq->load)
			chosen = irq;
		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
	}

	locker.Unlock();

	if (chosen == NULL || chosen->load < kLowLoad)
		return;

	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
	coreLocker.Unlock();
	if (other == NULL)
		return;
	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();

	CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
	if (other == core)
		return;
	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
		return;

	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
}


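// The power saving mode descriptor. The initializer order follows
// scheduler_mode_operations in scheduler_modes.h: name, base and minimal
// quantum (in microseconds), quantum multipliers, maximum latency, followed
// by the mode's hook functions.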
scheduler_mode_operations gSchedulerPowerSavingMode = {
	"power saving",

	2000,
	500,
	{ 3, 10 },

	20000,

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};