xref: /haiku/src/system/kernel/scheduler/power_saving.cpp (revision b8a45b3a2df2379b4301bf3bd5949b9a105be4ba)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <util/atomic.h>
8 #include <util/AutoLock.h>
9 
10 #include "scheduler_common.h"
11 #include "scheduler_cpu.h"
12 #include "scheduler_modes.h"
13 #include "scheduler_profiler.h"
14 #include "scheduler_thread.h"
15 
16 
17 using namespace Scheduler;
18 
19 
20 const bigtime_t kCacheExpire = 100000;
21 
22 static CoreEntry* sSmallTaskCore;
23 
24 
// Invoked when the scheduler switches into power-saving mode. Drops the
// cached small-task core so it will be re-chosen under this policy's rules.
static void
switch_to_mode()
{
	sSmallTaskCore = NULL;
}
30 
31 
32 static void
33 set_cpu_enabled(int32 cpu, bool enabled)
34 {
35 	if (!enabled)
36 		sSmallTaskCore = NULL;
37 }
38 
39 
40 static bool
41 has_cache_expired(const ThreadData* threadData)
42 {
43 	SCHEDULER_ENTER_FUNCTION();
44 	if (threadData->WentSleep() == 0)
45 		return false;
46 	return system_time() - threadData->WentSleep() > kCacheExpire;
47 }
48 
49 
50 static CoreEntry*
51 choose_small_task_core()
52 {
53 	SCHEDULER_ENTER_FUNCTION();
54 
55 	ReadSpinLocker coreLocker(gCoreHeapsLock);
56 	CoreEntry* core = gCoreLoadHeap.PeekMaximum();
57 	if (core == NULL)
58 		return sSmallTaskCore;
59 
60 	CoreEntry* smallTaskCore
61 		= atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
62 	if (smallTaskCore == NULL)
63 		return core;
64 	return smallTaskCore;
65 }
66 
67 
68 static CoreEntry*
69 choose_idle_core()
70 {
71 	SCHEDULER_ENTER_FUNCTION();
72 
73 	PackageEntry* package = PackageEntry::GetLeastIdlePackage();
74 
75 	if (package == NULL)
76 		package = gIdlePackageList.Last();
77 
78 	if (package != NULL)
79 		return package->GetIdleCore();
80 	return NULL;
81 }
82 
83 
84 static CoreEntry*
85 choose_core(const ThreadData* threadData)
86 {
87 	SCHEDULER_ENTER_FUNCTION();
88 
89 	CoreEntry* core = NULL;
90 
91 	CPUSet mask = threadData->GetCPUMask();
92 	if (mask.IsEmpty()) {
93 		// ignore when empty
94 		mask.SetAll();
95 	}
96 
97 	// try to pack all threads on one core
98 	core = choose_small_task_core();
99 	if (core != NULL && !core->CPUMask().Matches(mask))
100 		core = NULL;
101 
102 	if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
103 		ReadSpinLocker coreLocker(gCoreHeapsLock);
104 
105 		// run immediately on already woken core
106 		int32 index = 0;
107 		do {
108 			core = gCoreLoadHeap.PeekMinimum(index++);
109 		} while (core != NULL && !core->CPUMask().Matches(mask));
110 		if (core == NULL) {
111 			coreLocker.Unlock();
112 
113 			core = choose_idle_core();
114 			if (!core->CPUMask().Matches(mask))
115 				core = NULL;
116 
117 			if (core == NULL) {
118 				coreLocker.Lock();
119 				index = 0;
120 				do {
121 					core = gCoreHighLoadHeap.PeekMinimum(index++);
122 				} while (core != NULL && !core->CPUMask().Matches(mask));
123 			}
124 		}
125 	}
126 
127 	ASSERT(core != NULL);
128 	return core;
129 }
130 
131 
132 static CoreEntry*
133 rebalance(const ThreadData* threadData)
134 {
135 	SCHEDULER_ENTER_FUNCTION();
136 
137 	ASSERT(!gSingleCore);
138 
139 	CPUSet mask = threadData->GetCPUMask();
140 	if (mask.IsEmpty()) {
141 		// ignore when empty
142 		mask.SetAll();
143 	}
144 	CoreEntry* core = threadData->Core();
145 
146 	int32 coreLoad = core->GetLoad();
147 	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
148 	if (coreLoad > kHighLoad) {
149 		if (sSmallTaskCore == core) {
150 			sSmallTaskCore = NULL;
151 			CoreEntry* smallTaskCore = choose_small_task_core();
152 
153 			if (threadLoad > coreLoad / 3 || smallTaskCore == NULL
154 				|| !smallTaskCore->CPUMask().Matches(mask)) {
155 				return core;
156 			}
157 			return coreLoad > kVeryHighLoad ? smallTaskCore : core;
158 		}
159 
160 		if (threadLoad >= coreLoad / 2)
161 			return core;
162 
163 		ReadSpinLocker coreLocker(gCoreHeapsLock);
164 		CoreEntry* other;
165 		int32 index = 0;
166 		do {
167 			other = gCoreLoadHeap.PeekMaximum(index++);
168 		} while (other != NULL && !core->CPUMask().Matches(mask));
169 		if (other == NULL) {
170 			index = 0;
171 			do {
172 				other = gCoreHighLoadHeap.PeekMinimum(index++);
173 			} while (other != NULL && !core->CPUMask().Matches(mask));
174 		}
175 		coreLocker.Unlock();
176 		ASSERT(other != NULL);
177 
178 		int32 coreNewLoad = coreLoad - threadLoad;
179 		int32 otherNewLoad = other->GetLoad() + threadLoad;
180 		return coreNewLoad - otherNewLoad >= kLoadDifference / 2 ? other : core;
181 	}
182 
183 	if (coreLoad >= kMediumLoad)
184 		return core;
185 
186 	CoreEntry* smallTaskCore = choose_small_task_core();
187 	if (smallTaskCore == NULL || !smallTaskCore->CPUMask().Matches(mask))
188 		return core;
189 	return smallTaskCore->GetLoad() + threadLoad < kHighLoad
190 		? smallTaskCore : core;
191 }
192 
193 
// Moves every IRQ currently assigned to this CPU over to a CPU of the
// small-task core, so the rest of the system can stay idle. No-op when no
// small-task core is chosen or this CPU already belongs to it.
static inline void
pack_irqs()
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
	if (smallTaskCore == NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
		return;

	SpinLocker locker(cpu->irqs_lock);
	while (list_get_first_item(&cpu->irqs) != NULL) {
		irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
		// The lock is dropped while reassigning; loop progress relies on
		// assign_io_interrupt_to_cpu() removing the IRQ from this CPU's
		// list — presumably so, verify in the interrupt code.
		locker.Unlock();

		// Pick a CPU from the small-task core's CPU heap root — assumed to
		// be its least loaded CPU; TODO confirm heap ordering.
		int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();

		// NOTE(review): if newCPU ever equalled cpu->cpu_num the IRQ would
		// stay first in the list and this loop would spin forever. The
		// early return above should make that impossible (smallTaskCore is
		// a different core) — confirm.
		if (newCPU != cpu->cpu_num)
			assign_io_interrupt_to_cpu(irq->irq, newCPU);

		locker.Lock();
	}
}
220 
221 
222 static void
223 rebalance_irqs(bool idle)
224 {
225 	SCHEDULER_ENTER_FUNCTION();
226 
227 	if (idle && sSmallTaskCore != NULL) {
228 		pack_irqs();
229 		return;
230 	}
231 
232 	if (idle || sSmallTaskCore != NULL)
233 		return;
234 
235 	cpu_ent* cpu = get_cpu_struct();
236 	SpinLocker locker(cpu->irqs_lock);
237 
238 	irq_assignment* chosen = NULL;
239 	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
240 
241 	while (irq != NULL) {
242 		if (chosen == NULL || chosen->load < irq->load)
243 			chosen = irq;
244 		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
245 	}
246 
247 	locker.Unlock();
248 
249 	if (chosen == NULL || chosen->load < kLowLoad)
250 		return;
251 
252 	ReadSpinLocker coreLocker(gCoreHeapsLock);
253 	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
254 	coreLocker.Unlock();
255 	if (other == NULL)
256 		return;
257 	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
258 
259 	CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
260 	if (other == core)
261 		return;
262 	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
263 		return;
264 
265 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
266 }
267 
268 
// Operation table for the power-saving scheduler mode. Field order follows
// scheduler_mode_operations (declared in scheduler_modes.h, not visible
// here) — the value annotations below are assumptions to confirm against
// that header.
scheduler_mode_operations gSchedulerPowerSavingMode = {
	"power saving",			// mode name

	2000,					// presumably base quantum (µs) — confirm
	500,					// presumably minimal quantum — confirm
	{ 3, 10 },				// presumably quantum multipliers — confirm

	20000,					// presumably maximum latency (µs) — confirm

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};
285 
286