xref: /haiku/src/system/kernel/scheduler/power_saving.cpp (revision 984f843b917a1c4e077915c5961a6ef1cf8dabc7)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <util/atomic.h>
8 #include <util/AutoLock.h>
9 
10 #include "scheduler_common.h"
11 #include "scheduler_cpu.h"
12 #include "scheduler_modes.h"
13 #include "scheduler_profiler.h"
14 #include "scheduler_thread.h"
15 
16 
17 using namespace Scheduler;
18 
19 
20 const bigtime_t kCacheExpire = 100000;
21 
22 static CoreEntry* sSmallTaskCore;
23 
24 
// Called when the scheduler switches into power-saving mode; resets the
// cached small-task core so choose_small_task_core() picks a fresh one
// on demand.
static void
switch_to_mode()
{
	sSmallTaskCore = NULL;
}
30 
31 
32 static void
33 set_cpu_enabled(int32 cpu, bool enabled)
34 {
35 	if (!enabled)
36 		sSmallTaskCore = NULL;
37 }
38 
39 
40 static bool
41 has_cache_expired(const ThreadData* threadData)
42 {
43 	SCHEDULER_ENTER_FUNCTION();
44 	if (threadData->WentSleep() == 0)
45 		return false;
46 	return system_time() - threadData->WentSleep() > kCacheExpire;
47 }
48 
49 
50 static CoreEntry*
51 choose_small_task_core()
52 {
53 	SCHEDULER_ENTER_FUNCTION();
54 
55 	ReadSpinLocker coreLocker(gCoreHeapsLock);
56 	CoreEntry* core = gCoreLoadHeap.PeekMaximum();
57 	if (core == NULL)
58 		return sSmallTaskCore;
59 
60 	CoreEntry* smallTaskCore
61 		= atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
62 	if (smallTaskCore == NULL)
63 		return core;
64 	return smallTaskCore;
65 }
66 
67 
68 static CoreEntry*
69 choose_idle_core()
70 {
71 	SCHEDULER_ENTER_FUNCTION();
72 
73 	PackageEntry* package = PackageEntry::GetLeastIdlePackage();
74 
75 	if (package == NULL)
76 		package = gIdlePackageList.Last();
77 
78 	if (package != NULL)
79 		return package->GetIdleCore();
80 	return NULL;
81 }
82 
83 
84 static CoreEntry*
85 choose_core(const ThreadData* threadData)
86 {
87 	SCHEDULER_ENTER_FUNCTION();
88 
89 	CoreEntry* core = NULL;
90 
91 	CPUSet mask = threadData->GetCPUMask();
92 	if (mask.IsEmpty()) {
93 		// ignore when empty
94 		mask.SetAll();
95 	}
96 
97 	// try to pack all threads on one core
98 	core = choose_small_task_core();
99 	if (!core->CPUMask().Matches(mask))
100 		core = NULL;
101 
102 	if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
103 		ReadSpinLocker coreLocker(gCoreHeapsLock);
104 
105 		// run immediately on already woken core
106 		int32 index = 0;
107 		do {
108 			core = gCoreLoadHeap.PeekMinimum(index++);
109 		} while (core != NULL && !core->CPUMask().Matches(mask));
110 		if (core == NULL) {
111 			coreLocker.Unlock();
112 
113 			core = choose_idle_core();
114 			if (!core->CPUMask().Matches(mask))
115 				core = NULL;
116 
117 			if (core == NULL) {
118 				coreLocker.Lock();
119 				index = 0;
120 				do {
121 					core = gCoreHighLoadHeap.PeekMinimum(index++);
122 				} while (core != NULL && !core->CPUMask().Matches(mask));
123 			}
124 		}
125 	}
126 
127 	ASSERT(core != NULL);
128 	return core;
129 }
130 
131 
132 static CoreEntry*
133 rebalance(const ThreadData* threadData)
134 {
135 	SCHEDULER_ENTER_FUNCTION();
136 
137 	ASSERT(!gSingleCore);
138 
139 	CPUSet mask = threadData->GetCPUMask();
140 	if (mask.IsEmpty()) {
141 		// ignore when empty
142 		mask.SetAll();
143 	}
144 	CoreEntry* core = threadData->Core();
145 
146 	int32 coreLoad = core->GetLoad();
147 	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
148 	if (coreLoad > kHighLoad) {
149 		if (sSmallTaskCore == core) {
150 			sSmallTaskCore = NULL;
151 			CoreEntry* smallTaskCore = choose_small_task_core();
152 
153 			if (threadLoad > coreLoad / 3 || !smallTaskCore->CPUMask().Matches(mask))
154 				return core;
155 			return coreLoad > kVeryHighLoad ? smallTaskCore : core;
156 		}
157 
158 		if (threadLoad >= coreLoad / 2)
159 			return core;
160 
161 		ReadSpinLocker coreLocker(gCoreHeapsLock);
162 		CoreEntry* other;
163 		int32 index = 0;
164 		do {
165 			other = gCoreLoadHeap.PeekMaximum(index++);
166 		} while (other != NULL && !core->CPUMask().Matches(mask));
167 		if (other == NULL) {
168 			index = 0;
169 			do {
170 				other = gCoreHighLoadHeap.PeekMinimum(index++);
171 			} while (other != NULL && !core->CPUMask().Matches(mask));
172 		}
173 		coreLocker.Unlock();
174 		ASSERT(other != NULL);
175 
176 		int32 coreNewLoad = coreLoad - threadLoad;
177 		int32 otherNewLoad = other->GetLoad() + threadLoad;
178 		return coreNewLoad - otherNewLoad >= kLoadDifference / 2 ? other : core;
179 	}
180 
181 	if (coreLoad >= kMediumLoad)
182 		return core;
183 
184 	CoreEntry* smallTaskCore = choose_small_task_core();
185 	if (smallTaskCore == NULL || !smallTaskCore->CPUMask().Matches(mask))
186 		return core;
187 	return smallTaskCore->GetLoad() + threadLoad < kHighLoad
188 		? smallTaskCore : core;
189 }
190 
191 
// Moves all IRQs currently assigned to this CPU onto the small-task core,
// so other cores can stay asleep. No-op when no small-task core is set or
// when this CPU already belongs to it.
static inline void
pack_irqs()
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
	if (smallTaskCore == NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
		return;

	SpinLocker locker(cpu->irqs_lock);
	while (list_get_first_item(&cpu->irqs) != NULL) {
		irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
		// the lock must be dropped while reassigning the IRQ
		// NOTE(review): loop termination relies on assign_io_interrupt_to_cpu()
		// removing the IRQ from this CPU's list — verify, otherwise this can
		// spin forever
		locker.Unlock();

		// pick the least loaded CPU of the small-task core as the new target
		int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();

		if (newCPU != cpu->cpu_num)
			assign_io_interrupt_to_cpu(irq->irq, newCPU);

		locker.Lock();
	}
}
218 
219 
220 static void
221 rebalance_irqs(bool idle)
222 {
223 	SCHEDULER_ENTER_FUNCTION();
224 
225 	if (idle && sSmallTaskCore != NULL) {
226 		pack_irqs();
227 		return;
228 	}
229 
230 	if (idle || sSmallTaskCore != NULL)
231 		return;
232 
233 	cpu_ent* cpu = get_cpu_struct();
234 	SpinLocker locker(cpu->irqs_lock);
235 
236 	irq_assignment* chosen = NULL;
237 	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
238 
239 	while (irq != NULL) {
240 		if (chosen == NULL || chosen->load < irq->load)
241 			chosen = irq;
242 		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
243 	}
244 
245 	locker.Unlock();
246 
247 	if (chosen == NULL || chosen->load < kLowLoad)
248 		return;
249 
250 	ReadSpinLocker coreLocker(gCoreHeapsLock);
251 	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
252 	coreLocker.Unlock();
253 	if (other == NULL)
254 		return;
255 	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
256 
257 	CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
258 	if (other == core)
259 		return;
260 	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
261 		return;
262 
263 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
264 }
265 
266 
// Operations table for the power-saving scheduler mode, registered with the
// scheduler core.
// NOTE(review): the numeric fields are positional initializers of
// scheduler_mode_operations — presumably base quantum, minimal quantum,
// quantum multipliers and maximum latency (microseconds); verify against
// scheduler_modes.h before relying on these labels.
scheduler_mode_operations gSchedulerPowerSavingMode = {
	"power saving",

	2000,
	500,
	{ 3, 10 },

	20000,

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};
283 
284