xref: /haiku/src/system/kernel/scheduler/power_saving.cpp (revision e81a954787e50e56a7f06f72705b7859b6ab06d1)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <util/atomic.h>
8 #include <util/AutoLock.h>
9 
10 #include "scheduler_common.h"
11 #include "scheduler_cpu.h"
12 #include "scheduler_modes.h"
13 #include "scheduler_profiler.h"
14 #include "scheduler_thread.h"
15 
16 
17 using namespace Scheduler;
18 
19 
// A sleeping thread's cached working set is considered expired once it has
// been asleep for longer than this interval (bigtime_t is in microseconds —
// compared against system_time() in has_cache_expired()).
const bigtime_t kCacheExpire = 100000;

// The core that small (lightly loaded) tasks are packed onto so other cores
// can stay idle; NULL while no core has been elected. Elected lock-free in
// choose_small_task_core() via atomic_pointer_test_and_set(), and reset with
// plain stores in switch_to_mode()/set_cpu_enabled()/rebalance().
static CoreEntry* sSmallTaskCore;
23 
24 
// Invoked when the scheduler switches into power-saving mode: forget any
// previously elected small-task core so that one is re-elected lazily by
// choose_small_task_core().
static void
switch_to_mode()
{
	// NOTE(review): plain (non-atomic) store, while readers elsewhere use
	// atomic_pointer_get() — presumably acceptable for a pointer-sized
	// store on the supported architectures; confirm.
	sSmallTaskCore = NULL;
}
30 
31 
32 static void
33 set_cpu_enabled(int32 cpu, bool enabled)
34 {
35 	if (!enabled)
36 		sSmallTaskCore = NULL;
37 }
38 
39 
40 static bool
41 has_cache_expired(const ThreadData* threadData)
42 {
43 	SCHEDULER_ENTER_FUNCTION();
44 	if (threadData->WentSleep() == 0)
45 		return false;
46 	return system_time() - threadData->WentSleep() > kCacheExpire;
47 }
48 
49 
// Returns the core that small tasks should be packed onto, electing one if
// necessary. The most loaded core from the non-high-load heap is preferred
// as candidate, so packing work there pushes it toward (but not past) high
// load while the remaining cores can idle.
static CoreEntry*
choose_small_task_core()
{
	SCHEDULER_ENTER_FUNCTION();

	ReadSpinLocker coreLocker(gCoreHeapsLock);
	CoreEntry* core = gCoreLoadHeap.PeekMaximum();
	if (core == NULL) {
		// No candidate available (all cores high-load or offline); fall
		// back to whatever is currently elected — possibly NULL.
		return sSmallTaskCore;
	}

	// Publish our candidate only if no other CPU won the race first; on a
	// lost race, adopt the winner's choice instead of ours.
	CoreEntry* smallTaskCore
		= atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
	if (smallTaskCore == NULL)
		return core;
	return smallTaskCore;
}
66 
67 
68 static CoreEntry*
69 choose_idle_core()
70 {
71 	SCHEDULER_ENTER_FUNCTION();
72 
73 	PackageEntry* package = PackageEntry::GetLeastIdlePackage();
74 
75 	if (package == NULL)
76 		package = gIdlePackageList.Last();
77 
78 	if (package != NULL)
79 		return package->GetIdleCore();
80 
81 	return NULL;
82 }
83 
84 
// Chooses the core a newly runnable thread should run on. First tries to
// pack it onto the small-task core; if that would push the core to high
// load (or no core is elected), spreads to the least loaded awake core, an
// idle core, or — as a last resort — the least loaded high-load core.
static CoreEntry*
choose_core(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = NULL;

	// try to pack all threads on one core
	core = choose_small_task_core();

	if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
		ReadSpinLocker coreLocker(gCoreHeapsLock);

		// run immediately on already woken core
		core = gCoreLoadHeap.PeekMinimum();
		if (core == NULL) {
			// The non-high-load heap is empty. Release gCoreHeapsLock
			// before choose_idle_core() — presumably to avoid holding it
			// across the package locking done there (confirm lock order).
			coreLocker.Unlock();

			core = choose_idle_core();

			if (core == NULL) {
				// No idle core either; reacquire the heap lock and settle
				// for the least loaded of the high-load cores.
				coreLocker.Lock();
				core = gCoreHighLoadHeap.PeekMinimum();
			}
		}
	}

	ASSERT(core != NULL);
	return core;
}
115 
116 
// Decides which core the given (already placed) thread should run on next,
// balancing cache affinity against keeping load packed on few cores.
// Returns the thread's current core or a better target.
static CoreEntry*
rebalance(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);

	CoreEntry* core = threadData->Core();

	int32 coreLoad = core->GetLoad();
	// The share of load this thread contributes per CPU of its core.
	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
	if (coreLoad > kHighLoad) {
		if (sSmallTaskCore == core) {
			// The packing core itself became overloaded: drop the election
			// and immediately elect a replacement.
			sSmallTaskCore = NULL;
			CoreEntry* smallTaskCore = choose_small_task_core();

			// A thread responsible for more than a third of the load stays
			// put — moving it would just shift the problem.
			if (threadLoad > coreLoad / 3)
				return core;
			// NOTE(review): smallTaskCore may be NULL here (when
			// choose_small_task_core() found no candidate) and may even be
			// this same core again — confirm callers tolerate both.
			return coreLoad > kVeryHighLoad ? smallTaskCore : core;
		}

		// A thread carrying half the core's load won't be helped by
		// migration; keep its cache warm where it is.
		if (threadLoad >= coreLoad / 2)
			return core;

		ReadSpinLocker coreLocker(gCoreHeapsLock);
		CoreEntry* other = gCoreLoadHeap.PeekMaximum();
		if (other == NULL)
			other = gCoreHighLoadHeap.PeekMinimum();
		coreLocker.Unlock();
		ASSERT(other != NULL);

		// Only migrate if doing so reduces the imbalance by a meaningful
		// margin (half the configured load difference).
		int32 coreNewLoad = coreLoad - threadLoad;
		int32 otherNewLoad = other->GetLoad() + threadLoad;
		return coreNewLoad - otherNewLoad >= kLoadDifference / 2 ? other : core;
	}

	if (coreLoad >= kMediumLoad)
		return core;

	// Lightly loaded core: pull the thread onto the small-task core, unless
	// that would push the small-task core into high load.
	CoreEntry* smallTaskCore = choose_small_task_core();
	if (smallTaskCore == NULL)
		return core;
	return smallTaskCore->GetLoad() + threadLoad < kHighLoad
		? smallTaskCore : core;
}
162 
163 
// Moves every IRQ currently assigned to this (idle) CPU over to the
// small-task core, so this CPU's package can stay asleep.
static inline void
pack_irqs()
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
	if (smallTaskCore == NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	// Nothing to do if this CPU already belongs to the small-task core.
	if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
		return;

	SpinLocker locker(cpu->irqs_lock);
	while (list_get_first_item(&cpu->irqs) != NULL) {
		irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
		// Drop the lock while reassigning. NOTE(review): loop termination
		// presumably relies on assign_io_interrupt_to_cpu() removing the
		// IRQ from this CPU's list — verify; if newCPU ever equaled
		// cpu->cpu_num this would spin (guarded above by the core check).
		locker.Unlock();

		// Target the least loaded CPU of the small-task core; assumes its
		// CPU heap is non-empty (at least one enabled CPU) — confirm.
		int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();

		if (newCPU != cpu->cpu_num)
			assign_io_interrupt_to_cpu(irq->irq, newCPU);

		locker.Lock();
	}
}
190 
191 
192 static void
193 rebalance_irqs(bool idle)
194 {
195 	SCHEDULER_ENTER_FUNCTION();
196 
197 	if (idle && sSmallTaskCore != NULL) {
198 		pack_irqs();
199 		return;
200 	}
201 
202 	if (idle || sSmallTaskCore != NULL)
203 		return;
204 
205 	cpu_ent* cpu = get_cpu_struct();
206 	SpinLocker locker(cpu->irqs_lock);
207 
208 	irq_assignment* chosen = NULL;
209 	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
210 
211 	while (irq != NULL) {
212 		if (chosen == NULL || chosen->load < irq->load)
213 			chosen = irq;
214 		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
215 	}
216 
217 	locker.Unlock();
218 
219 	if (chosen == NULL || chosen->load < kLowLoad)
220 		return;
221 
222 	ReadSpinLocker coreLocker(gCoreHeapsLock);
223 	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
224 	coreLocker.Unlock();
225 	if (other == NULL)
226 		return;
227 	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
228 
229 	CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
230 	if (other == core)
231 		return;
232 	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
233 		return;
234 
235 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
236 }
237 
238 
// Power-saving scheduler mode: pack threads and IRQs onto as few cores as
// possible so whole packages can go idle. Positional initializers follow
// scheduler_mode_operations; field labels below are per scheduler_modes.h —
// NOTE(review): confirm against the struct definition.
scheduler_mode_operations gSchedulerPowerSavingMode = {
	"power saving",			// name

	2000,					// base quantum (presumably microseconds)
	500,					// minimal quantum
	{ 3, 10 },				// quantum multipliers

	20000,					// maximum latency

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	rebalance,
	rebalance_irqs,
};
255 
256