xref: /haiku/src/system/kernel/scheduler/power_saving.cpp (revision 5e96d7d537fbec23bad4ae9b4c8e7b02e769f0c6)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 
7 #include <util/atomic.h>
8 #include <util/AutoLock.h>
9 
10 #include "scheduler_common.h"
11 #include "scheduler_cpu.h"
12 #include "scheduler_modes.h"
13 #include "scheduler_profiler.h"
14 #include "scheduler_thread.h"
15 
16 
17 using namespace Scheduler;
18 
19 
// Cache affinity timeout: once a thread has been asleep for longer than this
// (in microseconds) its cached working set is assumed evicted, so keeping it
// on its previous core no longer pays off.
const bigtime_t kCacheExpire = 100000;

// The core that light-load threads (and IRQs) are packed onto so the other
// cores can stay asleep. NULL while no core has been designated.
// NOTE(review): some accesses use the atomic_pointer_* helpers while others
// read/write it plainly — confirm the mixed access is intentionally benign.
static CoreEntry* sSmallTaskCore;
23 
24 
// Mode hook: called when the scheduler switches into power-saving mode.
// Forget any previously designated small-task core so it is re-chosen
// under this mode's policy.
static void
switch_to_mode()
{
	sSmallTaskCore = NULL;
}
30 
31 
// Mode hook: a CPU was enabled or disabled. Disabling any CPU invalidates
// the small-task core designation (the disabled CPU might belong to it),
// forcing choose_small_task_core() to pick a core again.
static void
set_cpu_enabled(int32 cpu, bool enabled)
{
	if (!enabled)
		sSmallTaskCore = NULL;
}
38 
39 
40 static bool
41 has_cache_expired(const ThreadData* threadData)
42 {
43 	SCHEDULER_ENTER_FUNCTION();
44 	if (threadData->WentSleep() == 0)
45 		return false;
46 	return system_time() - threadData->WentSleep() > kCacheExpire;
47 }
48 
49 
/*!	Returns the core that small tasks are packed onto, designating one if
	none is set yet. The candidate is the most loaded core on the normal
	load heap, so packing continues on a core that is already awake. The
	designation uses a compare-and-swap, so if another CPU designated a
	core concurrently, that core wins and is returned instead.
	NOTE(review): gCoreLoadHeap is peeked here without gCoreHeapsLock,
	unlike in choose_core() — confirm this is a deliberate benign race.
*/
static CoreEntry*
choose_small_task_core()
{
	SCHEDULER_ENTER_FUNCTION();

	// If the heap is empty (no core in the normal-load range), fall back
	// to the current designation, which may be NULL.
	CoreEntry* core = gCoreLoadHeap.PeekMaximum();
	if (core == NULL)
		return sSmallTaskCore;

	// Atomically install our candidate only if no core was designated yet;
	// otherwise return whichever core was already set.
	CoreEntry* smallTaskCore
		= atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
	if (smallTaskCore == NULL)
		return core;
	return smallTaskCore;
}
65 
66 
67 static CoreEntry*
68 choose_idle_core()
69 {
70 	SCHEDULER_ENTER_FUNCTION();
71 
72 	PackageEntry* package = PackageEntry::GetLeastIdlePackage();
73 
74 	if (package == NULL)
75 		package = gIdlePackageList.Last();
76 
77 	if (package != NULL)
78 		return package->GetIdleCore();
79 
80 	return NULL;
81 }
82 
83 
/*!	Selects a core for \a threadData in power-saving mode.
	Preference order:
	1. the designated small-task core, as long as adding this thread's
	   load would not push it to kHighLoad,
	2. the least loaded core that is already running (gCoreLoadHeap),
	3. an idle core (waking it up),
	4. the least loaded of the overloaded cores (gCoreHighLoadHeap).
*/
static CoreEntry*
choose_core(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* core = NULL;

	// try to pack all threads on one core
	core = choose_small_task_core();

	if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
		ReadSpinLocker coreLocker(gCoreHeapsLock);

		// run immediately on already woken core
		core = gCoreLoadHeap.PeekMinimum();
		if (core == NULL) {
			// NOTE(review): the heap lock is dropped before
			// choose_idle_core() — presumably to avoid holding
			// gCoreHeapsLock while package state is inspected; confirm.
			coreLocker.Unlock();

			core = choose_idle_core();

			if (core == NULL) {
				// No idle core either: every core is overloaded, so take
				// the least loaded of the overloaded ones.
				coreLocker.Lock();
				core = gCoreHighLoadHeap.PeekMinimum();
			}
		}
	}

	ASSERT(core != NULL);
	return core;
}
114 
115 
/*!	Returns whether \a threadData should be migrated away from its current
	core. Only called on SMP systems (see the ASSERT below).
*/
static bool
should_rebalance(const ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);

	CoreEntry* core = threadData->Core();

	int32 coreLoad = core->GetLoad();
	// The thread's load share, spread over the core's CPUs.
	int32 threadLoad = threadData->GetLoad() / core->CPUCount();
	if (coreLoad > kHighLoad) {
		// The small-task core itself became overloaded: drop the
		// designation and immediately pick a new one.
		// NOTE(review): sSmallTaskCore is compared and cleared without the
		// atomic helpers used elsewhere — confirm the race is benign.
		if (sSmallTaskCore == core) {
			sSmallTaskCore = NULL;
			choose_small_task_core();

			// A thread that accounts for a large share of the load stays;
			// the rest migrate only if the core is very overloaded.
			if (threadLoad > coreLoad / 3)
				return false;
			return coreLoad > kVeryHighLoad;
		}

		// Moving a thread that makes up half this core's load would just
		// overload the target core instead.
		if (threadLoad >= coreLoad / 2)
			return false;

		CoreEntry* other = gCoreLoadHeap.PeekMaximum();
		if (other == NULL)
			other = gCoreHighLoadHeap.PeekMinimum();
		ASSERT(other != NULL);

		// Migrate only if it noticeably improves the load distribution.
		int32 coreNewLoad = coreLoad - threadLoad;
		int32 otherNewLoad = other->GetLoad() + threadLoad;
		return coreNewLoad - otherNewLoad >= kLoadDifference / 2;
	}

	// Moderately loaded cores keep their threads.
	if (coreLoad >= kMediumLoad)
		return false;

	// Lightly loaded core: consolidate the thread onto the small-task
	// core, provided that would not overload it.
	CoreEntry* smallTaskCore = choose_small_task_core();
	if (smallTaskCore == NULL)
		return false;
	return smallTaskCore != core
		&& smallTaskCore->GetLoad() + threadLoad < kHighLoad;
}
159 
160 
/*!	Reassigns every IRQ currently handled by this CPU to the small-task
	core, so that the other cores are not woken up by interrupts.
	No-op when no small-task core is designated or when this CPU already
	belongs to it.
*/
static inline void
pack_irqs()
{
	SCHEDULER_ENTER_FUNCTION();

	CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
	if (smallTaskCore == NULL)
		return;

	cpu_ent* cpu = get_cpu_struct();
	if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
		return;

	SpinLocker locker(cpu->irqs_lock);
	while (list_get_first_item(&cpu->irqs) != NULL) {
		irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
		// The IRQ list lock may not be held across the reassignment call.
		locker.Unlock();

		// Target the least loaded CPU of the small-task core.
		int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();

		// Since this CPU is not part of smallTaskCore (checked above),
		// newCPU always differs from cpu->cpu_num here. The reassignment
		// presumably removes the IRQ from this CPU's list, which is what
		// terminates the loop — confirm against assign_io_interrupt_to_cpu.
		if (newCPU != cpu->cpu_num)
			assign_io_interrupt_to_cpu(irq->irq, newCPU);

		locker.Lock();
	}
}
187 
188 
189 static void
190 rebalance_irqs(bool idle)
191 {
192 	SCHEDULER_ENTER_FUNCTION();
193 
194 	if (idle && sSmallTaskCore != NULL) {
195 		pack_irqs();
196 		return;
197 	}
198 
199 	if (idle || sSmallTaskCore != NULL)
200 		return;
201 
202 	cpu_ent* cpu = get_cpu_struct();
203 	SpinLocker locker(cpu->irqs_lock);
204 
205 	irq_assignment* chosen = NULL;
206 	irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
207 
208 	while (irq != NULL) {
209 		if (chosen == NULL || chosen->load < irq->load)
210 			chosen = irq;
211 		irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
212 	}
213 
214 	locker.Unlock();
215 
216 	if (chosen == NULL || chosen->load < kLowLoad)
217 		return;
218 
219 	ReadSpinLocker coreLocker(gCoreHeapsLock);
220 	CoreEntry* other = gCoreLoadHeap.PeekMinimum();
221 	coreLocker.Unlock();
222 	if (other == NULL)
223 		return;
224 	int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
225 
226 	CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
227 	if (other == core)
228 		return;
229 	if (other->GetLoad() + kLoadDifference >= core->GetLoad())
230 		return;
231 
232 	assign_io_interrupt_to_cpu(chosen->irq, newCPU);
233 }
234 
235 
// Mode descriptor registered with the scheduler core. Field meanings are
// defined by scheduler_mode_operations (scheduler_modes.h, not visible
// here); the numeric fields are presumably quantum/latency parameters in
// microseconds — confirm against the struct definition.
scheduler_mode_operations gSchedulerPowerSavingMode = {
	"power saving",

	2000,		// presumably the base time quantum — confirm
	500,		// presumably the minimal time quantum — confirm
	{ 3, 10 },	// presumably quantum multipliers — confirm

	20000,		// presumably the maximum latency — confirm

	switch_to_mode,
	set_cpu_enabled,
	has_cache_expired,
	choose_core,
	should_rebalance,
	rebalance_irqs,
};
252 
253