xref: /haiku/src/system/kernel/scheduler/scheduler_thread.cpp (revision 909af08f4328301fbdef1ffb41f566c3b5bec0c7)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */

#include "scheduler_thread.h"

#include <algorithm>
	// std::min/std::max; likely already available through scheduler_thread.h


using namespace Scheduler;


static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

const int32 kMaximumQuantumLengthsCount = 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];

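/*!	Resets the bookkeeping shared by both Init() variants: time accounting,
	sleep timestamps, queue state, priority penalties and the load
	measurement fields. The base quantum is re-derived from the effective
	priority.
*/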
void
ThreadData::_InitBase()
{
	fStolenTime = 0;
	fQuantumStart = 0;
	fLastInterruptTime = 0;

	fWentSleep = 0;
	fWentSleepActive = 0;

	fEnqueued = false;
	fReady = false;

	fPriorityPenalty = 0;
	fAdditionalPenalty = 0;

	fEffectivePriority = GetPriority();
	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];

	fTimeUsed = 0;

	fMeasureAvailableActiveTime = 0;
	fLastMeasureAvailableTime = 0;
	fMeasureAvailableTime = 0;
}


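/*!	Delegates core selection to the active scheduler mode (low latency or
	power saving); only meaningful on multi-core systems.
*/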
inline CoreEntry*
ThreadData::_ChooseCore() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->choose_core(this);
}


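/*!	Picks a CPU on \a core for this thread. The previous CPU is preferred
	when it still belongs to \a core, is enabled and allowed by the affinity
	mask, since the thread may still have a warm cache there. Otherwise the
	root of the core's CPU priority heap, i.e. the CPU with the lowest
	recorded priority, is taken. \a rescheduleNeeded is set when the chosen
	CPU currently runs something of lower priority than this thread.
*/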
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	CPUSet mask = GetCPUMask();
	const bool useMask = !mask.IsEmpty();
	ASSERT(!useMask || mask.Matches(core->CPUMask()));

	if (fThread->previous_cpu != NULL && !fThread->previous_cpu->disabled
			&& (!useMask || mask.GetBit(fThread->previous_cpu->cpu_num))) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	CoreCPUHeapLocker _(core);
	int32 index = 0;
	CPUEntry* cpu;
	do {
		cpu = core->CPUHeap()->PeekRoot(index++);
	} while (useMask && cpu != NULL && !mask.GetBit(cpu->ID()));
	ASSERT(cpu != NULL);

	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}


ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
}


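/*!	Initializes the data of a newly created thread. The thread inherits the
	creating thread's needed load estimate as a first guess and, unless it
	is real-time, also a (capped) copy of its priority penalties, so that a
	penalized thread cannot shed its penalty simply by spawning a new
	thread.
*/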
void
ThreadData::Init()
{
	_InitBase();
	fCore = NULL;

	Thread* currentThread = thread_get_current_thread();
	ThreadData* currentThreadData = currentThread->scheduler_data;
	fNeededLoad = currentThreadData->fNeededLoad;

	if (!IsRealTime()) {
		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
				std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;

		_ComputeEffectivePriority();
	}
}


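/*!	Initializes the data of a thread that starts out bound to \a core and
	immediately ready, with no measured load yet (e.g. the per-CPU idle
	threads).
*/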
void
ThreadData::Init(CoreEntry* core)
{
	_InitBase();

	fCore = core;
	fReady = true;
	fNeededLoad = 0;
}


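/*!	Prints this thread's scheduler state via kprintf(), for use from the
	kernel debugger.
*/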
void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	int32 priority = GetPriority() - _GetPenalty();
	priority = std::max(priority, int32(1));
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		fAdditionalPenalty % priority, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}


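/*!	Validates the \a targetCore / \a targetCPU hints against the affinity
	mask, fills in whichever of the two is missing, and migrates this
	thread's load statistics when the choice differs from the current core.
	Returns whether the chosen CPU should be asked to reschedule.
*/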
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	CPUSet mask = GetCPUMask();
	const bool useMask = !mask.IsEmpty();

	if (targetCore != NULL && (useMask && !targetCore->CPUMask().Matches(mask)))
		targetCore = NULL;
	if (targetCPU != NULL && (useMask && !mask.GetBit(targetCPU->ID())))
		targetCPU = NULL;

	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		ASSERT(!useMask || mask.Matches(targetCore->CPUMask()));
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	if (fCore != targetCore) {
		fLoadMeasurementEpoch = targetCore->LoadMeasurementEpoch() - 1;
		if (fReady) {
			if (fCore != NULL)
				fCore->RemoveLoad(fNeededLoad, true);
			targetCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, true);
		}
	}

	fCore = targetCore;
	return rescheduleNeeded;
}


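/*!	Returns the time slice this thread should get. Real-time threads always
	receive their full base quantum. For other threads the base quantum is
	capped by a latency-derived limit that shrinks as more threads per CPU
	crowd the core (the sMaximumQuantumLengths table filled in by
	ComputeQuantumLengths()).
*/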
bigtime_t
ThreadData::ComputeQuantum() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsRealTime())
		return fBaseQuantum;

	int32 threadCount = fCore->ThreadCount();
	if (fCore->CPUCount() > 0)
		threadCount /= fCore->CPUCount();

	bigtime_t quantum = fBaseQuantum;
	if (threadCount < kMaximumQuantumLengthsCount)
		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);
	return quantum;
}


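/*!	Detaches the thread from its current core. A thread that is running or
	ready is marked not ready first; the core reference is only cleared once
	the thread is no longer ready.
*/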
void
ThreadData::UnassignCore(bool running)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCore != NULL);
	if (running || fThread->state == B_THREAD_READY)
		fReady = false;
	if (!fReady)
		fCore = NULL;
}


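/*!	Precomputes the two quantum tables from the active scheduler mode.
	sQuantumLengths maps an effective priority to its base quantum: the
	mode's base_quantum at B_URGENT_DISPLAY_PRIORITY and above, and two
	linearly interpolated ranges below that (via _ScaleQuantum()), stretched
	by quantum_multipliers[0] and [1] so that lower priorities get longer
	quanta. sMaximumQuantumLengths caps the quantum at maximum_latency
	divided by the per-CPU thread count, but never below minimal_quantum.
	Illustrative numbers only (the real ones come from the mode): with
	maximum_latency = 2000us and 4 threads per CPU, the cap would be 500us.
*/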
/* static */ void
ThreadData::ComputeQuantumLengths()
{
	SCHEDULER_ENTER_FUNCTION();

	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
			sQuantumLengths[priority] = kQuantum0;
			continue;
		}

		const bigtime_t kQuantum1
			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
		if (priority > B_NORMAL_PRIORITY) {
			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
			continue;
		}

		const bigtime_t kQuantum2
			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
	}

	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
		threadCount++) {

		bigtime_t quantum = gCurrentMode->maximum_latency;
		if (threadCount != 0)
			quantum /= threadCount;
		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
		sMaximumQuantumLengths[threadCount] = quantum;
	}
}


inline int32
ThreadData::_GetPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fPriorityPenalty;
}


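/*!	Refreshes the thread's load estimate from its recent active vs.
	available time (via compute_load()) and forwards any change to the
	core's load.
*/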
void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(!IsIdle());

	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	fCore->ChangeLoad(fNeededLoad - oldLoad);
}


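/*!	Recomputes the effective priority: idle threads pin to B_IDLE_PRIORITY,
	real-time threads keep their set priority, and everything else has the
	priority penalty subtracted plus a further reduction derived from the
	additional penalty. The base quantum is refreshed to match the result.
*/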
void
ThreadData::_ComputeEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle())
		fEffectivePriority = B_IDLE_PRIORITY;
	else if (IsRealTime())
		fEffectivePriority = GetPriority();
	else {
		fEffectivePriority = GetPriority();
		fEffectivePriority -= _GetPenalty();
		if (fEffectivePriority > 0)
			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;

		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
	}

	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
}


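/*!	Linearly interpolates a quantum for \a priority between \a maxQuantum at
	\a minPriority and \a minQuantum at \a maxPriority:

	quantum = maxQuantum
		- (maxQuantum - minQuantum) * (priority - minPriority)
			/ (maxPriority - minPriority)

	so within the range, lower priorities receive longer quanta.
*/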
/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
	int32 maxPriority, int32 minPriority, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(priority <= maxPriority);
	ASSERT(priority >= minPriority);

	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
	result /= maxPriority - minPriority;
	return maxQuantum - result;
}


ThreadProcessing::~ThreadProcessing()
{
}