xref: /haiku/src/system/kernel/scheduler/scheduler_thread.cpp (revision 984f843b917a1c4e077915c5961a6ef1cf8dabc7)
1 /*
2  * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3  * Distributed under the terms of the MIT License.
4  */
5 
6 #include "scheduler_thread.h"
7 
8 
9 using namespace Scheduler;
10 
11 
// Base quantum length for every possible thread priority; filled in by
// ThreadData::ComputeQuantumLengths() from the current scheduler mode.
static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

// Upper bounds on the quantum, indexed by the number of runnable threads per
// CPU, used to keep scheduling latency bounded under load (see
// ThreadData::ComputeQuantum()).
const int32 kMaximumQuantumLengthsCount	= 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];
16 
17 
18 void
19 ThreadData::_InitBase()
20 {
21 	fStolenTime = 0;
22 	fQuantumStart = 0;
23 	fLastInterruptTime = 0;
24 
25 	fWentSleep = 0;
26 	fWentSleepActive = 0;
27 
28 	fEnqueued = false;
29 	fReady = false;
30 
31 	fPriorityPenalty = 0;
32 	fAdditionalPenalty = 0;
33 
34 	fEffectivePriority = GetPriority();
35 	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
36 
37 	fTimeUsed = 0;
38 
39 	fMeasureAvailableActiveTime = 0;
40 	fLastMeasureAvailableTime = 0;
41 	fMeasureAvailableTime = 0;
42 }
43 
44 
45 inline CoreEntry*
46 ThreadData::_ChooseCore() const
47 {
48 	SCHEDULER_ENTER_FUNCTION();
49 
50 	ASSERT(!gSingleCore);
51 	return gCurrentMode->choose_core(this);
52 }
53 
54 
/*!	Picks a CPU on \a core for this thread, honoring the thread's CPU mask.
	Prefers the CPU the thread ran on last (cache affinity) when that CPU
	belongs to \a core and currently runs something of lower priority.
	Sets \a rescheduleNeeded when the chosen CPU should be rescheduled
	because this thread outranks what it is currently running.
*/
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	// An empty mask means "no restriction": treat it as all CPUs allowed.
	CPUSet mask = GetCPUMask();
	if (mask.IsEmpty())
		mask.SetAll();
	ASSERT(mask.Matches(core->CPUMask()));

	// Try the previous CPU first for cache affinity, but only if it is
	// enabled, allowed by the mask, and belongs to the requested core.
	if (fThread->previous_cpu != NULL && !fThread->previous_cpu->disabled
		&& mask.GetBit(fThread->previous_cpu->cpu_num)) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	// Otherwise walk the core's CPU heap from the root (lowest priority
	// first) until a CPU permitted by the mask is found.
	CoreCPUHeapLocker _(core);
	int32 index = 0;
	CPUEntry* cpu;
	do {
		cpu = core->CPUHeap()->PeekRoot(index++);
	} while (cpu != NULL && !mask.GetBit(cpu->ID()));
	ASSERT(cpu != NULL);

	// Reschedule only if this thread outranks what the CPU runs now.
	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}
97 
98 
ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
	// Only the back pointer to the kernel thread is stored here; all other
	// scheduler state is set up later by Init() / Init(CoreEntry*).
}
104 
105 
106 void
107 ThreadData::Init()
108 {
109 	_InitBase();
110 	fCore = NULL;
111 
112 	Thread* currentThread = thread_get_current_thread();
113 	ThreadData* currentThreadData = currentThread->scheduler_data;
114 	fNeededLoad = currentThreadData->fNeededLoad;
115 
116 	if (!IsRealTime()) {
117 		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
118 				std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
119 		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;
120 
121 		_ComputeEffectivePriority();
122 	}
123 }
124 
125 
126 void
127 ThreadData::Init(CoreEntry* core)
128 {
129 	_InitBase();
130 
131 	fCore = core;
132 	fReady = true;
133 	fNeededLoad = 0;
134 }
135 
136 
/*!	Prints this thread's scheduler state to the kernel debugger console. */
void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	// Show the additional penalty both modulo the penalized priority (the
	// form used by _ComputeEffectivePriority()) and as its raw value.
	int32 priority = GetPriority() - _GetPenalty();
	priority = std::max(priority, int32(1));
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		fAdditionalPenalty % priority, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	// fNeededLoad is stored in per-mille; divide by 10 for percent.
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}
160 
161 
/*!	Resolves the core and CPU this thread should run on. Either, both, or
	neither of \a targetCore and \a targetCPU may be preset by the caller;
	presets that conflict with the thread's CPU mask are discarded. On
	return both are non-NULL. Also migrates the thread's load accounting
	when the chosen core differs from the current one.
	\return \c true if the chosen CPU needs to be rescheduled.
*/
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	// Drop presets that the thread's CPU mask does not allow.
	if (targetCore != NULL && !targetCore->CPUMask().Matches(GetCPUMask()))
		targetCore = NULL;
	if (targetCPU != NULL && !GetCPUMask().GetBit(targetCPU->ID()))
		targetCPU = NULL;

	// Fill in whichever of core/CPU is still missing.
	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		CPUSet mask = GetCPUMask();
		if (mask.IsEmpty())
			mask.SetAll();
		ASSERT(mask.Matches(targetCore->CPUMask()));
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	// When switching cores, move this thread's load contribution to the
	// new core and resynchronize with its load measurement epoch.
	if (fCore != targetCore) {
		fLoadMeasurementEpoch = targetCore->LoadMeasurementEpoch() - 1;
		if (fReady) {
			if (fCore != NULL)
				fCore->RemoveLoad(fNeededLoad, true);
			targetCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, true);
		}
	}

	fCore = targetCore;
	return rescheduleNeeded;
}
202 
203 
204 bigtime_t
205 ThreadData::ComputeQuantum() const
206 {
207 	SCHEDULER_ENTER_FUNCTION();
208 
209 	if (IsRealTime())
210 		return fBaseQuantum;
211 
212 	int32 threadCount = fCore->ThreadCount();
213 	if (fCore->CPUCount() > 0)
214 		threadCount /= fCore->CPUCount();
215 
216 	bigtime_t quantum = fBaseQuantum;
217 	if (threadCount < kMaximumQuantumLengthsCount)
218 		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);
219 	return quantum;
220 }
221 
222 
223 void
224 ThreadData::UnassignCore(bool running)
225 {
226 	SCHEDULER_ENTER_FUNCTION();
227 
228 	ASSERT(fCore != NULL);
229 	if (running || fThread->state == B_THREAD_READY)
230 		fReady = false;
231 	if (!fReady)
232 		fCore = NULL;
233 }
234 
235 
236 /* static */ void
237 ThreadData::ComputeQuantumLengths()
238 {
239 	SCHEDULER_ENTER_FUNCTION();
240 
241 	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
242 		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
243 		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
244 			sQuantumLengths[priority] = kQuantum0;
245 			continue;
246 		}
247 
248 		const bigtime_t kQuantum1
249 			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
250 		if (priority > B_NORMAL_PRIORITY) {
251 			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
252 				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
253 			continue;
254 		}
255 
256 		const bigtime_t kQuantum2
257 			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
258 		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
259 			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
260 	}
261 
262 	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
263 		threadCount++) {
264 
265 		bigtime_t quantum = gCurrentMode->maximum_latency;
266 		if (threadCount != 0)
267 			quantum /= threadCount;
268 		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
269 		sMaximumQuantumLengths[threadCount] = quantum;
270 	}
271 }
272 
273 
274 inline int32
275 ThreadData::_GetPenalty() const
276 {
277 	SCHEDULER_ENTER_FUNCTION();
278 	return fPriorityPenalty;
279 }
280 
281 
/*!	Refreshes this thread's load estimate and propagates the change to the
	thread's current core.
*/
void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(!IsIdle());

	// compute_load() apparently updates fNeededLoad in place and returns
	// the previous value, or a negative value when no update happened
	// (measurement interval too short) — TODO: confirm against its
	// definition.
	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	// Only the delta needs to be applied to the core's load.
	fCore->ChangeLoad(fNeededLoad - oldLoad);
}
295 
296 
297 void
298 ThreadData::_ComputeEffectivePriority() const
299 {
300 	SCHEDULER_ENTER_FUNCTION();
301 
302 	if (IsIdle())
303 		fEffectivePriority = B_IDLE_PRIORITY;
304 	else if (IsRealTime())
305 		fEffectivePriority = GetPriority();
306 	else {
307 		fEffectivePriority = GetPriority();
308 		fEffectivePriority -= _GetPenalty();
309 		if (fEffectivePriority > 0)
310 			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;
311 
312 		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
313 		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
314 	}
315 
316 	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
317 }
318 
319 
320 /* static */ bigtime_t
321 ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
322 	int32 maxPriority, int32 minPriority, int32 priority)
323 {
324 	SCHEDULER_ENTER_FUNCTION();
325 
326 	ASSERT(priority <= maxPriority);
327 	ASSERT(priority >= minPriority);
328 
329 	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
330 	result /= maxPriority - minPriority;
331 	return maxQuantum - result;
332 }
333 
334 
// Out-of-line empty destructor; ThreadProcessing owns no resources here.
ThreadProcessing::~ThreadProcessing()
{
}
338 
339