xref: /haiku/src/system/kernel/scheduler/scheduler_thread.cpp (revision d284f7cc43cc0d1106c3b0c40e62c58107648573)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_thread.h"


using namespace Scheduler;


static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

const int32 kMaximumQuantumLengthsCount	= 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];


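// Resets the per-thread scheduling state shared by both Init() variants:
// priority penalties, the base quantum for the current effective priority,
// time accounting, load measurement counters and the enqueued/ready flags.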
void
ThreadData::_InitBase()
{
	fPriorityPenalty = 0;
	fAdditionalPenalty = 0;
	fEffectivePriority = GetPriority();
	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];

	fTimeUsed = 0;
	fStolenTime = 0;

	fMeasureAvailableActiveTime = 0;
	fLastMeasureAvailableTime = 0;
	fMeasureAvailableTime = 0;

	fWentSleep = 0;
	fWentSleepActive = 0;

	fEnqueued = false;
	fReady = false;
}


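// Delegates core selection to the active scheduler mode. Only valid when
// there is more than one core, hence the assert.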
inline CoreEntry*
ThreadData::_ChooseCore() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->choose_core(this);
}


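// Picks a CPU within the given core. The thread's previous CPU is reused if
// it belongs to this core, is still enabled and currently runs something of
// lower priority; otherwise the lowest priority CPU from the core's heap is
// chosen. rescheduleNeeded is set when the chosen CPU's current priority is
// lower than this thread's.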
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	if (fThread->previous_cpu != NULL) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	CoreCPUHeapLocker _(core);
	CPUEntry* cpu = core->CPUHeap()->PeekRoot();
	ASSERT(cpu != NULL);

	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}


ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
}


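// Initializes scheduling data for a thread that does not have a core assigned
// yet. The needed load and, for non-real-time threads, the priority penalties
// are inherited from the currently running thread (presumably the one
// creating this thread).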
void
ThreadData::Init()
{
	_InitBase();
	fCore = NULL;

	Thread* currentThread = thread_get_current_thread();
	ThreadData* currentThreadData = currentThread->scheduler_data;
	fNeededLoad = currentThreadData->fNeededLoad;

	if (!IsRealTime()) {
		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
				std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;

		_ComputeEffectivePriority();
	}
}


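// Variant used when the target core is known up front; the thread starts out
// ready and with no measured load.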
void
ThreadData::Init(CoreEntry* core)
{
	_InitBase();

	fCore = core;
	fReady = true;
	fNeededLoad = 0;
}


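// Dumps this thread's scheduler state to the kernel debugger.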
void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	int32 priority = GetPriority() - _GetPenalty();
	priority = std::max(priority, int32(1));
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		fAdditionalPenalty % priority, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}


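// Fills in whichever of targetCore/targetCPU is still NULL, keeping the two
// consistent, and moves this thread's needed load to the new core if the core
// assignment changes. Returns true if the chosen CPU should reschedule.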
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	if (fCore != targetCore) {
		fLoadMeasurementEpoch = targetCore->LoadMeasurementEpoch() - 1;
		if (fReady) {
			if (fCore != NULL)
				fCore->RemoveLoad(fNeededLoad, true);
			targetCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, true);
		}
	}

	fCore = targetCore;
	return rescheduleNeeded;
}


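// Returns the time slice this thread should get. Real-time threads always
// receive their full base quantum; for other threads the base quantum is
// capped by a limit that shrinks with the number of threads per CPU on the
// core, so that overall latency stays bounded.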
bigtime_t
ThreadData::ComputeQuantum() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsRealTime())
		return fBaseQuantum;

	int32 threadCount = fCore->ThreadCount();
	if (fCore->CPUCount() > 0)
		threadCount /= fCore->CPUCount();

	bigtime_t quantum = fBaseQuantum;
	if (threadCount < kMaximumQuantumLengthsCount)
		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);
	return quantum;
}


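// Detaches the thread from its current core. A running or ready thread is
// marked not ready first; the core pointer is cleared only if the thread ends
// up not ready.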
void
ThreadData::UnassignCore(bool running)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCore != NULL);
	if (running || fThread->state == B_THREAD_READY)
		fReady = false;
	if (!fReady)
		fCore = NULL;
}


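// Precomputes the two lookup tables used above: sQuantumLengths gives each
// priority its base quantum (the mode's base_quantum at and above
// B_URGENT_DISPLAY_PRIORITY, otherwise linearly interpolated via the mode's
// quantum multipliers), while sMaximumQuantumLengths caps the quantum for a
// given number of threads per CPU so that the mode's maximum_latency is
// roughly respected, but never below its minimal_quantum.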
/* static */ void
ThreadData::ComputeQuantumLengths()
{
	SCHEDULER_ENTER_FUNCTION();

	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
			sQuantumLengths[priority] = kQuantum0;
			continue;
		}

		const bigtime_t kQuantum1
			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
		if (priority > B_NORMAL_PRIORITY) {
			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
			continue;
		}

		const bigtime_t kQuantum2
			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
	}

	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
		threadCount++) {

		bigtime_t quantum = gCurrentMode->maximum_latency;
		if (threadCount != 0)
			quantum /= threadCount;
		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
		sMaximumQuantumLengths[threadCount] = quantum;
	}
}


inline int32
ThreadData::_GetPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fPriorityPenalty;
}


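// Re-estimates how much CPU load this thread needs based on the active time
// measured since the last update and propagates any change to its core.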
void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(!IsIdle());

	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	fCore->ChangeLoad(fNeededLoad - oldLoad);
}


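// Recomputes the cached effective priority: idle and real-time threads keep
// their fixed priority, all other threads get the accumulated penalties
// subtracted while staying within the non-real-time active range. The base
// quantum is refreshed to match the new effective priority.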
void
ThreadData::_ComputeEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle())
		fEffectivePriority = B_IDLE_PRIORITY;
	else if (IsRealTime())
		fEffectivePriority = GetPriority();
	else {
		fEffectivePriority = GetPriority();
		fEffectivePriority -= _GetPenalty();
		if (fEffectivePriority > 0)
			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;

		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
	}

	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
}


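// Linearly interpolates a quantum for the given priority between two anchor
// priorities, so that maxPriority maps to minQuantum and minPriority maps to
// maxQuantum:
//   quantum = maxQuantum - (maxQuantum - minQuantum)
//       * (priority - minPriority) / (maxPriority - minPriority)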
/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
	int32 maxPriority, int32 minPriority, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(priority <= maxPriority);
	ASSERT(priority >= minPriority);

	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
	result /= maxPriority - minPriority;
	return maxQuantum - result;
}


ThreadProcessing::~ThreadProcessing()
{
}
