/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_THREAD_H
#define KERNEL_SCHEDULER_THREAD_H


#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"


namespace Scheduler {

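// Scheduler-private per-thread state: run queue linkage, priority penalty
// bookkeeping, quantum accounting, and the load this thread contributes to
// its currently assigned core.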
struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
	RunQueueLinkImpl<ThreadData> {
private:
	inline	void		_InitBase();

	inline	int32		_GetMinimalPriority() const;

	inline	CoreEntry*	_ChooseCore() const;
	inline	CPUEntry*	_ChooseCPU(CoreEntry* core,
							bool& rescheduleNeeded) const;

public:
						ThreadData(Thread* thread);

			void		Init();
			void		Init(CoreEntry* core);

			void		Dump() const;

	inline	int32		GetPriority() const	{ return fThread->priority; }
	inline	Thread*		GetThread() const	{ return fThread; }

	inline	bool		IsRealTime() const;
	inline	bool		IsIdle() const;

	inline	bool		HasCacheExpired() const;
	inline	bool		ShouldRebalance() const;

	inline	int32		GetEffectivePriority() const;

	inline	bool		IsCPUBound() const	{ return fCPUBound; }

	inline	void		CancelPenalty();
	inline	bool		ShouldCancelPenalty() const;

			bool		ChooseCoreAndCPU(CoreEntry*& targetCore,
							CPUEntry*& targetCPU);

	inline	void		SetLastInterruptTime(bigtime_t interruptTime)
							{ fLastInterruptTime = interruptTime; }
	inline	void		SetStolenInterruptTime(bigtime_t interruptTime);

			bigtime_t	ComputeQuantum() const;
	inline	bigtime_t	GetQuantumLeft();
	inline	void		StartQuantum();
	inline	bool		HasQuantumEnded(bool wasPreempted, bool hasYielded);

	inline	void		Continues();
	inline	void		GoesAway();
	inline	void		Dies();

	inline	bigtime_t	WentSleep() const	{ return fWentSleep; }
	inline	bigtime_t	WentSleepActive() const	{ return fWentSleepActive; }

	inline	void		PutBack();
	inline	void		Enqueue();
	inline	bool		Dequeue();

	inline	void		UpdateActivity(bigtime_t active);

	inline	bool		IsEnqueued() const	{ return fEnqueued; }
	inline	void		SetDequeued()	{ fEnqueued = false; }

	inline	int32		GetLoad() const	{ return fNeededLoad; }

	inline	CoreEntry*	Core() const	{ return fCore; }
	inline	void		UnassignCore(bool running = false);

	static	void		ComputeQuantumLengths();

private:
	inline	void		_IncreasePenalty();
	inline	int32		_GetPenalty() const;

			void		_ComputeNeededLoad();

			void		_ComputeEffectivePriority() const;

	static	bigtime_t	_ScaleQuantum(bigtime_t maxQuantum,
							bigtime_t minQuantum, int32 maxPriority,
							int32 minPriority, int32 priority);
			// Quantum accounting: interrupt time stolen from the running
			// thread (plus small quantum remainders carried over) and the
			// point in time at which the current quantum started.
			bigtime_t	fStolenTime;
			bigtime_t	fQuantumStart;
			bigtime_t	fLastInterruptTime;

			// Snapshots taken in GoesAway() when the thread blocks, used by
			// ShouldCancelPenalty() after it wakes up again.
			bigtime_t	fWentSleep;
			bigtime_t	fWentSleepActive;
			int32		fWentSleepCount;
			int32		fWentSleepCountIdle;

			bool		fEnqueued;
			bool		fReady;

			Thread*		fThread;

			// Penalty state: threads that keep using up their whole quantum
			// get an increasing priority penalty and are eventually treated
			// as CPU bound.
			bool		fCPUBound;
			int32		fPriorityPenalty;
			int32		fAdditionalPenalty;

			// Cached values; mutable so that const methods may refresh them.
	mutable	int32		fEffectivePriority;
	mutable	bigtime_t	fBaseQuantum;

			bigtime_t	fTimeUsed;

			// Measurement window used by _ComputeNeededLoad().
			bigtime_t	fMeasureAvailableActiveTime;
			bigtime_t	fMeasureAvailableTime;
			bigtime_t	fLastMeasureAvailableTime;

			int32		fNeededLoad;

			CoreEntry*	fCore;
};

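// Abstract functor applied to ThreadData objects, e.g. when the scheduler
// has to process every thread waiting in a run queue.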
class ThreadProcessing {
public:
	virtual				~ThreadProcessing();

	virtual	void		operator()(ThreadData* thread) = 0;
};


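// Lower bound that penalties may not push the effective priority below: one
// fifth of the base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25]. A
// priority 100 thread, for example, can be demoted to 20 at most.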
inline int32
ThreadData::_GetMinimalPriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	const int32 kDivisor = 5;

	const int32 kMaximalPriority = 25;
	const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;

	int32 priority = GetPriority() / kDivisor;
	return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}


inline bool
ThreadData::IsRealTime() const
{
	return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY;
}


inline bool
ThreadData::IsIdle() const
{
	return GetPriority() == B_IDLE_PRIORITY;
}


inline bool
ThreadData::HasCacheExpired() const
{
	SCHEDULER_ENTER_FUNCTION();
	return gCurrentMode->has_cache_expired(this);
}


inline bool
ThreadData::ShouldRebalance() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->should_rebalance(this);
}


inline int32
ThreadData::GetEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fEffectivePriority;
}


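// Called when the thread has exhausted its quantum: bumps the priority
// penalty by one, but never beyond the limit set by _GetMinimalPriority().
// Once that limit is reached the thread is marked CPU bound instead of
// being penalized further.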
inline void
ThreadData::_IncreasePenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle() || IsRealTime())
		return;

	TRACE("increasing thread %ld penalty\n", fThread->id);

	int32 oldPenalty = fPriorityPenalty++;
	const int32 kMinimalPriority = _GetMinimalPriority();
	if (GetPriority() - oldPenalty <= kMinimalPriority) {
		fPriorityPenalty = oldPenalty;
		fCPUBound = true;
	}

	_ComputeEffectivePriority();
}


inline void
ThreadData::CancelPenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPenalty = fPriorityPenalty;
	fPriorityPenalty = 0;
	fCPUBound = false;

	if (oldPenalty != 0) {
		TRACE("cancelling thread %ld penalty\n", fThread->id);
		_ComputeEffectivePriority();
	}
}


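// Decides whether the thread slept long enough for its penalty to be
// forgiven: the starvation counter snapshots taken in GoesAway() are
// compared with the core's current values (see CoreEntry in scheduler_cpu.h
// for how the counters advance). CPU-bound threads and threads already at
// the lowest active priority are only forgiven via the idle counter.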
inline bool
ThreadData::ShouldCancelPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCore == NULL)
		return false;

	if (GetEffectivePriority() != B_LOWEST_ACTIVE_PRIORITY && !IsCPUBound()) {
		if (fCore->StarvationCounter() != fWentSleepCount)
			return true;
	}

	return fCore->StarvationCounterIdle() != fWentSleepCountIdle;
}


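// Credits time spent in interrupt handlers back to the thread: the time
// elapsed since SetLastInterruptTime() is accumulated in fStolenTime and
// later added to the quantum in GetQuantumLeft().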
inline void
ThreadData::SetStolenInterruptTime(bigtime_t interruptTime)
{
	SCHEDULER_ENTER_FUNCTION();

	interruptTime -= fLastInterruptTime;
	fStolenTime += interruptTime;
}


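// Remaining time the thread may run: the base quantum minus the time already
// used, plus at most one minimal quantum of compensation for stolen time.
// The result is never shorter than the current mode's minimal quantum.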
inline bigtime_t
ThreadData::GetQuantumLeft()
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t stolenTime = std::min(fStolenTime, gCurrentMode->minimal_quantum);
	ASSERT(stolenTime >= 0);
	fStolenTime -= stolenTime;

	bigtime_t quantum = ComputeQuantum() - fTimeUsed;
	quantum += stolenTime;
	quantum = std::max(quantum, gCurrentMode->minimal_quantum);

	return quantum;
}


inline void
ThreadData::StartQuantum()
{
	SCHEDULER_ENTER_FUNCTION();
	fQuantumStart = system_time();
}


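// Accounts the time used since StartQuantum() and decides whether a fresh
// quantum has to be started. Yielding or being preempted ends the quantum
// early, as does a leftover smaller than half the minimal quantum; any such
// leftover is carried over via fStolenTime. An exhausted quantum increases
// the thread's penalty.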
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t timeUsed = system_time() - fQuantumStart;
	ASSERT(timeUsed >= 0);
	fTimeUsed += timeUsed;

	bigtime_t timeLeft = ComputeQuantum() - fTimeUsed;
	timeLeft = std::max(bigtime_t(0), timeLeft);

	// too little time left, it's better to make the next quantum a bit longer
	bigtime_t skipTime = gCurrentMode->minimal_quantum / 2;
	if (hasYielded || wasPreempted || timeLeft <= skipTime) {
		fStolenTime += timeLeft;
		timeLeft = 0;
	}

	if (timeLeft == 0) {
		fAdditionalPenalty++;
		_IncreasePenalty();
		fTimeUsed = 0;
		return true;
	}

	return false;
}


inline void
ThreadData::Continues()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		_ComputeNeededLoad();
}


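// The thread stops running because it blocks or waits. fAdditionalPenalty is
// bumped even if the quantum did not end; the wall-clock time, the core's
// active time and its starvation counters are snapshotted for later use in
// ShouldCancelPenalty(), and the thread's load is taken off the core.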
inline void
ThreadData::GoesAway()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);

	if (!HasQuantumEnded(false, false)) {
		fAdditionalPenalty++;
		_ComputeEffectivePriority();
	}

	fLastInterruptTime = 0;

	fWentSleep = system_time();
	fWentSleepActive = fCore->GetActiveTime();
	fWentSleepCount = fCore->StarvationCounter();
	fWentSleepCountIdle = fCore->StarvationCounterIdle();

	if (gTrackCoreLoad)
		fCore->UpdateLoad(-fNeededLoad);
	fReady = false;
}


inline void
ThreadData::Dies()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		fCore->UpdateLoad(-fNeededLoad);
	fReady = false;
}


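// Reinserts the thread at the head of a run queue without touching the load
// bookkeeping, e.g. after it has been preempted: a CPU-pinned thread goes
// back to its current CPU's queue, any other to its core's queue.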
inline void
ThreadData::PutBack()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushFront(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushFront(this, priority);
	}
}


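// Makes the thread ready and appends it to the back of a run queue (the
// previous CPU's queue if pinned, the core's queue otherwise). If it was
// sleeping, its load is added back to the core and the load estimate is
// refreshed with the time spent asleep.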
inline void
ThreadData::Enqueue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (!fReady) {
		if (gTrackCoreLoad) {
			bigtime_t timeSlept = system_time() - fWentSleep;
			fCore->UpdateLoad(fNeededLoad);
			if (timeSlept > 0) {
				fMeasureAvailableTime += timeSlept;
				_ComputeNeededLoad();
			}
		}

		fReady = true;
	}

	fThread->state = B_THREAD_READY;

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushBack(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushBack(this, priority);
	}
}


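// Removes the thread from its run queue, if it is still there. Returns false
// if the thread was not enqueued (anymore), presumably letting callers
// detect that they lost a race for it.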
inline bool
ThreadData::Dequeue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		if (!fEnqueued)
			return false;
		cpu->Remove(this);
		ASSERT(!fEnqueued);
		return true;
	}

	CoreRunQueueLocker _(fCore);
	if (!fEnqueued)
		return false;

	fCore->Remove(this);
	ASSERT(!fEnqueued);
	return true;
}


inline void
ThreadData::UpdateActivity(bigtime_t active)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!gTrackCoreLoad)
		return;

	fMeasureAvailableTime += active;
	fMeasureAvailableActiveTime += active;
}


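// Detaches the thread from its core. A thread that is not ready just drops
// the pointer; a running or ready one also has its load removed from the
// core and is marked B_THREAD_SUSPENDED.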
inline void
ThreadData::UnassignCore(bool running)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCore != NULL);
	if (!fReady)
		fCore = NULL;

	if (running || fThread->state == B_THREAD_READY) {
		if (gTrackCoreLoad)
			fCore->UpdateLoad(-fNeededLoad);
		fReady = false;
		fThread->state = B_THREAD_SUSPENDED;
		fCore = NULL;
	}
}


}	// namespace Scheduler


#endif	// KERNEL_SCHEDULER_THREAD_H