/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_THREAD_H
#define KERNEL_SCHEDULER_THREAD_H


#include <algorithm>

#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"


namespace Scheduler {


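// Scheduler-private state attached to each Thread: run queue linkage,
// priority penalty bookkeeping, quantum accounting and the load this
// thread puts on its currently assigned core.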
struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
	RunQueueLinkImpl<ThreadData> {
private:
	inline	void		_InitBase();

	inline	int32		_GetMinimalPriority() const;

	inline	CoreEntry*	_ChooseCore() const;
	inline	CPUEntry*	_ChooseCPU(CoreEntry* core,
							bool& rescheduleNeeded) const;

public:
						ThreadData(Thread* thread);

			void		Init();
			void		Init(CoreEntry* core);

			void		Dump() const;

	inline	int32		GetPriority() const	{ return fThread->priority; }
	inline	Thread*		GetThread() const	{ return fThread; }

	inline	bool		IsRealTime() const;
	inline	bool		IsIdle() const;

	inline	bool		HasCacheExpired() const;
	inline	CoreEntry*	Rebalance() const;

	inline	int32		GetEffectivePriority() const;

	inline	void		StartCPUTime();
	inline	void		StopCPUTime();

	inline	void		CancelPenalty();
	inline	bool		ShouldCancelPenalty() const;

			bool		ChooseCoreAndCPU(CoreEntry*& targetCore,
							CPUEntry*& targetCPU);

	inline	void		SetLastInterruptTime(bigtime_t interruptTime)
							{ fLastInterruptTime = interruptTime; }
	inline	void		SetStolenInterruptTime(bigtime_t interruptTime);

			bigtime_t	ComputeQuantum() const;
	inline	bigtime_t	GetQuantumLeft();
	inline	void		StartQuantum();
	inline	bool		HasQuantumEnded(bool wasPreempted, bool hasYielded);

	inline	void		Continues();
	inline	void		GoesAway();
	inline	void		Dies();

	inline	bigtime_t	WentSleep() const	{ return fWentSleep; }
	inline	bigtime_t	WentSleepActive() const	{ return fWentSleepActive; }

	inline	void		PutBack();
	inline	void		Enqueue();
	inline	bool		Dequeue();

	inline	void		UpdateActivity(bigtime_t active);

	inline	bool		IsEnqueued() const	{ return fEnqueued; }
	inline	void		SetDequeued()	{ fEnqueued = false; }

	inline	int32		GetLoad() const	{ return fNeededLoad; }

	inline	CoreEntry*	Core() const	{ return fCore; }
			void		UnassignCore(bool running = false);

	static	void		ComputeQuantumLengths();

private:
	inline	void		_IncreasePenalty();
	inline	int32		_GetPenalty() const;

			void		_ComputeNeededLoad();

			void		_ComputeEffectivePriority() const;

	static	bigtime_t	_ScaleQuantum(bigtime_t maxQuantum,
							bigtime_t minQuantum, int32 maxPriority,
							int32 minPriority, int32 priority);

			bigtime_t	fStolenTime;
			bigtime_t	fQuantumStart;
			bigtime_t	fLastInterruptTime;

			bigtime_t	fWentSleep;
			bigtime_t	fWentSleepActive;

			bool		fEnqueued;
			bool		fReady;

			Thread*		fThread;

			int32		fPriorityPenalty;
			int32		fAdditionalPenalty;

	mutable	int32		fEffectivePriority;
	mutable	bigtime_t	fBaseQuantum;

			bigtime_t	fTimeUsed;

			bigtime_t	fMeasureAvailableActiveTime;
			bigtime_t	fMeasureAvailableTime;
			bigtime_t	fLastMeasureAvailableTime;

			int32		fNeededLoad;
			uint32		fLoadMeasurementEpoch;

			CoreEntry*	fCore;
};

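// Functor interface: the scheduler invokes operator() once per thread
// when walking over a set of threads. A minimal (purely illustrative)
// subclass might look like this:
//
//	struct CancelPenalties : ThreadProcessing {
//		void operator()(ThreadData* thread) { thread->CancelPenalty(); }
//	};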
class ThreadProcessing {
public:
	virtual				~ThreadProcessing();

	virtual	void		operator()(ThreadData* thread) = 0;
};


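// Lower bound below which priority penalties may not push a thread: one
// fifth of the base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25].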
inline int32
ThreadData::_GetMinimalPriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	const int32 kDivisor = 5;

	const int32 kMaximalPriority = 25;
	const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;

	int32 priority = GetPriority() / kDivisor;
	return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}


inline bool
ThreadData::IsRealTime() const
{
	return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY;
}


inline bool
ThreadData::IsIdle() const
{
	return GetPriority() == B_IDLE_PRIORITY;
}


inline bool
ThreadData::HasCacheExpired() const
{
	SCHEDULER_ENTER_FUNCTION();
	return gCurrentMode->has_cache_expired(this);
}


inline CoreEntry*
ThreadData::Rebalance() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->rebalance(this);
}


inline int32
ThreadData::GetEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fEffectivePriority;
}


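// Adds one step of priority penalty to a non-idle, non-real-time thread.
// The increment is undone if it would take the effective priority below
// _GetMinimalPriority().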
inline void
ThreadData::_IncreasePenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle() || IsRealTime())
		return;

	TRACE("increasing thread %ld penalty\n", fThread->id);

	int32 oldPenalty = fPriorityPenalty++;
	const int32 kMinimalPriority = _GetMinimalPriority();
	if (GetPriority() - oldPenalty <= kMinimalPriority)
		fPriorityPenalty = oldPenalty;

	_ComputeEffectivePriority();
}


inline void
ThreadData::StartCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->last_time = system_time();
}


inline void
ThreadData::StopCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	// User time is tracked in thread_at_kernel_entry()
	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->kernel_time += system_time() - fThread->last_time;
	fThread->last_time = 0;
	threadTimeLocker.Unlock();

	// If the old thread's team has user time timers, check them now.
	Team* team = fThread->team;
	SpinLocker teamTimeLocker(team->time_lock);
	if (team->HasActiveUserTimeUserTimers())
		user_timer_check_team_user_timers(team);
}


inline void
ThreadData::CancelPenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPenalty = fPriorityPenalty;
	fPriorityPenalty = 0;

	if (oldPenalty != 0) {
		TRACE("cancelling thread %ld penalty\n", fThread->id);
		_ComputeEffectivePriority();
	}
}


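// The penalty is forgiven once the thread has slept for more than half of
// the mode's base quantum, presumably a heuristic for interactive
// behavior.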
inline bool
ThreadData::ShouldCancelPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCore == NULL)
		return false;
	return system_time() - fWentSleep > gCurrentMode->base_quantum / 2;
}


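// Takes the CPU's cumulative interrupt time; the delta since the last
// SetLastInterruptTime() is recorded as time stolen from this thread's
// quantum and is paid back in GetQuantumLeft().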
inline void
ThreadData::SetStolenInterruptTime(bigtime_t interruptTime)
{
	SCHEDULER_ENTER_FUNCTION();

	interruptTime -= fLastInterruptTime;
	fStolenTime += interruptTime;
}


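// Remaining quantum: whatever ComputeQuantum() grants minus the time
// already used, extended by up to minimal_quantum of accumulated stolen
// time and never shorter than minimal_quantum.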
inline bigtime_t
ThreadData::GetQuantumLeft()
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t stolenTime = std::min(fStolenTime, gCurrentMode->minimal_quantum);
	ASSERT(stolenTime >= 0);
	fStolenTime -= stolenTime;

	bigtime_t quantum = ComputeQuantum() - fTimeUsed;
	quantum += stolenTime;
	quantum = std::max(quantum, gCurrentMode->minimal_quantum);

	return quantum;
}


inline void
ThreadData::StartQuantum()
{
	SCHEDULER_ENTER_FUNCTION();
	fQuantumStart = system_time();
}


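// Charges the time since StartQuantum() against the quantum. If the
// thread yielded, was preempted or has less than half a minimal quantum
// left, the remainder is banked as stolen time, the penalty is increased
// and the quantum counts as ended.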
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t timeUsed = system_time() - fQuantumStart;
	ASSERT(timeUsed >= 0);
	fTimeUsed += timeUsed;

	bigtime_t timeLeft = ComputeQuantum() - fTimeUsed;
	timeLeft = std::max(bigtime_t(0), timeLeft);

	// Too little time left; it's better to make the next quantum a bit
	// longer.
	bigtime_t skipTime = gCurrentMode->minimal_quantum / 2;
	if (hasYielded || wasPreempted || timeLeft <= skipTime) {
		fStolenTime += timeLeft;
		timeLeft = 0;
	}

	if (timeLeft == 0) {
		fAdditionalPenalty++;
		_IncreasePenalty();
		fTimeUsed = 0;
		return true;
	}

	return false;
}


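// Continues(), GoesAway() and Dies() record what happens to a ready
// thread (it stays ready, blocks, or exits) so that the core's load
// statistics stay in sync.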
inline void
ThreadData::Continues()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		_ComputeNeededLoad();
}


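// The thread blocks: settle its quantum accounting, remember when (and at
// which core active time) it went to sleep, and drop its load from the
// core until it is enqueued again.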
inline void
ThreadData::GoesAway()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);

	if (!HasQuantumEnded(false, false)) {
		fAdditionalPenalty++;
		_ComputeEffectivePriority();
	}

	fLastInterruptTime = 0;

	fWentSleep = system_time();
	fWentSleepActive = fCore->GetActiveTime();

	if (gTrackCoreLoad)
		fLoadMeasurementEpoch = fCore->RemoveLoad(fNeededLoad, false);
	fReady = false;
}


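// Like GoesAway(), but the thread is exiting, so its load is removed from
// the core for good.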
inline void
ThreadData::Dies()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		fCore->RemoveLoad(fNeededLoad, true);
	fReady = false;
}


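// Reinserts a thread at the head of its run queue, presumably after it
// lost the CPU without using up its quantum. Threads pinned to a CPU go
// to that CPU's queue (fThread->cpu is still valid here); all others go
// to the core's shared queue.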
inline void
ThreadData::PutBack()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushFront(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushFront(this, priority);
	}
}


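// Makes the thread ready to run: re-adds its load to the core, refreshing
// the load estimate if the thread actually slept, then appends it to the
// tail of the appropriate run queue. Pinned threads use the queue of
// their previous CPU.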
inline void
ThreadData::Enqueue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (!fReady) {
		if (gTrackCoreLoad) {
			bigtime_t timeSlept = system_time() - fWentSleep;
			bool updateLoad = timeSlept > 0;

			fCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, !updateLoad);
			if (updateLoad) {
				fMeasureAvailableTime += timeSlept;
				_ComputeNeededLoad();
			}
		}

		fReady = true;
	}

	fThread->state = B_THREAD_READY;

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushBack(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushBack(this, priority);
	}
}


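// Removes the thread from its run queue. Returns false if it was no
// longer enqueued, i.e. someone else dequeued it first.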
inline bool
ThreadData::Dequeue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		if (!fEnqueued)
			return false;
		cpu->Remove(this);
		ASSERT(!fEnqueued);
		return true;
	}

	CoreRunQueueLocker _(fCore);
	if (!fEnqueued)
		return false;

	fCore->Remove(this);
	ASSERT(!fEnqueued);
	return true;
}


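// Accounts the given active time (in microseconds) toward this thread's
// load estimate; a no-op when core load tracking is disabled.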
inline void
ThreadData::UpdateActivity(bigtime_t active)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!gTrackCoreLoad)
		return;

	fMeasureAvailableTime += active;
	fMeasureAvailableActiveTime += active;
}


}	// namespace Scheduler


#endif	// KERNEL_SCHEDULER_THREAD_H