/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_THREAD_H
#define KERNEL_SCHEDULER_THREAD_H


#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"


namespace Scheduler {


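// Scheduler-private state attached to every thread. ThreadData keeps the
// priority penalty, quantum and interrupt time accounting, load estimation
// data and the core/CPU assignment. The two link base classes allow the
// object to sit on a per-core run queue and, for pinned threads, on a
// per-CPU one.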
struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
		RunQueueLinkImpl<ThreadData> {
private:
	inline void _InitBase();

	inline int32 _GetMinimalPriority() const;

	inline CoreEntry* _ChooseCore() const;
	inline CPUEntry* _ChooseCPU(CoreEntry* core,
		bool& rescheduleNeeded) const;

public:
	ThreadData(Thread* thread);

	void Init();
	void Init(CoreEntry* core);

	void Dump() const;

	inline int32 GetPriority() const { return fThread->priority; }
	inline Thread* GetThread() const { return fThread; }
	inline CPUSet GetCPUMask() const
		{ return fThread->cpumask.And(gCPUEnabled); }

	inline bool IsRealTime() const;
	inline bool IsIdle() const;

	inline bool HasCacheExpired() const;
	inline CoreEntry* Rebalance() const;

	inline int32 GetEffectivePriority() const;

	inline void StartCPUTime();
	inline void StopCPUTime();

	inline void CancelPenalty();
	inline bool ShouldCancelPenalty() const;

	bool ChooseCoreAndCPU(CoreEntry*& targetCore,
		CPUEntry*& targetCPU);

	inline void SetLastInterruptTime(bigtime_t interruptTime)
		{ fLastInterruptTime = interruptTime; }
	inline void SetStolenInterruptTime(bigtime_t interruptTime);

	bigtime_t ComputeQuantum() const;
	inline bigtime_t GetQuantumLeft();
	inline void StartQuantum();
	inline bool HasQuantumEnded(bool wasPreempted, bool hasYielded);

	inline void Continues();
	inline void GoesAway();
	inline void Dies();

	inline bigtime_t WentSleep() const { return fWentSleep; }
	inline bigtime_t WentSleepActive() const { return fWentSleepActive; }

	inline void PutBack();
	inline void Enqueue(bool& wasRunQueueEmpty);
	inline bool Dequeue();

	inline void UpdateActivity(bigtime_t active);

	inline bool IsEnqueued() const { return fEnqueued; }
	inline void SetDequeued() { fEnqueued = false; }

	inline int32 GetLoad() const { return fNeededLoad; }

	inline CoreEntry* Core() const { return fCore; }
	void UnassignCore(bool running = false);

	static void ComputeQuantumLengths();

private:
	inline void _IncreasePenalty();
	inline int32 _GetPenalty() const;

	void _ComputeNeededLoad();

	void _ComputeEffectivePriority() const;

	static bigtime_t _ScaleQuantum(bigtime_t maxQuantum,
		bigtime_t minQuantum, int32 maxPriority,
		int32 minPriority, int32 priority);

	bigtime_t fStolenTime;
	bigtime_t fQuantumStart;
	bigtime_t fLastInterruptTime;

	bigtime_t fWentSleep;
	bigtime_t fWentSleepActive;

	bool fEnqueued;
	bool fReady;

	Thread* fThread;

	int32 fPriorityPenalty;
	int32 fAdditionalPenalty;

	mutable int32 fEffectivePriority;
	mutable bigtime_t fBaseQuantum;

	bigtime_t fTimeUsed;

	bigtime_t fMeasureAvailableActiveTime;
	bigtime_t fMeasureAvailableTime;
	bigtime_t fLastMeasureAvailableTime;

	int32 fNeededLoad;
	uint32 fLoadMeasurementEpoch;

	CoreEntry* fCore;
};

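// Functor interface used by the scheduler to run an operation on each
// thread of a run queue, e.g. when all threads have to be moved off a CPU.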
class ThreadProcessing {
public:
	virtual ~ThreadProcessing();

	virtual void operator()(ThreadData* thread) = 0;
};


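// The lowest effective priority a penalized thread can drop to: one fifth
// of its base priority, clamped to [B_LOWEST_ACTIVE_PRIORITY, 25].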
inline int32
ThreadData::_GetMinimalPriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	const int32 kDivisor = 5;

	const int32 kMaximalPriority = 25;
	const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;

	int32 priority = GetPriority() / kDivisor;
	return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}


inline bool
ThreadData::IsRealTime() const
{
	return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY;
}


inline bool
ThreadData::IsIdle() const
{
	return GetPriority() == B_IDLE_PRIORITY;
}


inline bool
ThreadData::HasCacheExpired() const
{
	SCHEDULER_ENTER_FUNCTION();
	return gCurrentMode->has_cache_expired(this);
}


inline CoreEntry*
ThreadData::Rebalance() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->rebalance(this);
}


inline int32
ThreadData::GetEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fEffectivePriority;
}


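// Bumps the thread's priority penalty by one, unless the thread is idle or
// real-time, or the penalty would push its effective priority below the
// floor returned by _GetMinimalPriority().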
inline void
ThreadData::_IncreasePenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle() || IsRealTime())
		return;

	TRACE("increasing thread %ld penalty\n", fThread->id);

	int32 oldPenalty = fPriorityPenalty++;
	const int32 kMinimalPriority = _GetMinimalPriority();
	if (GetPriority() - oldPenalty <= kMinimalPriority)
		fPriorityPenalty = oldPenalty;

	_ComputeEffectivePriority();
}


inline void
ThreadData::StartCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->last_time = system_time();
}


inline void
ThreadData::StopCPUTime()
{
	SCHEDULER_ENTER_FUNCTION();

	// User time is tracked in thread_at_kernel_entry()
	SpinLocker threadTimeLocker(fThread->time_lock);
	fThread->kernel_time += system_time() - fThread->last_time;
	fThread->last_time = 0;
	threadTimeLocker.Unlock();

	// If the old thread's team has user time timers, check them now.
	Team* team = fThread->team;
	SpinLocker teamTimeLocker(team->time_lock);
	if (team->HasActiveUserTimeUserTimers())
		user_timer_check_team_user_timers(team);
}


inline void
ThreadData::CancelPenalty()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPenalty = fPriorityPenalty;
	fPriorityPenalty = 0;

	if (oldPenalty != 0) {
		TRACE("cancelling thread %ld penalty\n", fThread->id);
		_ComputeEffectivePriority();
	}
}


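// A thread that has been sleeping for more than half of the mode's base
// quantum no longer looks CPU bound and may have its penalty cancelled.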
inline bool
ThreadData::ShouldCancelPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCore == NULL)
		return false;
	return system_time() - fWentSleep > gCurrentMode->base_quantum / 2;
}


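// Records the interrupt time accumulated since SetLastInterruptTime() as
// stolen from this thread, so that GetQuantumLeft() can credit it back.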
inline void
ThreadData::SetStolenInterruptTime(bigtime_t interruptTime)
{
	SCHEDULER_ENTER_FUNCTION();

	interruptTime -= fLastInterruptTime;
	fStolenTime += interruptTime;
}


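// Returns how much of the current quantum remains, paying back up to one
// minimal quantum of stolen time and never returning less than the mode's
// minimal quantum.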
inline bigtime_t
ThreadData::GetQuantumLeft()
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t stolenTime = std::min(fStolenTime, gCurrentMode->minimal_quantum);
	ASSERT(stolenTime >= 0);
	fStolenTime -= stolenTime;

	bigtime_t quantum = ComputeQuantum() - fTimeUsed;
	quantum += stolenTime;
	quantum = std::max(quantum, gCurrentMode->minimal_quantum);

	return quantum;
}


inline void
ThreadData::StartQuantum()
{
	SCHEDULER_ENTER_FUNCTION();
	fQuantumStart = system_time();
}


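// Charges the time elapsed since StartQuantum() against the quantum. The
// quantum is considered over, at the cost of an increased penalty, when no
// time is left or the thread yielded or was preempted; any remainder is
// then carried over as stolen time.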
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t timeUsed = system_time() - fQuantumStart;
	ASSERT(timeUsed >= 0);
	fTimeUsed += timeUsed;

	bigtime_t timeLeft = ComputeQuantum() - fTimeUsed;
	timeLeft = std::max(bigtime_t(0), timeLeft);

	// If too little time is left, it's better to make the next quantum
	// a bit longer.
	bigtime_t skipTime = gCurrentMode->minimal_quantum / 2;
	if (hasYielded || wasPreempted || timeLeft <= skipTime) {
		fStolenTime += timeLeft;
		timeLeft = 0;
	}

	if (timeLeft == 0) {
		fAdditionalPenalty++;
		_IncreasePenalty();
		fTimeUsed = 0;
		return true;
	}

	return false;
}


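// Called when the thread remains ready; only its load estimate needs
// refreshing.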
inline void
ThreadData::Continues()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		_ComputeNeededLoad();
}


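// Called when the thread stops being ready (it blocks or sleeps). Leaving
// in the middle of a quantum costs an extra penalty; the wall-clock and
// core-active times at which the thread went to sleep are recorded for
// ShouldCancelPenalty() and the cache affinity checks.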
inline void
ThreadData::GoesAway()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);

	if (!HasQuantumEnded(false, false)) {
		fAdditionalPenalty++;
		_ComputeEffectivePriority();
	}

	fLastInterruptTime = 0;

	fWentSleep = system_time();
	fWentSleepActive = fCore->GetActiveTime();

	if (gTrackCoreLoad)
		fLoadMeasurementEpoch = fCore->RemoveLoad(fNeededLoad, false);
	fReady = false;
}


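// Called when the thread exits; its load is removed from the core for good.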
inline void
ThreadData::Dies()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fReady);
	if (gTrackCoreLoad)
		fCore->RemoveLoad(fNeededLoad, true);
	fReady = false;
}


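// Puts a thread that is still ready back at the head of its run queue: the
// per-CPU queue if the thread is pinned, the per-core queue otherwise.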
inline void
ThreadData::PutBack()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 priority = GetEffectivePriority();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		cpu->PushFront(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		fCore->PushFront(this, priority);
	}
}


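// Makes the thread ready and appends it to the back of the proper run
// queue (per-CPU for pinned threads, per-core otherwise). On a wakeup the
// core's load is updated with the time the thread spent sleeping.
// wasRunQueueEmpty reports whether the queue held only idle threads before
// the push.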
inline void
ThreadData::Enqueue(bool& wasRunQueueEmpty)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!fReady) {
		if (gTrackCoreLoad) {
			bigtime_t timeSlept = system_time() - fWentSleep;
			bool updateLoad = timeSlept > 0;

			fCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, !updateLoad);
			if (updateLoad) {
				fMeasureAvailableTime += timeSlept;
				_ComputeNeededLoad();
			}
		}

		fReady = true;
	}

	fThread->state = B_THREAD_READY;

	const int32 priority = GetEffectivePriority();
	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		ThreadData* top = cpu->PeekThread();
		wasRunQueueEmpty = (top == NULL || top->IsIdle());

		cpu->PushBack(this, priority);
	} else {
		CoreRunQueueLocker _(fCore);
		ASSERT(!fEnqueued);
		fEnqueued = true;

		ThreadData* top = fCore->PeekThread();
		wasRunQueueEmpty = (top == NULL || top->IsIdle());

		fCore->PushBack(this, priority);
	}
}


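// Removes the thread from its run queue. Returns true if the thread was
// found and removed, false if it had already been dequeued.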
inline bool
ThreadData::Dequeue()
{
	SCHEDULER_ENTER_FUNCTION();

	if (fThread->pinned_to_cpu > 0) {
		ASSERT(fThread->previous_cpu != NULL);
		CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

		CPURunQueueLocker _(cpu);
		if (!fEnqueued)
			return false;
		cpu->Remove(this);
		ASSERT(!fEnqueued);
		return true;
	}

	CoreRunQueueLocker _(fCore);
	if (!fEnqueued)
		return false;

	fCore->Remove(this);
	ASSERT(!fEnqueued);
	return true;
}


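// Adds the given active time to this thread's load measurement counters;
// a no-op when core load tracking is disabled.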
inline void
ThreadData::UpdateActivity(bigtime_t active)
{
	SCHEDULER_ENTER_FUNCTION();

	if (!gTrackCoreLoad)
		return;

	fMeasureAvailableTime += active;
	fMeasureAvailableActiveTime += active;
}


} // namespace Scheduler


#endif // KERNEL_SCHEDULER_THREAD_H