/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */

#include "scheduler_thread.h"


using namespace Scheduler;


static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

const int32 kMaximumQuantumLengthsCount = 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];


void
ThreadData::_InitBase()
{
	fPriorityPenalty = 0;
	fAdditionalPenalty = 0;
	fEffectivePriority = GetPriority();
	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
	fCPUBound = false;

	fTimeUsed = 0;
	fStolenTime = 0;

	fMeasureAvailableActiveTime = 0;
	fLastMeasureAvailableTime = 0;
	fMeasureAvailableTime = 0;

	fNeededLoad = 0;

	fWentSleep = 0;
	fWentSleepActive = 0;
	fWentSleepCount = 0;
	fWentSleepCountIdle = 0;

	fEnqueued = false;
	fReady = false;
}


inline CoreEntry*
ThreadData::_ChooseCore() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->choose_core(this);
}


inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	// Prefer the CPU the thread last ran on, as long as it belongs to the
	// chosen core and has not been disabled, to preserve cache affinity.
	if (fThread->previous_cpu != NULL) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	// Otherwise pick the CPU of this core that currently runs the lowest
	// priority thread.
	CoreCPUHeapLocker _(core);
	CPUEntry* cpu = core->CPUHeap()->PeekRoot();
	ASSERT(cpu != NULL);

	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}


ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
}


void
ThreadData::Init()
{
	_InitBase();

	Thread* currentThread = thread_get_current_thread();
	ThreadData* currentThreadData = currentThread->scheduler_data;
	fCore = currentThreadData->fCore;

	// Inherit the creating thread's penalties, capped so that the effective
	// priority cannot drop below the minimal priority for this thread.
	if (!IsRealTime()) {
		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
			std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;

		_ComputeEffectivePriority();
	}
}


void
ThreadData::Init(CoreEntry* core)
{
	_InitBase();

	fCore = core;
	fReady = true;
}


void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	int32 additionalPenalty = 0;
	const int kMinimalPriority = _GetMinimalPriority();
	if (kMinimalPriority > 0)
		additionalPenalty = fAdditionalPenalty % kMinimalPriority;
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		additionalPenalty, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\twent_sleep_count:\t%" B_PRId32 "\n", fWentSleepCount);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}
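

// Chooses the core and CPU this thread should run on. Either of the two may
// be preselected by the caller; whatever is left NULL is picked here. If the
// thread moves to a different core, its load estimate moves with it. Returns
// true if the chosen CPU needs to be rescheduled.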
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	// When migrating to another core, transfer this thread's load estimate.
	if (fReady && fCore != targetCore && fCore != NULL) {
		fCore->UpdateLoad(-fNeededLoad);
		targetCore->UpdateLoad(fNeededLoad);
	}

	fCore = targetCore;
	return rescheduleNeeded;
}


bigtime_t
ThreadData::ComputeQuantum() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsRealTime())
		return fBaseQuantum;

	// Shorten the quantum as the average number of threads per CPU on this
	// core grows, so that scheduling latency stays bounded.
	int32 threadCount = fCore->ThreadCount();
	if (fCore->CPUCount() > 0)
		threadCount /= fCore->CPUCount();

	bigtime_t quantum = fBaseQuantum;
	if (threadCount < kMaximumQuantumLengthsCount)
		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);
	return quantum;
}


/* static */ void
ThreadData::ComputeQuantumLengths()
{
	SCHEDULER_ENTER_FUNCTION();

	// Map each priority to a base quantum: everything at or above
	// B_URGENT_DISPLAY_PRIORITY gets the mode's base quantum, while the two
	// lower priority bands interpolate linearly towards longer quanta.
	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
			sQuantumLengths[priority] = kQuantum0;
			continue;
		}

		const bigtime_t kQuantum1
			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
		if (priority > B_NORMAL_PRIORITY) {
			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
			continue;
		}

		const bigtime_t kQuantum2
			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
	}

	// Cap the quantum so that with N threads per CPU a full scheduling round
	// still fits into the mode's maximum latency.
	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
		threadCount++) {

		bigtime_t quantum = gCurrentMode->maximum_latency;
		if (threadCount != 0)
			quantum /= threadCount;
		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
		sMaximumQuantumLengths[threadCount] = quantum;
	}
}


inline int32
ThreadData::_GetPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fPriorityPenalty;
}


void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	int32 delta = fNeededLoad - oldLoad;
	fCore->UpdateLoad(delta);
}
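

// Computes the effective priority from the base priority and the accumulated
// penalties, then refreshes the cached base quantum. Worked example
// (illustrative numbers only): priority 10 with a priority penalty of 2 drops
// to 8; an additional penalty of 11 then subtracts 11 % 8 = 3, leaving an
// effective priority of 5.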
void
ThreadData::_ComputeEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle())
		fEffectivePriority = B_IDLE_PRIORITY;
	else if (IsRealTime())
		fEffectivePriority = GetPriority();
	else {
		fEffectivePriority = GetPriority();
		fEffectivePriority -= _GetPenalty();
		if (fEffectivePriority > 0)
			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;

		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
	}

	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
}


/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
	int32 maxPriority, int32 minPriority, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(priority <= maxPriority);
	ASSERT(priority >= minPriority);

	// Linear interpolation: priority == minPriority yields maxQuantum,
	// priority == maxPriority yields minQuantum.
	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
	result /= maxPriority - minPriority;
	return maxQuantum - result;
}


ThreadProcessing::~ThreadProcessing()
{
}