/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the cpu functions (init, etc). */


#include <cpu.h>
#include <arch/cpu.h>

#include <new>
#include <string.h>

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

uint32 gCPUCacheLevelCount;
static cpu_topology_node sCPUTopology;

static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

static spinlock sSetCpuLock;


status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	// Iterate over all installed cpufreq modules and keep a reference to
	// the one with the highest rank.
	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
}


static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	// Iterate over all installed cpuidle modules and keep a reference to
	// the one with the highest rank.
	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}
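/* Illustrative sketch (not part of the original file): a cpufreq module
 * competing in load_cpufreq_module() above would export a
 * cpufreq_module_info whose "rank" decides which module wins. The module
 * name, hook names and field order below are hypothetical; only the
 * members actually used in this file (info, rank and the three hooks)
 * are assumed:
 *
 *	static cpufreq_module_info sExampleModule = {
 *		{ CPUFREQ_MODULES_PREFIX "/example/v1", 0, NULL },	// info
 *		1.0f,					// rank
 *		example_set_scheduler_mode,		// cpufreq_set_scheduler_mode
 *		example_increase_performance,	// cpufreq_increase_performance
 *		example_decrease_performance,	// cpufreq_decrease_performance
 *	};
 */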
status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu()
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}


bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	// Retry the read until it has not been interleaved with a writer.
	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
}


static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}


static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	// Compact the children array by dropping NULL entries and assign
	// final, consecutive ids to the remaining nodes.
	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}


status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	// The ids reported by the hardware may be sparse; renumber the nodes
	// so that each level uses consecutive ids starting from 0.
	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}
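/* Illustrative example (not part of the original file): assuming the levels
 * are, from root to leaf, package, core and SMT (with CPU_TOPOLOGY_SMT as
 * the leaf, as cpu_create_topology_node() implies), a hypothetical machine
 * with two packages, two cores per package and two SMT threads per core
 * would yield the following tree from cpu_build_topology_tree():
 *
 *	root (level CPU_TOPOLOGY_LEVELS)
 *	+- package 0
 *	|	+- core 0: smt 0 (cpu 0), smt 1 (cpu 1)
 *	|	+- core 1: smt 0 (cpu 2), smt 1 (cpu 3)
 *	+- package 1
 *		+- core 0: smt 0 (cpu 4), smt 1 (cpu 5)
 *		+- core 1: smt 0 (cpu 6), smt 1 (cpu 7)
 *
 * The id of each SMT leaf is the logical cpu number; ids at the other
 * levels are the consecutive values assigned by cpu_rebuild_topology_tree().
 */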
void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}


void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}


void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


// #pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}


status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU.

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// someone reenabled the CPU while we were rescheduling
		if (!gCPU[cpu].disabled)
			return B_OK;

		ASSERT(smp_get_current_cpu() != cpu);

		// Wait until the disabled CPU is running nothing but its idle
		// thread before returning.
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}
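/* Illustrative usage sketch (not part of the original file): a caller that
 * needs to busy-wait on a flag would typically wrap cpu_wait() above in a
 * loop, since cpu_wait() may return before the condition holds. The
 * variable and constant names below are hypothetical:
 *
 *	while (atomic_get(&sFlag) != kDone)
 *		cpu_wait(&sFlag, kDone);
 */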