xref: /haiku/src/system/kernel/cpu.cpp (revision ed24eb5ff12640d052171c6a7feba37fab8a75d1)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the CPU functions (init, etc.). */


#include <cpu.h>
#include <arch/cpu.h>
#include <arch/system_info.h>

#include <string.h>

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

uint32 gCPUCacheLevelCount;
static cpu_topology_node sCPUTopology;

static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

static spinlock sSetCpuLock;


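// The cpu_init*() functions below are thin wrappers around the
// architecture-specific counterparts declared in <arch/cpu.h>; each one is
// called at the stage of kernel initialization its name indicates.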
status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


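/*!	Iterates over all modules published under CPUFREQ_MODULES_PREFIX and keeps
	a reference to the highest-ranked one in sCPUPerformanceModule; every other
	module that was opened is released again via put_module(). If a module was
	found, the scheduler is asked to re-evaluate its policy.
*/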
static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
	else
		scheduler_update_policy();
}


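/*!	Same selection logic as load_cpufreq_module(), but for cpuidle modules:
	the highest-ranked module found under CPUIDLE_MODULES_PREFIX is kept in
	sCPUIdleModule.
*/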
static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}


status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}


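/*!	Returns the total time the given CPU has spent running threads.
	active_time is a 64-bit counter, so it is read under a seqlock to get a
	consistent value even on 32-bit machines: the loop retries until
	release_read_seqlock() reports that no writer modified the counter while
	it was being read.
*/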
bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


uint64
cpu_frequency(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;
	uint64 frequency = 0;
	arch_get_frequency(&frequency, cpu);
	return frequency;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// TODO: data cache
	if ((B_INVALIDATE_ICACHE & flags) != 0) {
		arch_cpu_sync_icache(address, length);
	}
}


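/*!	Allocates the child node with index \a id on \a node, one topology level
	below \a node. For levels above SMT the new node gets a children array
	sized for the maximum ID of the next lower level (taken from \a maxID);
	SMT nodes are leaves and get no children.
*/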
static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}


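/*!	Compacts the children arrays that cpu_build_topology_tree() filled in
	sparsely (indexed by hardware topology IDs): NULL gaps are removed and
	each non-SMT node is renumbered with a dense, per-level ID from \a lastID.
	SMT leaves keep the logical CPU number assigned to them earlier.
*/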
static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}


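/*!	Builds the global CPU topology tree from the topology_id values that the
	architecture code stored in each gCPU entry. The first pass determines the
	maximum ID used on every topology level, the second pass creates a node
	path (package, core, SMT) for every logical CPU, and a final pass compacts
	the tree and assigns dense per-level IDs via cpu_rebuild_topology_tree().
*/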
status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


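/*!	Returns the root of the topology tree built by cpu_build_topology_tree().
	A minimal sketch (illustrative only, not part of the kernel sources) of
	how a consumer might walk the tree; it relies only on the
	cpu_topology_node fields already used above:

		static void
		dump_topology(const cpu_topology_node* node, int depth)
		{
			dprintf("%*slevel %d, id %" B_PRId32 "\n", depth * 2, "",
				(int)node->level, node->id);
			for (int32 i = 0; i < node->children_count; i++)
				dump_topology(node->children[i], depth + 1);
		}

		// dump_topology(get_cpu_topology(), 0);
*/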
const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}


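/*!	Propagates the scheduler's operation mode to the loaded cpufreq and
	cpuidle modules (if any), so they can adjust their policies accordingly.
*/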
void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


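// increase_cpu_performance() and decrease_cpu_performance() forward the
// requested performance change to the selected cpufreq module; without such
// a module they report B_NOT_SUPPORTED.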
status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}


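/*!	Puts the calling CPU into an idle state, either through the loaded cpuidle
	module or via the architecture's default idle routine. Interrupts must be
	enabled, since an interrupt is what eventually wakes the CPU up again.
*/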
void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}


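/*!	Used in spin-wait loops: a cpuidle module may wait for \a variable to
	reach \a test more efficiently (e.g. using a monitor/wait facility of the
	CPU), while the fallback merely pauses the CPU for one iteration of the
	caller's loop.
*/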
void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


//	#pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}


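/*!	Enables or disables the given CPU (root only). sSetCpuLock guards against
	two callers racing to disable the last remaining CPU. When disabling, the
	caller yields until the target CPU has been vacated, i.e. until it is only
	running its idle thread; if the CPU gets re-enabled in the meantime, the
	wait is simply abandoned.
*/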
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (geteuid() != 0)
		return B_PERMISSION_DENIED;
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// someone reenabled the CPU while we were rescheduling
		if (!gCPU[cpu].disabled)
			return B_OK;

		ASSERT(smp_get_current_cpu() != cpu);
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}