xref: /haiku/src/system/kernel/cpu.cpp (revision 02354704729d38c3b078c696adc1bbbd33cbcf72)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the CPU functions (init, etc.). */


#include <cpu.h>
#include <arch/cpu.h>
#include <arch/system_info.h>

#include <string.h>

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

uint32 gCPUCacheLevelCount;
static cpu_topology_node sCPUTopology;

static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

static spinlock sSetCpuLock;


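// The cpu_init(), cpu_init_percpu() and cpu_init_post_vm() hooks below simply
// forward the respective boot stage to the architecture specific
// implementation.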
status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


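/*!	Scans all modules under CPUFREQ_MODULES_PREFIX and keeps a reference to
	the highest ranked one in sCPUPerformanceModule; lower ranked modules are
	put back right away. If a module was found, the scheduler is asked to
	update its policy accordingly.
*/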
static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
	else
		scheduler_update_policy();
}


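/*!	Selects the highest ranked module under CPUIDLE_MODULES_PREFIX, using the
	same logic as load_cpufreq_module().
*/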
static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}


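/*!	Runs the architecture specific post-module initialization and then loads
	the cpufreq and cpuidle modules.
*/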
status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}


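/*!	Returns the accumulated busy time of the given CPU. The value is read
	under gCPU[cpu].active_time_lock, a seqlock, so the read is simply retried
	until it did not overlap with a concurrent update.
*/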
bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


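/*!	Returns the current frequency of the given CPU as reported by the
	architecture backend, or 0 if the CPU index is out of range.
*/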
uint64
cpu_frequency(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;
	uint64 frequency = 0;
	arch_get_frequency(&frequency, cpu);
	return frequency;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
}


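/*!	Allocates the child with index \a id one level below \a node. Unless the
	new node is an SMT leaf, its children array is allocated (sized by the
	maximum ID of the next lower level) and zero-initialized.
*/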
static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}


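/*!	Compacts the children array of \a node by dropping NULL entries, assigns
	new sequential IDs to the remaining non-SMT nodes and recurses into them.
*/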
static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}


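/*!	Builds the global topology tree from each CPU's topology_id values: the
	possible number of children per level is determined first, then every CPU
	is inserted as an SMT leaf, and finally the tree is compacted and
	renumbered by cpu_rebuild_topology_tree().
*/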
status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}


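/*!	Forwards the scheduler mode to the loaded cpufreq and cpuidle modules,
	if any.
*/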
void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}


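/*!	Enters the idle state, preferably through the selected cpuidle module and
	otherwise via arch_cpu_idle(). May only be called with interrupts enabled.
*/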
void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}


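/*!	Waits on \a variable, comparing it against \a test: with a cpuidle module
	the wait is delegated to its cpuidle_wait() hook; without one only a
	single arch_cpu_pause() is issued, so callers have to loop themselves.
*/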
void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


//	#pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}


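/*!	Enables or disables the given CPU on behalf of userland. Requires root
	privileges, refuses to disable the last remaining enabled CPU, and when
	disabling waits until the target CPU runs nothing but its idle thread.
*/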
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (geteuid() != 0)
		return B_PERMISSION_DENIED;
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// someone reenabled the CPU while we were rescheduling
		if (!gCPU[cpu].disabled)
			return B_OK;

		ASSERT(smp_get_current_cpu() != cpu);
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}