/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the CPU functions (init, topology, cpufreq/cpuidle, etc.). */


#include <cpu.h>
#include <arch/cpu.h>
#include <arch/system_info.h>

#include <new>

#include <string.h>

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

uint32 gCPUCacheLevelCount;

// root of the CPU topology tree built in cpu_build_topology_tree()
static cpu_topology_node sCPUTopology;

// highest-ranked cpufreq/cpuidle modules, picked in cpu_init_post_modules()
static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

// serializes _user_set_cpu_enabled() so the last CPU cannot be disabled
static spinlock sSetCpuLock;


status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


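// Picks the highest-ranked cpufreq module from the module list and keeps a
// reference to it in sCPUPerformanceModule; every other candidate is put back
// right away.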
static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
	else
		scheduler_update_policy();
}


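// Same selection scheme as load_cpufreq_module(), but for cpuidle modules:
// only the highest-ranked module is kept in sCPUIdleModule.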
static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}


status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}


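// Returns the given CPU's accumulated active (non-idle) time. The value is
// read under a seqlock and the read is retried until it was not interrupted
// by a writer, so no torn value can be returned.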
bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


uint64
cpu_frequency(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;
	uint64 frequency = 0;
	arch_get_frequency(&frequency, cpu);
	return frequency;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
}


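// Allocates the child node with the given "id" one topology level below
// "node" and, unless the new node is an SMT leaf, its (still empty) children
// array sized after maxID[].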
static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}


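// Compacts the children arrays (dropping the NULL slots left over from tree
// creation) and assigns consecutive IDs to all non-SMT nodes.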
static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}


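// Builds the global CPU topology tree from every CPU's topology_id[] values.
// The tree is first created with one (possibly unused) slot per possible ID
// and then compacted and renumbered by cpu_rebuild_topology_tree().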
status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}


void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}


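// Idles the current CPU, preferring the loaded cpuidle module over the
// architecture's default idle routine.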
void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}


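// Best-effort wait on *variable: forwarded to the cpuidle module if one is
// loaded, otherwise the CPU is simply paused once (callers are expected to
// loop).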
void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


//	#pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}


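// Enables or disables the given CPU on behalf of userland (root only).
// Disabling the last remaining enabled CPU is refused.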
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (geteuid() != 0)
		return B_PERMISSION_DENIED;
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	// only involve the scheduler if the requested state differs from the
	// current one
	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// if someone re-enabled the CPU while we were rescheduling, we are done
		if (!gCPU[cpu].disabled)
			return B_OK;

		// wait until the idle thread is the only thread still running on the
		// disabled CPU
		ASSERT(smp_get_current_cpu() != cpu);
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}