xref: /haiku/src/system/kernel/cpu.cpp (revision 984f843b917a1c4e077915c5961a6ef1cf8dabc7)
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

/* This file contains the CPU functions (init, topology, cpufreq/cpuidle). */


#include <cpu.h>
#include <arch/cpu.h>
#include <arch/system_info.h>

#include <string.h>

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];
CPUSet gCPUEnabled;

uint32 gCPUCacheLevelCount;
static cpu_topology_node sCPUTopology;

static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

static spinlock sSetCpuLock;


status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}


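/*!	Scans all modules under CPUFREQ_MODULES_PREFIX and keeps the one with the
	highest rank as the active CPU frequency (performance) module.
*/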
static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

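			// keep only the highest ranked module, release the other one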
			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
	else
		scheduler_update_policy();
}


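/*!	Scans all modules under CPUIDLE_MODULES_PREFIX and, analogously to
	load_cpufreq_module(), keeps the highest ranked one as the active
	CPU idle module.
*/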
static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}


status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;
	gCPUEnabled.SetBit(curr_cpu);

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}


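/*!	Returns the accumulated active (non-idle) time of the given CPU in
	microseconds. The value is read under a seqlock and re-read until a
	consistent snapshot is obtained.
*/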
bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


uint64
cpu_frequency(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;
	uint64 frequency = 0;
	arch_get_frequency(&frequency, cpu);
	return frequency;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// TODO: data cache
	if ((B_INVALIDATE_ICACHE & flags) != 0) {
		arch_cpu_sync_icache(address, length);
	}
}


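/*!	Allocates the child node with index \a id one topology level below
	\a node, and preallocates (and zeroes) the new node's own children array,
	unless the new node is at the SMT (leaf) level.
*/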
static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}


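/*!	Compacts the children arrays of the topology tree by removing NULL
	entries, and assigns consecutive IDs to all non-leaf (non-SMT) nodes,
	depth first.
*/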
static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}


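/*!	Builds the global CPU topology tree from the per-CPU topology IDs set up
	by the architecture code, then compacts it and renumbers its nodes.
*/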
status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}


void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}


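/*!	Puts the calling CPU into an idle/low power state until it is woken up
	again, using the loaded cpuidle module if available and the generic
	architecture idle implementation otherwise.
*/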
void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}


void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


//	#pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}


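/*!	Enables or disables the given CPU. Only root may call this, and at least
	one CPU must always remain enabled. When disabling a CPU, the call waits
	until that CPU has switched to its idle thread.
*/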
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (geteuid() != 0)
		return B_PERMISSION_DENIED;
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// someone reenabled the CPU while we were rescheduling
		if (!gCPU[cpu].disabled)
			return B_OK;

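		// wait until the target CPU has actually switched to its idle
		// thread, bailing out early if it gets reenabled in the meantime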
		ASSERT(smp_get_current_cpu() != cpu);
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}