xref: /haiku/src/system/boot/platform/efi/arch/x86/arch_smp.cpp (revision fc7456e9b1ec38c941134ed6d01c438cf289381e)
/*
 * Copyright 2021-2022 Haiku, Inc. All rights reserved.
 * Released under the terms of the MIT License.
 *
 * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
 * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "arch_smp.h"

#include <kernel.h>
#include <safemode.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/menu.h>
#include <arch/x86/apic.h>
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_system_info.h>

#include "mmu.h"
#include "acpi.h"


#define NO_SMP 0

//#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


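// Assembly helper that executes a fixed number of instructions; used below to
// time the APIC timer against system_time().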
extern "C" void execute_n_instructions(int count);

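// Helpers implemented together with the SMP trampoline (outside this file):
// they copy the real mode startup stub into low memory, stage its arguments
// relative to the trampoline stack, and read back the sentinel the stub
// clears once it no longer needs that stack.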
void copy_trampoline_code(uint64 trampolineCode, uint64 trampolineStack);
void prepare_trampoline_args(uint64 trampolineCode, uint64 trampolineStack,
	uint32 pagedir, uint64 kernelEntry, addr_t virtKernelArgs,
	uint32 currentCpu);
uint32 get_sentinel(uint64 trampolineStack);


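// The loader still runs on the firmware's identity mapping, so the local APIC
// registers can be accessed directly through their physical address.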
static uint32
apic_read(uint32 offset)
{
	return *(volatile uint32 *)((addr_t)gKernelArgs.arch_args.apic_phys + offset);
}


static void
apic_write(uint32 offset, uint32 data)
{
	*(volatile uint32 *)((addr_t)gKernelArgs.arch_args.apic_phys + offset) = data;
}


static status_t
acpi_do_smp_config(void)
{
	TRACE("smp: using ACPI to detect MP configuration\n");

	// reset CPU count
	gKernelArgs.num_cpus = 0;

	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);

	if (madt == NULL) {
		TRACE("smp: Failed to find MADT!\n");
		return B_ERROR;
	}

	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
	TRACE("smp: local apic address is 0x%" B_PRIx32 "\n", madt->local_apic_address);

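	// The fixed MADT header is followed by a packed list of variable-length
	// interrupt controller entries; each starts with a common type/length
	// pair, which lets us walk the list entry by entry.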
	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
	while (apic < end) {
		switch (apic->type) {
			case ACPI_MADT_LOCAL_APIC:
			{
				if (gKernelArgs.num_cpus == SMP_MAX_CPUS) {
					TRACE("smp: already reached maximum CPUs (%d)\n",
						SMP_MAX_CPUS);
					break;
				}

				acpi_local_apic *localApic = (acpi_local_apic *)apic;
				TRACE("smp: found local APIC with id %u\n",
					localApic->apic_id);
				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
					TRACE("smp: APIC is disabled and will not be used\n");
					break;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= localApic->apic_id;
				// TODO: the MADT doesn't tell us the APIC version; store 0x10
				// to mark an integrated local APIC.
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= 0x10;
				gKernelArgs.num_cpus++;
				break;
			}

			case ACPI_MADT_IO_APIC: {
				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
				TRACE("smp: found io APIC with id %" B_PRIu32 " and address 0x%" B_PRIx32 "\n",
					ioApic->io_apic_id, ioApic->io_apic_address);
				if (gKernelArgs.arch_args.ioapic_phys == 0)
					gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
				break;
			}
			default:
				break;
		}

		apic = (acpi_apic *)((uint8 *)apic + apic->length);
	}

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}


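// Measure how many APIC timer ticks elapse per second of system_time(): let
// the timer count down from 0xffffffff with a divider of 1 while busy-looping
// a fixed number of instructions, then scale the consumed ticks by the
// elapsed wall time in microseconds.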
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	TRACE("calculating apic timer conversion factor\n");

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED;
		// timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	count = 0xffffffff - count;

	gKernelArgs.arch_args.apic_time_cv_factor
		= (uint32)((1000000.0 / (t2 - t1)) * count);

	TRACE("APIC ticks/sec = %" B_PRId32 "\n",
		gKernelArgs.arch_args.apic_time_cv_factor);
}


//	#pragma mark -


int
arch_smp_get_current_cpu(void)
{
	if (gKernelArgs.arch_args.apic == NULL)
		return 0;

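	// In xAPIC mode the local APIC ID sits in the top byte of the APIC_ID
	// register; match it against the IDs collected from the MADT.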
	uint8 apicID = apic_read(APIC_ID) >> 24;
	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
		if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID)
			return i;
	}

	return 0;
}


void
arch_smp_init_other_cpus(void)
{
	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
		// SMP has been disabled!
		TRACE("smp disabled per safemode setting\n");
		gKernelArgs.num_cpus = 1;
	}

	if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) {
		TRACE("local apic disabled per safemode setting, disabling smp\n");
		gKernelArgs.arch_args.apic_phys = 0;
		gKernelArgs.num_cpus = 1;
	}

	if (gKernelArgs.arch_args.apic_phys == 0)
		return;

	TRACE("smp: found %" B_PRId32 " cpu%s\n", gKernelArgs.num_cpus,
		gKernelArgs.num_cpus != 1 ? "s" : "");
	TRACE("smp: apic_phys = %lx\n", (addr_t)gKernelArgs.arch_args.apic_phys);
	TRACE("smp: ioapic_phys = %lx\n",
		(addr_t)gKernelArgs.arch_args.ioapic_phys);

	// map in the apic
	gKernelArgs.arch_args.apic = (void *)mmu_map_physical_memory(
		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);

	TRACE("smp: apic (mapped) = %lx\n", (addr_t)gKernelArgs.arch_args.apic.Pointer());

	// calculate how fast the apic timer is
	calculate_apic_timer_conversion_factor();

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create the final stack that the trampoline code will switch the
		// AP processor onto
		void* stack = NULL;
		const size_t size = KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
		if (platform_allocate_region(&stack, size, 0, false) != B_OK) {
			panic("Unable to allocate AP stack");
		}
		memset(stack, 0, size);
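		// fix_address() translates the loader's allocation into the address
		// the kernel will use for it once its virtual address space is live.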
		gKernelArgs.cpu_kstack[i].start = fix_address((uint64_t)stack);
		gKernelArgs.cpu_kstack[i].size = size;
	}
}


void
arch_smp_boot_other_cpus(uint32 pagedir, uint64 kernelEntry, addr_t virtKernelArgs)
{
	TRACE("trampolining other cpus\n");

	// Pick a stack and a code area for the SMP trampoline. Both must lie
	// below 1 MB physically, since the APs start out in real mode
	// (0xa0000 - 0xfffff is reserved by the BIOS).
	uint64 trampolineCode = 0x9000;
	uint64 trampolineStack = 0x8000;

	// copy the trampoline code over
	copy_trampoline_code(trampolineCode, trampolineStack);

	// boot the cpus
	TRACE("we have %" B_PRId32 " CPUs to boot...\n", gKernelArgs.num_cpus - 1);
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		TRACE("trampolining CPU %" B_PRId32 "\n", i);
		uint32 config;
		uint64 numStartups;
		uint32 j;

		prepare_trampoline_args(trampolineCode, trampolineStack,
			pagedir, kernelEntry, virtKernelArgs, i);

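		// Bring the AP up with the classic INIT-SIPI-SIPI sequence: assert
		// and deassert an INIT IPI, wait 10 ms, then (for integrated APICs)
		// send up to two STARTUP IPIs pointing at the trampoline page.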
		/* clear apic errors */
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

		/* send (aka assert) INIT IPI */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set the target processor */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		// wait until the IPI is no longer pending
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* deassert INIT */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		// wait until the IPI is no longer pending
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);
		/* is this an integrated local APIC or an external 82489DX?
		   only the former (version 0x1x) needs STARTUP IPIs */
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
			apic_write(APIC_ERROR_STATUS, 0);

			/* set the target processor */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the STARTUP IPI; its vector field holds the page
			   number of the trampoline code */
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait the prescribed 200us */
			spin(200);

			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		while (get_sentinel(trampolineStack) != 0)
			spin(1000);
	}

	TRACE("done trampolining\n");
}


void
arch_smp_add_safemode_menus(Menu *menu)
{
	MenuItem *item;

	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
		item->SetHelpText("Disables using the IO APIC for interrupt routing, "
			"forcing the use of the legacy PIC instead.");
	}

	if (gKernelArgs.arch_args.apic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_APIC);
		item->SetHelpText("Disables using the local APIC, which also "
			"disables SMP.");

		cpuid_info info;
		if (get_current_cpuid(&info, 1, 0) == B_OK
				&& (info.regs.ecx & IA32_FEATURE_EXT_X2APIC) != 0) {
			menu->AddItem(item = new(nothrow) MenuItem("Disable X2APIC"));
			item->SetType(MENU_ITEM_MARKABLE);
			item->SetData(B_SAFEMODE_DISABLE_X2APIC);
			item->SetHelpText("Disables using X2APIC.");
		}

		if (get_current_cpuid(&info, 7, 0) == B_OK
				&& ((info.regs.ebx & (IA32_FEATURE_SMEP
					| IA32_FEATURE_SMAP)) != 0)) {
			menu->AddItem(item = new(nothrow) MenuItem(
				"Disable SMEP and SMAP"));
			item->SetType(MENU_ITEM_MARKABLE);
			item->SetData(B_SAFEMODE_DISABLE_SMEP_SMAP);
			item->SetHelpText("Disables using SMEP and SMAP.");
		}
	}

	cpuid_info info;
	if (get_current_cpuid(&info, 1, 0) == B_OK
		&& (info.regs.edx & IA32_FEATURE_PAT) != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable PAT"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_PAT);
		item->SetHelpText("Disables using page attribute tables for memory "
			"type setting, falling back to MTRRs.");
	}

	if (gKernelArgs.num_cpus < 2)
		return;

	item = new(nothrow) MenuItem("Disable SMP");
	menu->AddItem(item);
	item->SetData(B_SAFEMODE_DISABLE_SMP);
	item->SetType(MENU_ITEM_MARKABLE);
	item->SetHelpText("Disables all but one CPU core.");
}


void
arch_smp_init(void)
{
	cpuid_info info;
	if (get_current_cpuid(&info, 1, 0) != B_OK)
		return;

	if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) {
		// No local APIC is present; as it forms the basis for all inter-CPU
		// communication and therefore SMP, we don't need to go any further.
		TRACE("no local APIC present, not attempting SMP init\n");
		return;
	}

	// First try to find the ACPI tables for the MP configuration, as ACPI
	// covers physical as well as logical MP configurations, i.e. multiple
	// CPUs, multiple cores, or hyper-threading.
	if (acpi_do_smp_config() == B_OK) {
		TRACE("smp init success\n");
		return;
	}

	// Everything failed or we are not running an SMP system; reset anything
	// that might have been set through an incomplete configuration attempt.
	gKernelArgs.arch_args.apic_phys = 0;
	gKernelArgs.arch_args.ioapic_phys = 0;
	gKernelArgs.num_cpus = 1;
}