xref: /haiku/src/system/boot/platform/bios_ia32/smp.cpp (revision 4f2fd49bdc6078128b1391191e4edac647044c3d)
1 /*
2  * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
3  * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8 */
9 
10 
11 #include "smp.h"
12 #include "mmu.h"
13 #include "acpi.h"
14 #include "hpet.h"
15 
16 #include <KernelExport.h>
17 
18 #include <kernel.h>
19 #include <safemode.h>
20 #include <boot/stage2.h>
21 #include <boot/menu.h>
22 #include <arch/x86/arch_acpi.h>
23 #include <arch/x86/arch_apic.h>
24 #include <arch/x86/arch_system_info.h>
25 
26 #include <string.h>
27 
// Set NO_SMP to 1 to force single-CPU operation (debugging aid).
#define NO_SMP 0

// Define TRACE_SMP to get verbose boot-time logging from this file.
#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif
36 
// Memory operand layout expected by the lidt/lgdt instructions:
// a 16-bit table limit followed by a 32-bit linear base address.
// Must be packed so no padding is inserted between the two fields.
struct gdt_idt_descr {
	uint16 a;	// table limit (size in bytes minus one)
	uint32 *b;	// table base address
} _PACKED;
41 
// Physical memory ranges that are searched for the MP floating pointer
// structure: the last kilobyte of conventional memory (presumably the
// EBDA area -- TODO confirm) and the BIOS ROM region 0xf0000-0xfffff.
// The list is terminated by an entry with zero length.
static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};
47 
// Implemented in assembly; executes a fixed number of instructions so the
// APIC timer calibration below has a repeatable amount of work to time.
extern "C" void execute_n_instructions(int count);

// Start/end markers of the trampoline code that is copied below 1 MB and
// executed by the application processors when they are started up.
extern "C" void smp_trampoline(void);
extern "C" void smp_trampoline_end(void);


static int smp_get_current_cpu(void);
55 
56 
57 static uint32
58 apic_read(uint32 offset)
59 {
60 	return *(uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
61 }
62 
63 
64 static void
65 apic_write(uint32 offset, uint32 data)
66 {
67 	uint32 *addr = (uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
68 	*addr = data;
69 }
70 
71 
72 static int
73 smp_get_current_cpu(void)
74 {
75 	if (gKernelArgs.arch_args.apic == NULL)
76 		return 0;
77 
78 	return gKernelArgs.arch_args.cpu_os_id[(apic_read(APIC_ID) & 0xffffffff) >> 24];
79 }
80 
81 
82 static mp_floating_struct *
83 smp_mp_probe(uint32 base, uint32 limit)
84 {
85 	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
86 	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) {
87 		if (*pointer == MP_FLOATING_SIGNATURE) {
88 			TRACE(("smp_mp_probe: found floating pointer structure at %p\n", pointer));
89 			return (mp_floating_struct *)pointer;
90 		}
91 	}
92 
93 	return NULL;
94 }
95 
96 
/**	Parses the Intel MP Specification configuration table referenced by
 *	\a floatingStruct: records every processor's APIC id and version, the
 *	physical addresses of the local and IO APICs, and logs bus and
 *	interrupt entries.
 *	Returns B_OK if at least one processor was found, B_ERROR otherwise.
 */
static status_t
smp_do_mp_config(mp_floating_struct *floatingStruct)
{
	TRACE(("smp: intel mp version %s, %s",
		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
		(floatingStruct->mp_feature_2 & 0x80)
			? "imcr and pic compatibility mode.\n"
			: "virtual wire compatibility mode.\n"));

	if (floatingStruct->config_table == NULL) {
#if 1
		// XXX need to implement
		// A NULL config table means the system uses one of the MP spec's
		// default configurations; fall back to a single CPU for now.
		TRACE(("smp: standard configuration %d unimplemented\n", floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 1;
		return B_OK;
#else
		/* this system conforms to one of the default configurations */
		TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 2;
		gKernelArgs.cpu_apic_id[0] = 0;
		gKernelArgs.cpu_apic_id[1] = 1;
		apic_phys = (unsigned int *) 0xfee00000;
		ioapic_phys = (unsigned int *) 0xfec00000;
		dprintf("smp: WARNING: standard configuration code is untested");
		return B_OK;
#endif
	}

	/*
	 * we are not running in standard configuration, so we have to look through
	 * all of the mp configuration table crap to figure out how many processors
	 * we have, where our apics are, etc.
	 */
	mp_config_table *config = floatingStruct->config_table;
	gKernelArgs.num_cpus = 0;

	/* print out our new found configuration. */
	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
		config->product));
	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
		config->num_base_entries, config->ext_length));

	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;

	// The base entries are variable-sized records packed directly after
	// the table header; the first byte of each record is its type.
	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
	for (int32 i = 0; i < config->num_base_entries; i++) {
		switch (*pointer) {
			case MP_BASE_PROCESSOR:
			{
				// Ignore processors beyond what the boot loader can track,
				// but still advance past the entry.
				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n", MAX_BOOT_CPUS));
					pointer += sizeof(struct mp_base_processor);
					break;
				}

				struct mp_base_processor *processor = (struct mp_base_processor *)pointer;

				// Maintain both mappings: CPU index -> APIC id and back.
				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus] = processor->apic_id;
				gKernelArgs.arch_args.cpu_os_id[processor->apic_id] = gKernelArgs.num_cpus;
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus] = processor->apic_version;

#ifdef TRACE_SMP
				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
#endif
				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
					gKernelArgs.num_cpus, cpuFamily[(processor->signature & 0xf00) >> 8],
					processor->apic_id, processor->apic_version, (processor->cpu_flags & 0x2) ?
					", BSP" : ""));

				gKernelArgs.num_cpus++;
				pointer += sizeof(struct mp_base_processor);
				break;
			}
			case MP_BASE_BUS:
			{
				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;

				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
					bus->name[4], bus->name[5]));

				pointer += sizeof(struct mp_base_bus);
				break;
			}
			case MP_BASE_IO_APIC:
			{
				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
				// NOTE(review): only the last IO APIC entry is kept; systems
				// with several IO APICs lose all but one here.
				gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;

				TRACE(("smp: found io apic with apic id %d, version %d\n",
					io->ioapic_id, io->ioapic_version));

				pointer += sizeof(struct mp_base_ioapic);
				break;
			}
			case MP_BASE_IO_INTR:
			case MP_BASE_LOCAL_INTR:
			{
				struct mp_base_interrupt *interrupt = (struct mp_base_interrupt *)pointer;

				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest apic %d, int %3d, polarity %d, trigger mode %d\n",
					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
					interrupt->interrupt_type, interrupt->source_bus_id,
					interrupt->source_bus_irq, interrupt->dest_apic_id,
					interrupt->dest_apic_int, interrupt->polarity,
					interrupt->trigger_mode);
				pointer += sizeof(struct mp_base_interrupt);
				break;
			}
			// NOTE(review): an unknown entry type would leave `pointer`
			// unadvanced and misparse all remaining entries; the base table
			// only defines the types handled above, so this should not
			// occur on spec-conforming tables -- verify if tables misparse.
		}
	}

	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
		(void *)gKernelArgs.arch_args.apic_phys,
		(void *)gKernelArgs.arch_args.ioapic_phys,
		gKernelArgs.num_cpus);

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}
217 
218 
219 static status_t
220 smp_do_acpi_config(void)
221 {
222 	TRACE(("smp: using ACPI to detect MP configuration\n"));
223 
224 	// reset CPU count
225 	gKernelArgs.num_cpus = 0;
226 
227 	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);
228 
229 	if (madt == NULL) {
230 		TRACE(("smp: Failed to find MADT!\n"));
231 		return B_ERROR;
232 	}
233 
234 	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
235 	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));
236 
237 	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
238 	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
239 	while (apic < end) {
240 		switch (apic->type) {
241 			case ACPI_MADT_LOCAL_APIC:
242 			{
243 				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
244 					TRACE(("smp: already reached maximum boot CPUs (%d)\n", MAX_BOOT_CPUS));
245 					break;
246 				}
247 
248 				acpi_local_apic *localApic = (acpi_local_apic *)apic;
249 				TRACE(("smp: found local APIC with id %u\n", localApic->apic_id));
250 				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
251 					TRACE(("smp: APIC is disabled and will not be used\n"));
252 					break;
253 				}
254 
255 				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus] = localApic->apic_id;
256 				gKernelArgs.arch_args.cpu_os_id[localApic->apic_id] = gKernelArgs.num_cpus;
257 				// ToDo: how to find out? putting 0x10 in to indicate a local apic
258 				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus] = 0x10;
259 				gKernelArgs.num_cpus++;
260 				break;
261 			}
262 
263 			case ACPI_MADT_IO_APIC: {
264 				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
265 				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
266 					ioApic->io_apic_id, ioApic->io_apic_address));
267 				gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
268 				break;
269 			}
270 		}
271 
272 		apic = (acpi_apic *)((uint8 *)apic + apic->length);
273 	}
274 
275 	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
276 }
277 
278 
/** Target function of the trampoline code.
 *	The trampoline code should have the pgdir and a gdt set up for us,
 *	along with us being on the final stack for this processor. We need
 *	to set up the local APIC and load the global idt and gdt. When we're
 *	done, we'll jump into the kernel with the cpu number as an argument.
 */

static int
smp_cpu_ready(void)
{
	uint32 curr_cpu = smp_get_current_cpu();
	struct gdt_idt_descr idt_descr;
	struct gdt_idt_descr gdt_descr;

	//TRACE(("smp_cpu_ready: entry cpu %ld\n", curr_cpu));

	// Important.  Make sure supervisor threads can fault on read only pages...
	// CR0 := PG (paging) | WP (write protect) | NE (numeric error) | PE
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
	asm("cld");
	asm("fninit");
		// reset the FPU to a known state

	// Set up the final idt
	idt_descr.a = IDT_LIMIT - 1;
	idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;

	asm("lidt	%0;"
		: : "m" (idt_descr));

	// Set up the final gdt
	gdt_descr.a = GDT_LIMIT - 1;
	gdt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_gdt;

	asm("lgdt	%0;"
		: : "m" (gdt_descr));

	// Fake a call frame for the kernel entry point and "return" into it:
	// the entry address is pushed last so that `ret` pops it into EIP,
	// leaving (dummy return address, &gKernelArgs, cpu number) as the
	// callee-visible frame.
	asm("pushl  %0; "					// push the cpu number
		"pushl 	%1;	"					// kernel args
		"pushl 	$0x0;"					// dummy retval for call to main
		"pushl 	%2;	"					// this is the start address
		"ret;		"					// jump.
		: : "g" (curr_cpu), "g" (&gKernelArgs), "g" (gKernelArgs.kernel_image.elf_header.e_entry));

	// no where to return to
	return 0;
}
324 
325 
/**	Calibrates the local APIC timer: lets it count down (divide-by-one,
 *	masked so no interrupt fires) while executing a fixed number of
 *	instructions, then converts the observed tick count and elapsed
 *	system_time() microseconds into ticks per second, stored in
 *	gKernelArgs.arch_args.apic_time_cv_factor for use by the kernel.
 */
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED; // timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	// the timer counts down from the initial value
	count = 0xffffffff - count;

	// ticks per second = ticks / elapsed microseconds * 1000000
	gKernelArgs.arch_args.apic_time_cv_factor = (uint32)((1000000.0/(t2 - t1)) * count);

	TRACE(("APIC ticks/sec = %ld\n", gKernelArgs.arch_args.apic_time_cv_factor));
}
356 
357 
358 //	#pragma mark -
359 
360 
/**	Prepares for SMP operation on the boot CPU: applies the safemode
 *	"disable SMP" setting, maps the local APIC (and the IO APIC if one
 *	was found) into the loader's address space, calibrates the APIC
 *	timer, and allocates a kernel stack for every application processor
 *	that will be trampolined later by smp_boot_other_cpus().
 */
void
smp_init_other_cpus(void)
{
	// Allow the user to turn off SMP via the safemode settings.
	void *handle = load_driver_settings(B_SAFEMODE_DRIVER_SETTINGS);
	if (handle != NULL) {
		if (get_driver_boolean_parameter(handle, B_SAFEMODE_DISABLE_SMP, false, false)) {
			// SMP has been disabled!
			TRACE(("smp disabled per safemode setting\n"));
			gKernelArgs.num_cpus = 1;
		}
		unload_driver_settings(handle);
	}

	// Without a known local APIC there is nothing left to set up.
	if (gKernelArgs.arch_args.apic_phys == 0)
		return;

	TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus, gKernelArgs.num_cpus != 1 ? "s" : ""));
	TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys));
	TRACE(("smp: ioapic_phys = %p\n", (void *)gKernelArgs.arch_args.ioapic_phys));

	// map in the apic & ioapic (if available)
	gKernelArgs.arch_args.apic = (uint32 *)mmu_map_physical_memory(
		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);
	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		gKernelArgs.arch_args.ioapic = (uint32 *)mmu_map_physical_memory(
			gKernelArgs.arch_args.ioapic_phys, B_PAGE_SIZE, kDefaultPageFlags);
	}

	TRACE(("smp: apic = %p\n", gKernelArgs.arch_args.apic));
	TRACE(("smp: ioapic = %p\n", gKernelArgs.arch_args.ioapic));

	// calculate how fast the apic timer is
	// (needs the APIC mapping established right above)
	calculate_apic_timer_conversion_factor();

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create a final stack the trampoline code will put the ap processor on
		gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL,
			KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
		gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	}
}
406 
407 
/**	Boots all application processors (CPUs 1..num_cpus-1): copies the
 *	trampoline code below 1 MB, prepares each CPU's stack, and wakes each
 *	one with an INIT / deassert-INIT / STARTUP IPI sequence through the
 *	local APIC. Waits for each CPU to signal that it has consumed its
 *	stack parameters before booting the next one.
 */
void
smp_boot_other_cpus(void)
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved for
	// this, or when PXE services are used 0x8b000-0x8cfff.

	// allocate a stack and a code area for the smp trampoline
	// (these have to be < 1M physical, 0xa0000-0xfffff is reserved by the BIOS,
	// and when PXE services are used, the 0x8d000-0x9ffff is also reserved)
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up
		// (zero only the usable part, leaving the guard pages alone)
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		// the topmost word of the final stack holds the function the AP
		// will jump to once the trampoline has switched it to this stack
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
				/ sizeof(uint32)) - 1;
		*tempStack = (uint32)&smp_cpu_ready;

		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = gKernelArgs.arch_args.phys_pgdir;

		// put a gdt descriptor at the bottom of the stack
		// (0x18 = three 8-byte descriptors)
		*((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// put the gdt at the bottom
		memcpy(&((uint32 *)trampolineStack)[2], (void *)gKernelArgs.arch_args.vir_gdt, 6*4);

		/* clear apic errors */
		// (only on integrated local APICs, version 0x1x; cf. the 82489DX
		// distinction made for numStartups below)
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

//dprintf("assert INIT\n");
		/* send (aka assert) INIT IPI */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

dprintf("deassert INIT\n");
		/* deassert INIT */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);

		/* is this a local apic or an 82489dx ? */
		// integrated local APICs get two STARTUP IPIs, the 82489DX none
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) ? 2 : 0;
dprintf("num startups = %ld\n", numStartups);
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
dprintf("send STARTUP\n");
			apic_write(APIC_ERROR_STATUS, 0);

			/* set target pe */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the IPI */
			// the STARTUP vector is the page number of the trampoline code
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait */
			spin(200);

dprintf("wait for delivery\n");
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}
540 
541 
542 void
543 smp_add_safemode_menus(Menu *menu)
544 {
545 
546 	if (gKernelArgs.num_cpus < 2)
547 		return;
548 
549 	MenuItem *item = new(nothrow) MenuItem("Disable SMP");
550 	menu->AddItem(item);
551 	item->SetData(B_SAFEMODE_DISABLE_SMP);
552 	item->SetType(MENU_ITEM_MARKABLE);
553 	item->SetHelpText("Disables all but one CPU core.");
554 }
555 
556 
557 void
558 smp_init(void)
559 {
560 #if NO_SMP
561 	gKernelArgs.num_cpus = 1;
562 	return;
563 #endif
564 
565 	// first try to find ACPI tables to get MP configuration as it handles
566 	// physical as well as logical MP configurations as in multiple cpus,
567 	// multiple cores or hyper threading.
568 	if (smp_do_acpi_config() == B_OK)
569 		return;
570 
571 	// then try to find MPS tables and do configuration based on them
572 	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
573 		mp_floating_struct *floatingStruct = smp_mp_probe(
574 			smp_scan_spots[i].start, smp_scan_spots[i].stop);
575 		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
576 			return;
577 	}
578 
579 	// everything failed or we are not running an SMP system
580 	gKernelArgs.num_cpus = 1;
581 }
582