xref: /haiku/src/system/boot/platform/bios_ia32/smp.cpp (revision 49d7857e32a5c34fe63a11e46a41a774aa1b2728)
1 /*
2  * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
3  * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8 */
9 
10 
11 #include "smp.h"
12 
13 #include <string.h>
14 
15 #include <KernelExport.h>
16 
17 #include <kernel.h>
18 #include <safemode.h>
19 #include <boot/stage2.h>
20 #include <boot/menu.h>
21 #include <arch/x86/apic.h>
22 #include <arch/x86/arch_acpi.h>
23 #include <arch/x86/arch_smp.h>
24 #include <arch/x86/arch_system_info.h>
25 
26 #include "mmu.h"
27 #include "acpi.h"
28 
29 
30 #define NO_SMP 0
31 
32 #define TRACE_SMP
33 #ifdef TRACE_SMP
34 #	define TRACE(x) dprintf x
35 #else
36 #	define TRACE(x) ;
37 #endif
38 
// Memory operand for the lidt/lgdt instructions: a 16-bit table limit
// followed by the 32-bit linear base address of the table. _PACKED is
// required so the fields are adjacent exactly as the CPU expects.
struct gdt_idt_descr {
	uint16 a;	// table limit (size in bytes minus one)
	uint32 *b;	// linear base address of the IDT/GDT
} _PACKED;
43 
// Physical memory ranges that are scanned for the MP floating pointer
// structure: the last KB of conventional memory below 640K (typically the
// EBDA) and the BIOS ROM area. Each entry is { start, stop, length };
// a zero-length entry terminates the list.
static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};
49 
50 extern "C" void execute_n_instructions(int count);
51 
52 extern "C" void smp_trampoline(void);
53 extern "C" void smp_trampoline_end(void);
54 
55 
56 static int smp_get_current_cpu(void);
57 
58 
59 static uint32
60 apic_read(uint32 offset)
61 {
62 	return *(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
63 }
64 
65 
66 static void
67 apic_write(uint32 offset, uint32 data)
68 {
69 	*(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset) = data;
70 }
71 
72 
73 static int
74 smp_get_current_cpu(void)
75 {
76 	if (gKernelArgs.arch_args.apic == NULL)
77 		return 0;
78 
79 	uint8 apicID = apic_read(APIC_ID) >> 24;
80 	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
81 		if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID)
82 			return i;
83 	}
84 
85 	return 0;
86 }
87 
88 
89 static mp_floating_struct *
90 smp_mp_probe(uint32 base, uint32 limit)
91 {
92 	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
93 	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) {
94 		if (*pointer == MP_FLOATING_SIGNATURE) {
95 			TRACE(("smp_mp_probe: found floating pointer structure at %p\n",
96 				pointer));
97 			return (mp_floating_struct *)pointer;
98 		}
99 	}
100 
101 	return NULL;
102 }
103 
104 
105 static status_t
106 smp_do_mp_config(mp_floating_struct *floatingStruct)
107 {
108 	if (floatingStruct->config_length != 1) {
109 		TRACE(("smp: unsupported structure length of %" B_PRIu8 " units\n",
110 			floatingStruct->config_length));
111 		return B_UNSUPPORTED;
112 	}
113 
114 	TRACE(("smp: intel mp version %s, %s",
115 		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
116 		(floatingStruct->mp_feature_2 & 0x80)
117 			? "imcr and pic compatibility mode.\n"
118 			: "virtual wire compatibility mode.\n"));
119 
120 	if (floatingStruct->config_table == NULL) {
121 #if 1
122 		// TODO: need to implement
123 		TRACE(("smp: standard configuration %d unimplemented\n",
124 			floatingStruct->mp_feature_1));
125 		gKernelArgs.num_cpus = 1;
126 		return B_OK;
127 #else
128 		// this system conforms to one of the default configurations
129 		TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1));
130 		gKernelArgs.num_cpus = 2;
131 		gKernelArgs.cpu_apic_id[0] = 0;
132 		gKernelArgs.cpu_apic_id[1] = 1;
133 		apic_phys = (unsigned int *)0xfee00000;
134 		ioapic_phys = (unsigned int *)0xfec00000;
135 		dprintf("smp: WARNING: standard configuration code is untested");
136 		return B_OK;
137 #endif
138 	}
139 
140 	// We are not running in standard configuration, so we have to look through
141 	// all of the mp configuration table crap to figure out how many processors
142 	// we have, where our apics are, etc.
143 
144 	mp_config_table *config = floatingStruct->config_table;
145 	gKernelArgs.num_cpus = 0;
146 
147 	if (config->signature != MP_CONFIG_TABLE_SIGNATURE) {
148 		TRACE(("smp: invalid config table signature, aborting\n"));
149 		return B_ERROR;
150 	}
151 
152 	if (config->base_table_length < sizeof(mp_config_table)) {
153 		TRACE(("smp: config table length %" B_PRIu16
154 			" too short for structure, aborting\n",
155 			config->base_table_length));
156 		return B_ERROR;
157 	}
158 
159 	// print our new found configuration.
160 	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
161 		config->product));
162 	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
163 		config->num_base_entries, config->ext_length));
164 
165 	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;
166 	if ((gKernelArgs.arch_args.apic_phys % 4096) != 0) {
167 		// MP specs mandate a 4K alignment for the local APIC(s)
168 		TRACE(("smp: local apic %p has bad alignment, aborting\n",
169 			(void *)gKernelArgs.arch_args.apic_phys));
170 		return B_ERROR;
171 	}
172 
173 	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
174 	for (int32 i = 0; i < config->num_base_entries; i++) {
175 		switch (*pointer) {
176 			case MP_BASE_PROCESSOR:
177 			{
178 				struct mp_base_processor *processor
179 					= (struct mp_base_processor *)pointer;
180 				pointer += sizeof(struct mp_base_processor);
181 
182 				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
183 					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
184 						MAX_BOOT_CPUS));
185 					continue;
186 				}
187 
188 				// skip if the processor is not enabled.
189 				if (!(processor->cpu_flags & 0x1)) {
190 					TRACE(("smp: skip apic id %d: disabled\n",
191 						processor->apic_id));
192 					continue;
193 				}
194 
195 				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
196 					= processor->apic_id;
197 				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
198 					= processor->apic_version;
199 
200 #ifdef TRACE_SMP
201 				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
202 					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
203 #endif
204 				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
205 					gKernelArgs.num_cpus,
206 					cpuFamily[(processor->signature & 0xf00) >> 8],
207 					processor->apic_id, processor->apic_version,
208 					(processor->cpu_flags & 0x2) ? ", BSP" : ""));
209 
210 				gKernelArgs.num_cpus++;
211 				break;
212 			}
213 			case MP_BASE_BUS:
214 			{
215 				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;
216 				pointer += sizeof(struct mp_base_bus);
217 
218 				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
219 					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
220 					bus->name[4], bus->name[5]));
221 
222 				break;
223 			}
224 			case MP_BASE_IO_APIC:
225 			{
226 				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
227 				pointer += sizeof(struct mp_base_ioapic);
228 
229 				if (gKernelArgs.arch_args.ioapic_phys == 0) {
230 					gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;
231 					if (gKernelArgs.arch_args.ioapic_phys % 1024) {
232 						// MP specs mandate a 1K alignment for the IO-APICs
233 						TRACE(("smp: io apic %p has bad alignment, aborting\n",
234 							(void *)gKernelArgs.arch_args.ioapic_phys));
235 						return B_ERROR;
236 					}
237 				}
238 
239 				TRACE(("smp: found io apic with apic id %d, version %d\n",
240 					io->ioapic_id, io->ioapic_version));
241 
242 				break;
243 			}
244 			case MP_BASE_IO_INTR:
245 			case MP_BASE_LOCAL_INTR:
246 			{
247 				struct mp_base_interrupt *interrupt
248 					= (struct mp_base_interrupt *)pointer;
249 				pointer += sizeof(struct mp_base_interrupt);
250 
251 				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest "
252 					"apic %d, int %3d, polarity %d, trigger mode %d\n",
253 					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
254 					interrupt->interrupt_type, interrupt->source_bus_id,
255 					interrupt->source_bus_irq, interrupt->dest_apic_id,
256 					interrupt->dest_apic_int, interrupt->polarity,
257 					interrupt->trigger_mode);
258 				break;
259 			}
260 		}
261 	}
262 
263 	if (gKernelArgs.num_cpus == 0) {
264 		TRACE(("smp: didn't find any processors, aborting\n"));
265 		return B_ERROR;
266 	}
267 
268 	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
269 		(void *)gKernelArgs.arch_args.apic_phys,
270 		(void *)gKernelArgs.arch_args.ioapic_phys,
271 		gKernelArgs.num_cpus);
272 
273 	return B_OK;
274 }
275 
276 
/*!	Detects the MP configuration from the ACPI MADT table: fills in the
	physical local APIC address, the per-CPU APIC IDs, and the (first)
	IO-APIC address in gKernelArgs.
	Returns B_OK when at least one enabled local APIC (i.e. CPU) was found,
	B_ERROR otherwise.
*/
static status_t
smp_do_acpi_config(void)
{
	TRACE(("smp: using ACPI to detect MP configuration\n"));

	// reset CPU count
	gKernelArgs.num_cpus = 0;

	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);

	if (madt == NULL) {
		TRACE(("smp: Failed to find MADT!\n"));
		return B_ERROR;
	}

	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));

	// iterate over the variable-length APIC entries that follow the
	// fixed MADT header; each entry carries its own type and length
	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
	while (apic < end) {
		switch (apic->type) {
			case ACPI_MADT_LOCAL_APIC:
			{
				// one local APIC entry per (logical) processor
				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					break;
				}

				acpi_local_apic *localApic = (acpi_local_apic *)apic;
				TRACE(("smp: found local APIC with id %u\n",
					localApic->apic_id));
				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
					TRACE(("smp: APIC is disabled and will not be used\n"));
					break;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= localApic->apic_id;
				// TODO: how to find out? putting 0x10 in to indicate a local apic
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= 0x10;
				gKernelArgs.num_cpus++;
				break;
			}

			case ACPI_MADT_IO_APIC: {
				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
					ioApic->io_apic_id, ioApic->io_apic_address));
				// only the first IO-APIC is recorded
				if (gKernelArgs.arch_args.ioapic_phys == 0)
					gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
				break;
			}
			default:
				break;
		}

		apic = (acpi_apic *)((uint8 *)apic + apic->length);
	}

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}
341 
342 
/*!	Target function of the trampoline code.
	The trampoline code should have the pgdir and a gdt set up for us,
	along with us being on the final stack for this processor. We need
	to set up the local APIC and load the global idt and gdt. When we're
	done, we'll jump into the kernel with the cpu number as an argument.
	Never returns; control transfers to the kernel entry point.
*/
static int
smp_cpu_ready(void)
{
	uint32 curr_cpu = smp_get_current_cpu();
	struct gdt_idt_descr idt_descr;
	struct gdt_idt_descr gdt_descr;

	//TRACE(("smp_cpu_ready: entry cpu %ld\n", curr_cpu));

	// Important.  Make sure supervisor threads can fault on read only pages...
	// Loads CR0 with PG (bit 31), WP (bit 16), NE (bit 5) and PE (bit 0) set.
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
	asm("cld");
	asm("fninit");

	// Set up the final idt
	idt_descr.a = IDT_LIMIT - 1;
	idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;

	asm("lidt	%0;"
		: : "m" (idt_descr));

	// Set up the final gdt
	gdt_descr.a = GDT_LIMIT - 1;
	gdt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_gdt;

	asm("lgdt	%0;"
		: : "m" (gdt_descr));

	// Fake a call frame for the kernel entry point and "return" into it:
	// the pushed values become the entry function's arguments.
	asm("pushl  %0; "					// push the cpu number
		"pushl 	%1;	"					// kernel args
		"pushl 	$0x0;"					// dummy retval for call to main
		"pushl 	%2;	"					// this is the start address
		"ret;		"					// jump.
		: : "g" (curr_cpu), "g" (&gKernelArgs),
			"g" (gKernelArgs.kernel_image.elf_header.e_entry));

	// no where to return to
	return 0;
}
388 
389 
/*!	Measures the local APIC timer frequency by letting the (masked) timer
	count down with divider 1 while a fixed busy loop runs, then relating
	the consumed ticks to the elapsed system_time(). The result (ticks per
	second) is stored in gKernelArgs.arch_args.apic_time_cv_factor.
*/
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED;
		// timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	// burn a deterministic amount of work so a measurable time passes
	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	// the timer counts down, so the consumed ticks are the difference
	// from the initial count
	count = 0xffffffff - count;

	// ticks per second, assuming t2 - t1 is in microseconds
	gKernelArgs.arch_args.apic_time_cv_factor
		= (uint32)((1000000.0/(t2 - t1)) * count);

	TRACE(("APIC ticks/sec = %ld\n",
		gKernelArgs.arch_args.apic_time_cv_factor));
}
423 
424 
425 //	#pragma mark -
426 
427 
/*!	Applies the SMP related safemode settings and, if a local APIC is
	available, maps it, measures the APIC timer frequency, and allocates a
	kernel stack for each application processor. Must be called after
	smp_init() has detected the configuration.
*/
void
smp_init_other_cpus(void)
{
	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
		// SMP has been disabled!
		TRACE(("smp disabled per safemode setting\n"));
		gKernelArgs.num_cpus = 1;
	}

	if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) {
		TRACE(("local apic disabled per safemode setting, disabling smp\n"));
		gKernelArgs.arch_args.apic_phys = 0;
		gKernelArgs.num_cpus = 1;
	}

	// without a local APIC there is nothing to set up
	if (gKernelArgs.arch_args.apic_phys == 0)
		return;

	TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus,
		gKernelArgs.num_cpus != 1 ? "s" : ""));
	TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys));
	TRACE(("smp: ioapic_phys = %p\n",
		(void *)gKernelArgs.arch_args.ioapic_phys));

	// map in the apic
	gKernelArgs.arch_args.apic = (uint32 *)mmu_map_physical_memory(
		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);

	TRACE(("smp: apic (mapped) = %p\n", gKernelArgs.arch_args.apic));

	// calculate how fast the apic timer is
	calculate_apic_timer_conversion_factor();

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create a final stack the trampoline code will put the ap processor on
		gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL,
			KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
		gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	}
}
472 
473 
/*!	Boots all application processors: copies the real-mode trampoline code
	below 1 MB, prepares a per-CPU trampoline stack (carrying the final
	kernel stack address, page directory and a temporary GDT), then wakes
	each AP with the INIT / STARTUP IPI sequence and waits until it has
	picked up its stack before reusing the trampoline for the next CPU.
*/
void
smp_boot_other_cpus(void)
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved
	// for this, or when PXE services are used 0x8b000-0x8cfff.

	// allocate a stack and a code area for the smp trampoline
	// (these have to be < 1M physical, 0xa0000-0xfffff is reserved by the BIOS,
	// and when PXE services are used, the 0x8d000-0x9ffff is also reserved)
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up: zero it (past the guard pages) and place the
		// address the AP should jump to at the very top
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
				/ sizeof(uint32)) - 1;
		*tempStack = (uint32)&smp_cpu_ready;

		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = gKernelArgs.arch_args.phys_pgdir;

		// put a gdt descriptor at the bottom of the stack
		*((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// put the gdt at the bottom
		memcpy(&((uint32 *)trampolineStack)[2],
			(void *)gKernelArgs.arch_args.vir_gdt, 6 * 4);

		/* clear apic errors */
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

//dprintf("assert INIT\n");
		/* send (aka assert) INIT IPI */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

dprintf("deassert INIT\n");
		/* deassert INIT */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);

		/* is this a local apic or an 82489dx ? */
		// an 82489dx (version < 0x10) needs no STARTUP IPIs, a real local
		// apic gets two of them
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
dprintf("num startups = %ld\n", numStartups);
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
dprintf("send STARTUP\n");
			apic_write(APIC_ERROR_STATUS, 0);

			/* set target pe */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the IPI; the vector is the page number of the trampoline */
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait */
			spin(200);

dprintf("wait for delivery\n");
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}
609 
610 
611 void
612 smp_add_safemode_menus(Menu *menu)
613 {
614 	MenuItem *item;
615 
616 	if (gKernelArgs.arch_args.ioapic_phys != 0) {
617 		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
618 		item->SetType(MENU_ITEM_MARKABLE);
619 		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
620 		item->SetHelpText("Disables using the IO APIC for interrupt routing, "
621 			"forcing the use of the legacy PIC instead.");
622 	}
623 
624 	if (gKernelArgs.arch_args.apic_phys != 0) {
625 		menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC"));
626 		item->SetType(MENU_ITEM_MARKABLE);
627 		item->SetData(B_SAFEMODE_DISABLE_APIC);
628 		item->SetHelpText("Disables using the local APIC, also disables SMP.");
629 	}
630 
631 	if (gKernelArgs.num_cpus < 2)
632 		return;
633 
634 	item = new(nothrow) MenuItem("Disable SMP");
635 	menu->AddItem(item);
636 	item->SetData(B_SAFEMODE_DISABLE_SMP);
637 	item->SetType(MENU_ITEM_MARKABLE);
638 	item->SetHelpText("Disables all but one CPU core.");
639 }
640 
641 
642 void
643 smp_init(void)
644 {
645 #if NO_SMP
646 	gKernelArgs.num_cpus = 1;
647 	return;
648 #endif
649 
650 	cpuid_info info;
651 	if (get_current_cpuid(&info, 1) != B_OK)
652 		return;
653 
654 	if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) {
655 		// Local APICs aren't present; As they form the basis for all inter CPU
656 		// communication and therefore SMP, we don't need to go any further.
657 		dprintf("no local APIC present, not attempting SMP init\n");
658 		return;
659 	}
660 
661 	// first try to find ACPI tables to get MP configuration as it handles
662 	// physical as well as logical MP configurations as in multiple cpus,
663 	// multiple cores or hyper threading.
664 	if (smp_do_acpi_config() == B_OK)
665 		return;
666 
667 	// then try to find MPS tables and do configuration based on them
668 	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
669 		mp_floating_struct *floatingStruct = smp_mp_probe(
670 			smp_scan_spots[i].start, smp_scan_spots[i].stop);
671 		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
672 			return;
673 	}
674 
675 	// Everything failed or we are not running an SMP system, reset anything
676 	// that might have been set through an incomplete configuration attempt.
677 	gKernelArgs.arch_args.apic_phys = 0;
678 	gKernelArgs.arch_args.ioapic_phys = 0;
679 	gKernelArgs.num_cpus = 1;
680 }
681