xref: /haiku/src/system/boot/platform/bios_ia32/smp.cpp (revision 7a74a5df454197933bc6e80a542102362ee98703)
1 /*
2  * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
3  * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de.
4  * Distributed under the terms of the MIT License.
5  *
6  * Copyright 2001, Travis Geiselbrecht. All rights reserved.
7  * Distributed under the terms of the NewOS License.
8 */
9 
10 
11 #include "smp.h"
12 
13 #include <string.h>
14 
15 #include <KernelExport.h>
16 
17 #include <kernel.h>
18 #include <safemode.h>
19 #include <boot/stage2.h>
20 #include <boot/menu.h>
21 #include <arch/x86/apic.h>
22 #include <arch/x86/arch_acpi.h>
23 #include <arch/x86/arch_smp.h>
24 #include <arch/x86/arch_system_info.h>
25 
26 #include "mmu.h"
27 #include "acpi.h"
28 #include "hpet.h"
29 
30 
31 #define NO_SMP 0
32 
33 #define TRACE_SMP
34 #ifdef TRACE_SMP
35 #	define TRACE(x) dprintf x
36 #else
37 #	define TRACE(x) ;
38 #endif
39 
// Memory operand for the lidt/lgdt instructions: a 16-bit table limit
// followed by the 32-bit linear base address of the table. _PACKED keeps
// the compiler from inserting padding between the two fields, which would
// corrupt the descriptor layout the CPU expects.
struct gdt_idt_descr {
	uint16 a;	// table limit (size in bytes minus one)
	uint32 *b;	// linear base address of the IDT/GDT
} _PACKED;
44 
// Physical memory ranges scanned for the MP floating pointer structure:
// the last kilobyte of base memory (0x9fc00-0x9ffff) and the BIOS ROM area
// (0xf0000-0xfffff). Each entry is { start, stop, length }; the list is
// terminated by an all-zero entry (see the scan loop in smp_init()).
static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};
50 
51 extern "C" void execute_n_instructions(int count);
52 
53 extern "C" void smp_trampoline(void);
54 extern "C" void smp_trampoline_end(void);
55 
56 
57 static int smp_get_current_cpu(void);
58 
59 
60 static uint32
61 apic_read(uint32 offset)
62 {
63 	return *(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
64 }
65 
66 
67 static void
68 apic_write(uint32 offset, uint32 data)
69 {
70 	*(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset) = data;
71 }
72 
73 
74 static int
75 smp_get_current_cpu(void)
76 {
77 	if (gKernelArgs.arch_args.apic == NULL)
78 		return 0;
79 
80 	uint8 apicID = apic_read(APIC_ID) >> 24;
81 	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
82 		if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID)
83 			return i;
84 	}
85 
86 	return 0;
87 }
88 
89 
90 static mp_floating_struct *
91 smp_mp_probe(uint32 base, uint32 limit)
92 {
93 	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
94 	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) {
95 		if (*pointer == MP_FLOATING_SIGNATURE) {
96 			TRACE(("smp_mp_probe: found floating pointer structure at %p\n",
97 				pointer));
98 			return (mp_floating_struct *)pointer;
99 		}
100 	}
101 
102 	return NULL;
103 }
104 
105 
106 static status_t
107 smp_do_mp_config(mp_floating_struct *floatingStruct)
108 {
109 	if (floatingStruct->config_length != 1) {
110 		TRACE(("smp: unsupported structure length of %" B_PRIu8 " units\n",
111 			floatingStruct->config_length));
112 		return B_UNSUPPORTED;
113 	}
114 
115 	TRACE(("smp: intel mp version %s, %s",
116 		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
117 		(floatingStruct->mp_feature_2 & 0x80)
118 			? "imcr and pic compatibility mode.\n"
119 			: "virtual wire compatibility mode.\n"));
120 
121 	if (floatingStruct->config_table == NULL) {
122 #if 1
123 		// TODO: need to implement
124 		TRACE(("smp: standard configuration %d unimplemented\n",
125 			floatingStruct->mp_feature_1));
126 		gKernelArgs.num_cpus = 1;
127 		return B_OK;
128 #else
129 		// this system conforms to one of the default configurations
130 		TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1));
131 		gKernelArgs.num_cpus = 2;
132 		gKernelArgs.cpu_apic_id[0] = 0;
133 		gKernelArgs.cpu_apic_id[1] = 1;
134 		apic_phys = (unsigned int *)0xfee00000;
135 		ioapic_phys = (unsigned int *)0xfec00000;
136 		dprintf("smp: WARNING: standard configuration code is untested");
137 		return B_OK;
138 #endif
139 	}
140 
141 	// We are not running in standard configuration, so we have to look through
142 	// all of the mp configuration table crap to figure out how many processors
143 	// we have, where our apics are, etc.
144 
145 	mp_config_table *config = floatingStruct->config_table;
146 	gKernelArgs.num_cpus = 0;
147 
148 	if (config->signature != MP_CONFIG_TABLE_SIGNATURE) {
149 		TRACE(("smp: invalid config table signature, aborting\n"));
150 		return B_ERROR;
151 	}
152 
153 	if (config->base_table_length < sizeof(mp_config_table)) {
154 		TRACE(("smp: config table length %" B_PRIu16
155 			" too short for structure, aborting\n",
156 			config->base_table_length));
157 		return B_ERROR;
158 	}
159 
160 	// print our new found configuration.
161 	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
162 		config->product));
163 	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
164 		config->num_base_entries, config->ext_length));
165 
166 	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;
167 	if ((gKernelArgs.arch_args.apic_phys % 4096) != 0) {
168 		// MP specs mandate a 4K alignment for the local APIC(s)
169 		TRACE(("smp: local apic %p has bad alignment, aborting\n",
170 			(void *)gKernelArgs.arch_args.apic_phys));
171 		return B_ERROR;
172 	}
173 
174 	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
175 	for (int32 i = 0; i < config->num_base_entries; i++) {
176 		switch (*pointer) {
177 			case MP_BASE_PROCESSOR:
178 			{
179 				struct mp_base_processor *processor
180 					= (struct mp_base_processor *)pointer;
181 				pointer += sizeof(struct mp_base_processor);
182 
183 				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
184 					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
185 						MAX_BOOT_CPUS));
186 					continue;
187 				}
188 
189 				// skip if the processor is not enabled.
190 				if (!(processor->cpu_flags & 0x1)) {
191 					TRACE(("smp: skip apic id %d: disabled\n",
192 						processor->apic_id));
193 					continue;
194 				}
195 
196 				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
197 					= processor->apic_id;
198 				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
199 					= processor->apic_version;
200 
201 #ifdef TRACE_SMP
202 				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
203 					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
204 #endif
205 				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
206 					gKernelArgs.num_cpus,
207 					cpuFamily[(processor->signature & 0xf00) >> 8],
208 					processor->apic_id, processor->apic_version,
209 					(processor->cpu_flags & 0x2) ? ", BSP" : ""));
210 
211 				gKernelArgs.num_cpus++;
212 				break;
213 			}
214 			case MP_BASE_BUS:
215 			{
216 				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;
217 				pointer += sizeof(struct mp_base_bus);
218 
219 				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
220 					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
221 					bus->name[4], bus->name[5]));
222 
223 				break;
224 			}
225 			case MP_BASE_IO_APIC:
226 			{
227 				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
228 				pointer += sizeof(struct mp_base_ioapic);
229 
230 				if (gKernelArgs.arch_args.ioapic_phys == 0) {
231 					gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;
232 					if (gKernelArgs.arch_args.ioapic_phys % 1024) {
233 						// MP specs mandate a 1K alignment for the IO-APICs
234 						TRACE(("smp: io apic %p has bad alignment, aborting\n",
235 							(void *)gKernelArgs.arch_args.ioapic_phys));
236 						return B_ERROR;
237 					}
238 				}
239 
240 				TRACE(("smp: found io apic with apic id %d, version %d\n",
241 					io->ioapic_id, io->ioapic_version));
242 
243 				break;
244 			}
245 			case MP_BASE_IO_INTR:
246 			case MP_BASE_LOCAL_INTR:
247 			{
248 				struct mp_base_interrupt *interrupt
249 					= (struct mp_base_interrupt *)pointer;
250 				pointer += sizeof(struct mp_base_interrupt);
251 
252 				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest "
253 					"apic %d, int %3d, polarity %d, trigger mode %d\n",
254 					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
255 					interrupt->interrupt_type, interrupt->source_bus_id,
256 					interrupt->source_bus_irq, interrupt->dest_apic_id,
257 					interrupt->dest_apic_int, interrupt->polarity,
258 					interrupt->trigger_mode);
259 				break;
260 			}
261 		}
262 	}
263 
264 	if (gKernelArgs.num_cpus == 0) {
265 		TRACE(("smp: didn't find any processors, aborting\n"));
266 		return B_ERROR;
267 	}
268 
269 	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
270 		(void *)gKernelArgs.arch_args.apic_phys,
271 		(void *)gKernelArgs.arch_args.ioapic_phys,
272 		gKernelArgs.num_cpus);
273 
274 	return B_OK;
275 }
276 
277 
278 static status_t
279 smp_do_acpi_config(void)
280 {
281 	TRACE(("smp: using ACPI to detect MP configuration\n"));
282 
283 	// reset CPU count
284 	gKernelArgs.num_cpus = 0;
285 
286 	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);
287 
288 	if (madt == NULL) {
289 		TRACE(("smp: Failed to find MADT!\n"));
290 		return B_ERROR;
291 	}
292 
293 	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
294 	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));
295 
296 	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
297 	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
298 	while (apic < end) {
299 		switch (apic->type) {
300 			case ACPI_MADT_LOCAL_APIC:
301 			{
302 				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
303 					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
304 						MAX_BOOT_CPUS));
305 					break;
306 				}
307 
308 				acpi_local_apic *localApic = (acpi_local_apic *)apic;
309 				TRACE(("smp: found local APIC with id %u\n",
310 					localApic->apic_id));
311 				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
312 					TRACE(("smp: APIC is disabled and will not be used\n"));
313 					break;
314 				}
315 
316 				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
317 					= localApic->apic_id;
318 				// TODO: how to find out? putting 0x10 in to indicate a local apic
319 				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
320 					= 0x10;
321 				gKernelArgs.num_cpus++;
322 				break;
323 			}
324 
325 			case ACPI_MADT_IO_APIC: {
326 				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
327 				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
328 					ioApic->io_apic_id, ioApic->io_apic_address));
329 				if (gKernelArgs.arch_args.ioapic_phys == 0)
330 					gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
331 				break;
332 			}
333 			default:
334 				break;
335 		}
336 
337 		apic = (acpi_apic *)((uint8 *)apic + apic->length);
338 	}
339 
340 	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
341 }
342 
343 
344 /*!	Target function of the trampoline code.
345 	The trampoline code should have the pgdir and a gdt set up for us,
346 	along with us being on the final stack for this processor. We need
347 	to set up the local APIC and load the global idt and gdt. When we're
348 	done, we'll jump into the kernel with the cpu number as an argument.
349 */
static int
smp_cpu_ready(void)
{
	uint32 curr_cpu = smp_get_current_cpu();
	struct gdt_idt_descr idt_descr;
	struct gdt_idt_descr gdt_descr;

	//TRACE(("smp_cpu_ready: entry cpu %ld\n", curr_cpu));

	// Important.  Make sure supervisor threads can fault on read only pages...
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
		// CR0 := PG (paging) | WP (supervisor write protect) | NE (native
		// FPU error reporting) | PE (protected mode)
	asm("cld");
	asm("fninit");

	// Set up the final idt
	idt_descr.a = IDT_LIMIT - 1;
	idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;

	asm("lidt	%0;"
		: : "m" (idt_descr));

	// Set up the final gdt
	gdt_descr.a = GDT_LIMIT - 1;
	gdt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_gdt;

	asm("lgdt	%0;"
		: : "m" (gdt_descr));

	// Fake a function call into the kernel: push the arguments (cpu number,
	// kernel args) and a dummy return address, then "return" to the kernel's
	// ELF entry point.
	asm("pushl  %0; "					// push the cpu number
		"pushl 	%1;	"					// kernel args
		"pushl 	$0x0;"					// dummy retval for call to main
		"pushl 	%2;	"					// this is the start address
		"ret;		"					// jump.
		: : "g" (curr_cpu), "g" (&gKernelArgs),
			"g" (gKernelArgs.kernel_image.elf_header.e_entry));

	// no where to return to
	return 0;
}
389 
390 
// Calibrates the local APIC timer against system_time(): lets the timer
// count down (masked, divider 1) while a fixed amount of work executes, then
// derives ticks-per-second and stores it in
// gKernelArgs.arch_args.apic_time_cv_factor for the kernel to use.
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED;
		// timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	// burn a known amount of work while the counter is running
	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	// the timer counts down from the initial count
	count = 0xffffffff - count;

	// ticks per second = ticks elapsed / seconds elapsed
	// (t2 - t1 is in microseconds, hence the 1000000.0 factor)
	gKernelArgs.arch_args.apic_time_cv_factor
		= (uint32)((1000000.0/(t2 - t1)) * count);

	TRACE(("APIC ticks/sec = %ld\n",
		gKernelArgs.arch_args.apic_time_cv_factor));
}
424 
425 
426 //	#pragma mark -
427 
428 
429 void
430 smp_init_other_cpus(void)
431 {
432 	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
433 		// SMP has been disabled!
434 		TRACE(("smp disabled per safemode setting\n"));
435 		gKernelArgs.num_cpus = 1;
436 	}
437 
438 	if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) {
439 		TRACE(("local apic disabled per safemode setting, disabling smp\n"));
440 		gKernelArgs.arch_args.apic_phys = 0;
441 		gKernelArgs.num_cpus = 1;
442 	}
443 
444 	if (gKernelArgs.arch_args.apic_phys == 0)
445 		return;
446 
447 	TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus,
448 		gKernelArgs.num_cpus != 1 ? "s" : ""));
449 	TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys));
450 	TRACE(("smp: ioapic_phys = %p\n",
451 		(void *)gKernelArgs.arch_args.ioapic_phys));
452 
453 	// map in the apic
454 	gKernelArgs.arch_args.apic = (uint32 *)mmu_map_physical_memory(
455 		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);
456 
457 	TRACE(("smp: apic (mapped) = %p\n", gKernelArgs.arch_args.apic));
458 
459 	// calculate how fast the apic timer is
460 	calculate_apic_timer_conversion_factor();
461 
462 	if (gKernelArgs.num_cpus < 2)
463 		return;
464 
465 	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
466 		// create a final stack the trampoline code will put the ap processor on
467 		gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL,
468 			KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
469 		gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE
470 			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
471 	}
472 }
473 
474 
// Boots all application processors (APs): copies the real-mode trampoline
// code below 1 MB, prepares a per-CPU trampoline stack (final stack address,
// page directory, a temporary GDT), then wakes each AP with the
// INIT / deassert-INIT / STARTUP IPI sequence. Each AP ends up executing
// smp_cpu_ready() on its final kernel stack.
void
smp_boot_other_cpus(void)
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved
	// for this, or when PXE services are used 0x8b000-0x8cfff.

	// allocate a stack and a code area for the smp trampoline
	// (these have to be < 1M physical, 0xa0000-0xfffff is reserved by the BIOS,
	// and when PXE services are used, the 0x8d000-0x9ffff is also reserved)
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
				/ sizeof(uint32)) - 1;
		// top of the final stack holds the address the trampoline jumps to
		*tempStack = (uint32)&smp_cpu_ready;

		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = gKernelArgs.arch_args.phys_pgdir;

		// put a gdt descriptor at the bottom of the stack
		*((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// put the gdt at the bottom
		memcpy(&((uint32 *)trampolineStack)[2],
			(void *)gKernelArgs.arch_args.vir_gdt, 6 * 4);

		/* clear apic errors */
		// only done for integrated APICs (version 0x1x), see the
		// 82489DX distinction below
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

		// what follows is the INIT / deassert-INIT / STARTUP sequence;
		// the target CPU is always selected via APIC_INTR_COMMAND_2 first

//dprintf("assert INIT\n");
		/* send (aka assert) INIT IPI */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

dprintf("deassert INIT\n");
		/* deassert INIT */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);

		/* is this a local apic or an 82489dx ? */
		// integrated APICs need STARTUP IPIs; an external 82489DX
		// (version 0x0x) starts running right after INIT
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
dprintf("num startups = %ld\n", numStartups);
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
dprintf("send STARTUP\n");
			apic_write(APIC_ERROR_STATUS, 0);

			/* set target pe */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the IPI */
			// the STARTUP vector is the physical page number of the
			// trampoline code
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait */
			spin(200);

dprintf("wait for delivery\n");
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}
610 
611 
612 void
613 smp_add_safemode_menus(Menu *menu)
614 {
615 	MenuItem *item;
616 
617 	if (gKernelArgs.arch_args.ioapic_phys != 0) {
618 		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
619 		item->SetType(MENU_ITEM_MARKABLE);
620 		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
621 		item->SetHelpText("Disables using the IO APIC for interrupt routing, "
622 			"forcing the use of the legacy PIC instead.");
623 	}
624 
625 	if (gKernelArgs.arch_args.apic_phys != 0) {
626 		menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC"));
627 		item->SetType(MENU_ITEM_MARKABLE);
628 		item->SetData(B_SAFEMODE_DISABLE_APIC);
629 		item->SetHelpText("Disables using the local APIC, also disables SMP.");
630 	}
631 
632 	if (gKernelArgs.num_cpus < 2)
633 		return;
634 
635 	item = new(nothrow) MenuItem("Disable SMP");
636 	menu->AddItem(item);
637 	item->SetData(B_SAFEMODE_DISABLE_SMP);
638 	item->SetType(MENU_ITEM_MARKABLE);
639 	item->SetHelpText("Disables all but one CPU core.");
640 }
641 
642 
643 void
644 smp_init(void)
645 {
646 #if NO_SMP
647 	gKernelArgs.num_cpus = 1;
648 	return;
649 #endif
650 
651 	cpuid_info info;
652 	if (get_current_cpuid(&info, 1) != B_OK)
653 		return;
654 
655 	if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) {
656 		// Local APICs aren't present; As they form the basis for all inter CPU
657 		// communication and therefore SMP, we don't need to go any further.
658 		dprintf("no local APIC present, not attempting SMP init\n");
659 		return;
660 	}
661 
662 	// first try to find ACPI tables to get MP configuration as it handles
663 	// physical as well as logical MP configurations as in multiple cpus,
664 	// multiple cores or hyper threading.
665 	if (smp_do_acpi_config() == B_OK)
666 		return;
667 
668 	// then try to find MPS tables and do configuration based on them
669 	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
670 		mp_floating_struct *floatingStruct = smp_mp_probe(
671 			smp_scan_spots[i].start, smp_scan_spots[i].stop);
672 		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
673 			return;
674 	}
675 
676 	// Everything failed or we are not running an SMP system, reset anything
677 	// that might have been set through an incomplete configuration attempt.
678 	gKernelArgs.arch_args.apic_phys = 0;
679 	gKernelArgs.arch_args.ioapic_phys = 0;
680 	gKernelArgs.num_cpus = 1;
681 }
682