/*
 * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
 * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "smp.h"

#include <string.h>

#include <KernelExport.h>

#include <kernel.h>
#include <safemode.h>
#include <boot/stage2.h>
#include <boot/menu.h>
#include <arch/x86/apic.h>
#include <arch/x86/arch_acpi.h>
#include <arch/x86/arch_smp.h>
#include <arch/x86/arch_system_info.h>

#include "mmu.h"
#include "acpi.h"
#include "hpet.h"


#define NO_SMP 0

#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

struct gdt_idt_descr {
	uint16 a;
	uint32 *b;
} _PACKED;

static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};

extern "C" void execute_n_instructions(int count);

extern "C" void smp_trampoline(void);
extern "C" void smp_trampoline_end(void);


static int smp_get_current_cpu(void);


static uint32
apic_read(uint32 offset)
{
	return *(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
}


static void
apic_write(uint32 offset, uint32 data)
{
	*(volatile uint32 *)((uint32)gKernelArgs.arch_args.apic + offset) = data;
}


static int
smp_get_current_cpu(void)
{
	if (gKernelArgs.arch_args.apic == NULL)
		return 0;

	uint8 apicID = apic_read(APIC_ID) >> 24;
	for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
		if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID)
			return i;
	}

	return 0;
}


static mp_floating_struct *
smp_mp_probe(uint32 base, uint32 limit)
{
	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) {
		if (*pointer == MP_FLOATING_SIGNATURE) {
			TRACE(("smp_mp_probe: found floating pointer structure at %p\n",
				pointer));
			return (mp_floating_struct *)pointer;
		}
	}

	return NULL;
}
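

// A note on the structures walked below (field names per arch_smp.h): the
// floating pointer structure found by the signature scan above points at an
// optional mp_config_table. That table starts with a fixed header (OEM and
// product IDs, the local APIC address, an entry count) and is followed by
// num_base_entries variable-length records, each beginning with a one-byte
// type such as MP_BASE_PROCESSOR or MP_BASE_IO_APIC. This layout comes from
// the Intel MultiProcessor Specification 1.4; a NULL config_table instead
// selects one of the spec's "default configurations".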


static status_t
smp_do_mp_config(mp_floating_struct *floatingStruct)
{
	TRACE(("smp: intel mp version %s, %s",
		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
		(floatingStruct->mp_feature_2 & 0x80)
			? "imcr and pic compatibility mode.\n"
			: "virtual wire compatibility mode.\n"));

	if (floatingStruct->config_table == NULL) {
#if 1
		// TODO: need to implement
		TRACE(("smp: standard configuration %d unimplemented\n",
			floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 1;
		return B_OK;
#else
		// this system conforms to one of the default configurations
		TRACE(("smp: standard configuration %d\n",
			floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 2;
		gKernelArgs.cpu_apic_id[0] = 0;
		gKernelArgs.cpu_apic_id[1] = 1;
		apic_phys = (unsigned int *)0xfee00000;
		ioapic_phys = (unsigned int *)0xfec00000;
		dprintf("smp: WARNING: standard configuration code is untested");
		return B_OK;
#endif
	}

	// We are not running in standard configuration, so we have to look
	// through all of the MP configuration table entries to figure out how
	// many processors we have, where our APICs are, etc.

	mp_config_table *config = floatingStruct->config_table;
	gKernelArgs.num_cpus = 0;

	// print our newfound configuration
	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
		config->product));
	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
		config->num_base_entries, config->ext_length));

	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;

	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
	for (int32 i = 0; i < config->num_base_entries; i++) {
		switch (*pointer) {
			case MP_BASE_PROCESSOR:
			{
				struct mp_base_processor *processor
					= (struct mp_base_processor *)pointer;
				pointer += sizeof(struct mp_base_processor);

				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					continue;
				}

				// skip if the processor is not enabled
				if (!(processor->cpu_flags & 0x1)) {
					TRACE(("smp: skip apic id %d: disabled\n",
						processor->apic_id));
					continue;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= processor->apic_id;
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= processor->apic_version;

#ifdef TRACE_SMP
				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
#endif
				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
					gKernelArgs.num_cpus,
					cpuFamily[(processor->signature & 0xf00) >> 8],
					processor->apic_id, processor->apic_version,
					(processor->cpu_flags & 0x2) ? ", BSP" : ""));

				gKernelArgs.num_cpus++;
				break;
			}
			case MP_BASE_BUS:
			{
				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;
				pointer += sizeof(struct mp_base_bus);

				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
					bus->name[4], bus->name[5]));

				break;
			}
			case MP_BASE_IO_APIC:
			{
				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
				pointer += sizeof(struct mp_base_ioapic);

				if (gKernelArgs.arch_args.ioapic_phys == 0)
					gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;

				TRACE(("smp: found io apic with apic id %d, version %d\n",
					io->ioapic_id, io->ioapic_version));

				break;
			}
			case MP_BASE_IO_INTR:
			case MP_BASE_LOCAL_INTR:
			{
				struct mp_base_interrupt *interrupt
					= (struct mp_base_interrupt *)pointer;
				pointer += sizeof(struct mp_base_interrupt);

				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest "
					"apic %d, int %3d, polarity %d, trigger mode %d\n",
					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
					interrupt->interrupt_type, interrupt->source_bus_id,
					interrupt->source_bus_irq, interrupt->dest_apic_id,
					interrupt->dest_apic_int, interrupt->polarity,
					interrupt->trigger_mode);
				break;
			}
		}
	}

	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
		(void *)gKernelArgs.arch_args.apic_phys,
		(void *)gKernelArgs.arch_args.ioapic_phys,
		gKernelArgs.num_cpus);

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}
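

// The ACPI path below walks the MADT (Multiple APIC Description Table): a
// fixed header that carries the local APIC address, followed by a packed
// list of variable-length entries. Each entry starts with a type byte and a
// length byte, which is why the loop advances by apic->length instead of a
// fixed struct size. Only ACPI_MADT_LOCAL_APIC and ACPI_MADT_IO_APIC entries
// matter here; everything else is skipped.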


static status_t
smp_do_acpi_config(void)
{
	TRACE(("smp: using ACPI to detect MP configuration\n"));

	// reset CPU count
	gKernelArgs.num_cpus = 0;

	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);

	if (madt == NULL) {
		TRACE(("smp: Failed to find MADT!\n"));
		return B_ERROR;
	}

	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));

	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
	while (apic < end) {
		switch (apic->type) {
			case ACPI_MADT_LOCAL_APIC:
			{
				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					break;
				}

				acpi_local_apic *localApic = (acpi_local_apic *)apic;
				TRACE(("smp: found local APIC with id %u\n",
					localApic->apic_id));
				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
					TRACE(("smp: APIC is disabled and will not be used\n"));
					break;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= localApic->apic_id;
				// TODO: how to find out? putting 0x10 in to indicate a local apic
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= 0x10;
				gKernelArgs.num_cpus++;
				break;
			}

			case ACPI_MADT_IO_APIC:
			{
				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
					ioApic->io_apic_id, ioApic->io_apic_address));
				if (gKernelArgs.arch_args.ioapic_phys == 0)
					gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
				break;
			}
			default:
				break;
		}

		apic = (acpi_apic *)((uint8 *)apic + apic->length);
	}

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}


/*!	Target function of the trampoline code.
	The trampoline code should have the pgdir and a gdt set up for us,
	along with us being on the final stack for this processor. We need
	to set up the local APIC and load the global idt and gdt. When we're
	done, we'll jump into the kernel with the cpu number as an argument.
*/
static int
smp_cpu_ready(void)
{
	uint32 curr_cpu = smp_get_current_cpu();
	struct gdt_idt_descr idt_descr;
	struct gdt_idt_descr gdt_descr;

	//TRACE(("smp_cpu_ready: entry cpu %ld\n", curr_cpu));

	// Important: make sure supervisor threads can fault on read only pages.
	// The CR0 bits set here are PE (0), NE (5), WP (16), and PG (31).
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
	asm("cld");
	asm("fninit");

	// Set up the final idt
	idt_descr.a = IDT_LIMIT - 1;
	idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;

	asm("lidt %0;"
		: : "m" (idt_descr));

	// Set up the final gdt
	gdt_descr.a = GDT_LIMIT - 1;
	gdt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_gdt;

	asm("lgdt %0;"
		: : "m" (gdt_descr));

	asm("pushl %0; "	// push the cpu number
		"pushl %1; "	// kernel args
		"pushl $0x0;"	// dummy retval for call to main
		"pushl %2; "	// this is the start address
		"ret; "			// jump
		: : "g" (curr_cpu), "g" (&gKernelArgs),
			"g" (gKernelArgs.kernel_image.elf_header.e_entry));

	// nowhere to return to
	return 0;
}
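

// The calibration below lets the APIC timer (divider 1, counting down from
// 0xffffffff) run while a fixed number of instructions executes, then turns
// the consumed ticks into a ticks-per-second factor:
//
//	ticks elapsed = 0xffffffff - APIC_CURRENT_TIMER_COUNT
//	ticks/sec     = ticks elapsed * 1000000 / (t2 - t1)
//		// system_time() returns microseconds
//
// A consumer could then turn a microsecond timeout into APIC timer ticks
// like this (hypothetical usage, not taken from this file):
//
//	ticks = (uint64)timeoutUSecs * apic_time_cv_factor / 1000000;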
347 : : "g" (curr_cpu), "g" (&gKernelArgs), 348 "g" (gKernelArgs.kernel_image.elf_header.e_entry)); 349 350 // no where to return to 351 return 0; 352 } 353 354 355 static void 356 calculate_apic_timer_conversion_factor(void) 357 { 358 int64 t1, t2; 359 uint32 config; 360 uint32 count; 361 362 // setup the timer 363 config = apic_read(APIC_LVT_TIMER); 364 config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED; 365 // timer masked, vector 0 366 apic_write(APIC_LVT_TIMER, config); 367 368 config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f); 369 apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1); 370 // divide clock by one 371 372 t1 = system_time(); 373 apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter 374 375 execute_n_instructions(128 * 20000); 376 377 count = apic_read(APIC_CURRENT_TIMER_COUNT); 378 t2 = system_time(); 379 380 count = 0xffffffff - count; 381 382 gKernelArgs.arch_args.apic_time_cv_factor 383 = (uint32)((1000000.0/(t2 - t1)) * count); 384 385 TRACE(("APIC ticks/sec = %ld\n", 386 gKernelArgs.arch_args.apic_time_cv_factor)); 387 } 388 389 390 // #pragma mark - 391 392 393 void 394 smp_init_other_cpus(void) 395 { 396 if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) { 397 // SMP has been disabled! 398 TRACE(("smp disabled per safemode setting\n")); 399 gKernelArgs.num_cpus = 1; 400 } 401 402 if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) { 403 TRACE(("local apic disabled per safemode setting, disabling smp\n")); 404 gKernelArgs.arch_args.apic_phys = 0; 405 gKernelArgs.num_cpus = 1; 406 } 407 408 if (gKernelArgs.arch_args.apic_phys == 0) 409 return; 410 411 TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus, 412 gKernelArgs.num_cpus != 1 ? "s" : "")); 413 TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys)); 414 TRACE(("smp: ioapic_phys = %p\n", 415 (void *)gKernelArgs.arch_args.ioapic_phys)); 416 417 // map in the apic 418 gKernelArgs.arch_args.apic = (uint32 *)mmu_map_physical_memory( 419 gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags); 420 421 TRACE(("smp: apic (mapped) = %p\n", gKernelArgs.arch_args.apic)); 422 423 // calculate how fast the apic timer is 424 calculate_apic_timer_conversion_factor(); 425 426 if (gKernelArgs.num_cpus < 2) 427 return; 428 429 for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) { 430 // create a final stack the trampoline code will put the ap processor on 431 gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL, 432 KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE); 433 gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE 434 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE; 435 } 436 } 437 438 439 void 440 smp_boot_other_cpus(void) 441 { 442 if (gKernelArgs.num_cpus < 2) 443 return; 444 445 TRACE(("trampolining other cpus\n")); 446 447 // The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved 448 // for this, or when PXE services are used 0x8b000-0x8cfff. 


void
smp_boot_other_cpus(void)
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped; either 0x9e000-0x9ffff is reserved
	// for this, or, when PXE services are used, 0x8b000-0x8cfff.

	// allocate a stack and a code area for the smp trampoline
	// (these have to be < 1 MB physical; 0xa0000-0xfffff is reserved by the
	// BIOS, and when PXE services are used, 0x8d000-0x9ffff is also reserved)
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
				/ sizeof(uint32)) - 1;
		*tempStack = (uint32)&smp_cpu_ready;

		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = gKernelArgs.arch_args.phys_pgdir;

		// put a gdt descriptor at the bottom of the stack
		*((uint16 *)trampolineStack) = 0x18 - 1;	// LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// put the gdt at the bottom
		memcpy(&((uint32 *)trampolineStack)[2],
			(void *)gKernelArgs.arch_args.vir_gdt, 6 * 4);

		// clear apic errors
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

		//dprintf("assert INIT\n");
		// send (aka assert) INIT IPI
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);	// set target pe
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		dprintf("deassert INIT\n");
		// deassert INIT
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		// wait 10 ms
		spin(10000);

		// is this a local apic or an 82489DX?
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
		dprintf("num startups = %ld\n", numStartups);
		for (j = 0; j < numStartups; j++) {
			// it's a local apic, so send STARTUP IPIs
			dprintf("send STARTUP\n");
			apic_write(APIC_ERROR_STATUS, 0);

			// set target pe
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			// send the IPI
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			// wait
			spin(200);

			dprintf("wait for delivery\n");
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}
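

// For reference, the trampoline page prepared above ends up looking roughly
// like this (offsets relative to trampolineStack, assuming a 4096 byte
// B_PAGE_SIZE; the consumption order is defined by the trampoline assembly,
// which is not part of this file):
//
//	+0x000	uint16		gdt limit (0x18 - 1)
//	+0x002	uint32		gdt base (trampolineStack + 8)
//	+0x008	uint32[6]	gdt contents copied from vir_gdt
//	...
//	+0xff8	uint32		phys_pgdir for the AP's address space
//	+0xffc	uint32		pointer to the top word of the AP's final kernel
//						stack, where the address of smp_cpu_ready() is stored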


void
smp_add_safemode_menus(Menu *menu)
{
	MenuItem *item;

	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
		item->SetHelpText("Disables using the IO APIC for interrupt routing, "
			"forcing the use of the legacy PIC instead.");
	}

	if (gKernelArgs.arch_args.apic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_APIC);
		item->SetHelpText("Disables using the local APIC, also disables SMP.");
	}

	if (gKernelArgs.num_cpus < 2)
		return;

	item = new(nothrow) MenuItem("Disable SMP");
	menu->AddItem(item);
	item->SetData(B_SAFEMODE_DISABLE_SMP);
	item->SetType(MENU_ITEM_MARKABLE);
	item->SetHelpText("Disables all but one CPU core.");
}


void
smp_init(void)
{
#if NO_SMP
	gKernelArgs.num_cpus = 1;
	return;
#endif

	cpuid_info info;
	if (get_current_cpuid(&info, 1) != B_OK)
		return;

	if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) {
		// Local APICs aren't present; as they form the basis for all
		// inter-CPU communication and therefore SMP, we don't need to go
		// any further.
		dprintf("no local APIC present, not attempting SMP init\n");
		return;
	}

	// Try to find ACPI tables first, since ACPI handles physical as well as
	// logical MP configurations, i.e. multiple CPUs, multiple cores, or
	// hyper-threading.
	if (smp_do_acpi_config() == B_OK)
		return;

	// then try to find MPS tables and do the configuration based on them
	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
		mp_floating_struct *floatingStruct = smp_mp_probe(
			smp_scan_spots[i].start, smp_scan_spots[i].stop);
		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
			return;
	}

	// everything failed, or we are not running on an SMP system
	gKernelArgs.num_cpus = 1;
}
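

// Rough call order from the rest of the loader (inferred from the function
// roles above rather than spelled out in this file): smp_init() runs first
// to detect the MP configuration, smp_add_safemode_menus() when the boot
// menu is built, smp_init_other_cpus() once the safemode settings are known,
// and smp_boot_other_cpus() last, shortly before the kernel is entered.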