/*
 * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved.
 * Copyright 2004-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "smp.h"

#include <string.h>

#include <KernelExport.h>

#include <kernel.h>
#include <safemode.h>
#include <boot/stage2.h>
#include <boot/menu.h>
#include <arch/x86/arch_acpi.h>
#include <arch/x86/arch_apic.h>
#include <arch/x86/arch_system_info.h>

#include "mmu.h"
#include "acpi.h"
#include "hpet.h"


// Set to 1 to force single-CPU operation (skips all MP detection).
#define NO_SMP 0

#define TRACE_SMP
#ifdef TRACE_SMP
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif

// Memory operand layout for the lidt/lgdt instructions: 16-bit limit
// followed by the 32-bit linear base address. Must be packed so no
// padding is inserted between the two fields.
struct gdt_idt_descr {
	uint16 a;	// table limit (size in bytes - 1)
	uint32 *b;	// table base address
} _PACKED;

// Physical memory ranges that may contain the Intel MP "floating pointer"
// structure: the last KB of base memory and the BIOS ROM area.
// Entries are { start, stop, length }; a zero-length entry terminates.
static struct scan_spots_struct smp_scan_spots[] = {
	{ 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 },
	{ 0xf0000, 0x100000, 0x100000 - 0xf0000 },
	{ 0, 0, 0 }
};

extern "C" void execute_n_instructions(int count);

// Real-mode startup code copied below 1 MB for the APs to execute;
// smp_trampoline_end only marks the end of that code.
extern "C" void smp_trampoline(void);
extern "C" void smp_trampoline_end(void);


static int smp_get_current_cpu(void);


/*!	Reads a 32-bit register of the local APIC through the virtual mapping
	established in smp_init_other_cpus(). \a offset is the register offset
	from the APIC base.
*/
static uint32
apic_read(uint32 offset)
{
	return *(uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
}


/*!	Writes a 32-bit value to a local APIC register (see apic_read()). */
static void
apic_write(uint32 offset, uint32 data)
{
	uint32 *addr = (uint32 *)((uint32)gKernelArgs.arch_args.apic + offset);
	*addr = data;
}


/*!	Returns the OS CPU index of the processor this code is currently
	running on, looked up from the local APIC ID (bits 24-31 of the
	APIC ID register). Returns 0 when no APIC has been mapped yet.
*/
static int
smp_get_current_cpu(void)
{
	if (gKernelArgs.arch_args.apic == NULL)
		return 0;

	// NOTE(review): the "& 0xffffffff" mask is a no-op on a 32-bit value;
	// only the ">> 24" extracts the APIC ID.
	return gKernelArgs.arch_args.cpu_os_id[
		(apic_read(APIC_ID) & 0xffffffff) >> 24];
}


/*!	Scans physical memory [\a base, \a limit) for the Intel MP floating
	pointer structure signature ("_MP_"). The scan relies on the range
	being identity mapped. Returns a pointer to the structure, or NULL
	if none was found.
*/
static mp_floating_struct *
smp_mp_probe(uint32 base, uint32 limit)
{
	TRACE(("smp_mp_probe: entry base 0x%lx, limit 0x%lx\n", base, limit));
	for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit;
			pointer++) {
		if (*pointer == MP_FLOATING_SIGNATURE) {
			TRACE(("smp_mp_probe: found floating pointer structure at %p\n",
				pointer));
			return (mp_floating_struct *)pointer;
		}
	}

	return NULL;
}


/*!	Parses the Intel MP specification tables referenced by
	\a floatingStruct, filling in gKernelArgs with the number of CPUs,
	their APIC IDs/versions, and the physical local/IO APIC addresses.
	Returns B_OK if at least one processor entry was found.
*/
static status_t
smp_do_mp_config(mp_floating_struct *floatingStruct)
{
	TRACE(("smp: intel mp version %s, %s",
		(floatingStruct->spec_revision == 1) ? "1.1" : "1.4",
		(floatingStruct->mp_feature_2 & 0x80)
			? "imcr and pic compatibility mode.\n"
			: "virtual wire compatibility mode.\n"));

	if (floatingStruct->config_table == NULL) {
#if 1
		// TODO: need to implement
		TRACE(("smp: standard configuration %d unimplemented\n",
			floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 1;
		return B_OK;
#else
		// this system conforms to one of the default configurations
		TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1));
		gKernelArgs.num_cpus = 2;
		gKernelArgs.cpu_apic_id[0] = 0;
		gKernelArgs.cpu_apic_id[1] = 1;
		apic_phys = (unsigned int *)0xfee00000;
		ioapic_phys = (unsigned int *)0xfec00000;
		dprintf("smp: WARNING: standard configuration code is untested");
		return B_OK;
#endif
	}

	// We are not running in standard configuration, so we have to look through
	// all of the mp configuration table crap to figure out how many processors
	// we have, where our apics are, etc.

	mp_config_table *config = floatingStruct->config_table;
	gKernelArgs.num_cpus = 0;

	// print our new found configuration.
	TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem,
		config->product));
	TRACE(("smp: base table has %d entries, extended section %d bytes\n",
		config->num_base_entries, config->ext_length));

	gKernelArgs.arch_args.apic_phys = (uint32)config->apic;

	// Walk the variable-sized base table entries that directly follow the
	// config table header. Each entry's first byte is its type.
	char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table));
	for (int32 i = 0; i < config->num_base_entries; i++) {
		switch (*pointer) {
			case MP_BASE_PROCESSOR:
			{
				struct mp_base_processor *processor
					= (struct mp_base_processor *)pointer;
				pointer += sizeof(struct mp_base_processor);

				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					continue;
				}

				// skip if the processor is not enabled.
				if (!(processor->cpu_flags & 0x1)) {
					TRACE(("smp: skip apic id %d: disabled\n",
						processor->apic_id));
					continue;
				}

				// Record the APIC ID <-> OS CPU index mapping in both
				// directions.
				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= processor->apic_id;
				gKernelArgs.arch_args.cpu_os_id[processor->apic_id]
					= gKernelArgs.num_cpus;
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= processor->apic_version;

#ifdef TRACE_SMP
				// NOTE(review): the family nibble ((signature & 0xf00) >> 8)
				// can be up to 15, but this table only has 8 entries — a
				// family > 7 would read past the array. TODO confirm range.
				const char *cpuFamily[] = { "", "", "", "", "Intel 486",
					"Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" };
#endif
				TRACE(("smp: cpu#%ld: %s, apic id %d, version %d%s\n",
					gKernelArgs.num_cpus,
					cpuFamily[(processor->signature & 0xf00) >> 8],
					processor->apic_id, processor->apic_version,
					(processor->cpu_flags & 0x2) ? ", BSP" : ""));

				gKernelArgs.num_cpus++;
				break;
			}
			case MP_BASE_BUS:
			{
				struct mp_base_bus *bus = (struct mp_base_bus *)pointer;
				pointer += sizeof(struct mp_base_bus);

				TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id,
					bus->name[0], bus->name[1], bus->name[2], bus->name[3],
					bus->name[4], bus->name[5]));

				break;
			}
			case MP_BASE_IO_APIC:
			{
				struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer;
				pointer += sizeof(struct mp_base_ioapic);

				// NOTE(review): if multiple IO APICs are listed, only the
				// last one's address is kept.
				gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr;

				TRACE(("smp: found io apic with apic id %d, version %d\n",
					io->ioapic_id, io->ioapic_version));

				break;
			}
			case MP_BASE_IO_INTR:
			case MP_BASE_LOCAL_INTR:
			{
				struct mp_base_interrupt *interrupt
					= (struct mp_base_interrupt *)pointer;
				pointer += sizeof(struct mp_base_interrupt);

				dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest "
					"apic %d, int %3d, polarity %d, trigger mode %d\n",
					interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local",
					interrupt->interrupt_type, interrupt->source_bus_id,
					interrupt->source_bus_irq, interrupt->dest_apic_id,
					interrupt->dest_apic_int, interrupt->polarity,
					interrupt->trigger_mode);
				break;
			}
		}
	}

	dprintf("smp: apic @ %p, i/o apic @ %p, total %ld processors detected\n",
		(void *)gKernelArgs.arch_args.apic_phys,
		(void *)gKernelArgs.arch_args.ioapic_phys,
		gKernelArgs.num_cpus);

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}


/*!	Detects the MP configuration from the ACPI MADT table, filling in
	gKernelArgs the same way smp_do_mp_config() does. Handles logical
	processors (multiple cores, hyper-threading) as well as physical ones.
	Returns B_OK if at least one enabled local APIC entry was found.
*/
static status_t
smp_do_acpi_config(void)
{
	TRACE(("smp: using ACPI to detect MP configuration\n"));

	// reset CPU count
	gKernelArgs.num_cpus = 0;

	acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE);

	if (madt == NULL) {
		TRACE(("smp: Failed to find MADT!\n"));
		return B_ERROR;
	}

	gKernelArgs.arch_args.apic_phys = madt->local_apic_address;
	TRACE(("smp: local apic address is 0x%lx\n", madt->local_apic_address));

	// Iterate over the variable-sized APIC structures appended to the MADT
	// header; each carries its own length field.
	acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt));
	acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length);
	while (apic < end) {
		switch (apic->type) {
			case ACPI_MADT_LOCAL_APIC:
			{
				if (gKernelArgs.num_cpus == MAX_BOOT_CPUS) {
					TRACE(("smp: already reached maximum boot CPUs (%d)\n",
						MAX_BOOT_CPUS));
					break;
				}

				acpi_local_apic *localApic = (acpi_local_apic *)apic;
				TRACE(("smp: found local APIC with id %u\n",
					localApic->apic_id));
				if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) {
					TRACE(("smp: APIC is disabled and will not be used\n"));
					break;
				}

				gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus]
					= localApic->apic_id;
				gKernelArgs.arch_args.cpu_os_id[localApic->apic_id]
					= gKernelArgs.num_cpus;
				// TODO: how to find out? putting 0x10 in to indicate a local apic
				gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus]
					= 0x10;
				gKernelArgs.num_cpus++;
				break;
			}

			case ACPI_MADT_IO_APIC: {
				acpi_io_apic *ioApic = (acpi_io_apic *)apic;
				TRACE(("smp: found io APIC with id %u and address 0x%lx\n",
					ioApic->io_apic_id, ioApic->io_apic_address));
				gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address;
				break;
			}
		}

		apic = (acpi_apic *)((uint8 *)apic + apic->length);
	}

	return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR;
}


/*!	Target function of the trampoline code.
	The trampoline code should have the pgdir and a gdt set up for us,
	along with us being on the final stack for this processor. We need
	to set up the local APIC and load the global idt and gdt. When we're
	done, we'll jump into the kernel with the cpu number as an argument.
*/
static int
smp_cpu_ready(void)
{
	uint32 curr_cpu = smp_get_current_cpu();
	struct gdt_idt_descr idt_descr;
	struct gdt_idt_descr gdt_descr;

	//TRACE(("smp_cpu_ready: entry cpu %ld\n", curr_cpu));

	// Important. Make sure supervisor threads can fault on read only pages...
	// (sets CR0: PG | WP | NE | PE)
	asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
	asm("cld");
	asm("fninit");

	// Set up the final idt
	idt_descr.a = IDT_LIMIT - 1;
	idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;

	asm("lidt	%0;"
		: : "m" (idt_descr));

	// Set up the final gdt
	gdt_descr.a = GDT_LIMIT - 1;
	gdt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_gdt;

	asm("lgdt	%0;"
		: : "m" (gdt_descr));

	// Hand-build the kernel entry's stack frame and jump to it via "ret".
	asm("pushl  %0; "					// push the cpu number
		"pushl 	%1;	"					// kernel args
		"pushl 	$0x0;"					// dummy retval for call to main
		"pushl 	%2;	"					// this is the start address
		"ret;		"					// jump.
		: : "g" (curr_cpu), "g" (&gKernelArgs),
			"g" (gKernelArgs.kernel_image.elf_header.e_entry));

	// no where to return to
	return 0;
}


/*!	Measures how many APIC timer ticks elapse per second by running the
	timer (masked, divide-by-one) against system_time() for a fixed number
	of instructions, and stores the result in
	gKernelArgs.arch_args.apic_time_cv_factor.
*/
static void
calculate_apic_timer_conversion_factor(void)
{
	int64 t1, t2;
	uint32 config;
	uint32 count;

	// setup the timer
	config = apic_read(APIC_LVT_TIMER);
	config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED;
		// timer masked, vector 0
	apic_write(APIC_LVT_TIMER, config);

	config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f);
	apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1);
		// divide clock by one

	t1 = system_time();
	apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter

	execute_n_instructions(128 * 20000);

	count = apic_read(APIC_CURRENT_TIMER_COUNT);
	t2 = system_time();

	count = 0xffffffff - count;
		// the timer counts down, so this is the elapsed tick count

	gKernelArgs.arch_args.apic_time_cv_factor
		= (uint32)((1000000.0/(t2 - t1)) * count);

	TRACE(("APIC ticks/sec = %ld\n",
		gKernelArgs.arch_args.apic_time_cv_factor));
}


//	#pragma mark -


/*!	Prepares for booting the application processors: applies the safemode
	overrides (disable SMP / disable local APIC), maps the local APIC and
	IO APIC into the loader's address space, calibrates the APIC timer,
	and allocates a kernel stack for each AP.
*/
void
smp_init_other_cpus(void)
{
	if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) {
		// SMP has been disabled!
		TRACE(("smp disabled per safemode setting\n"));
		gKernelArgs.num_cpus = 1;
	}

	if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) {
		TRACE(("local apic disabled per safemode setting\n"));
		gKernelArgs.arch_args.apic_phys = 0;
	}

	if (gKernelArgs.arch_args.apic_phys == 0)
		return;

	TRACE(("smp: found %ld cpu%s\n", gKernelArgs.num_cpus,
		gKernelArgs.num_cpus != 1 ? "s" : ""));
	TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys));
	TRACE(("smp: ioapic_phys = %p\n",
		(void *)gKernelArgs.arch_args.ioapic_phys));

	// map in the apic & ioapic (if available)
	gKernelArgs.arch_args.apic = (uint32 *)mmu_map_physical_memory(
		gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags);
	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		gKernelArgs.arch_args.ioapic = (uint32 *)mmu_map_physical_memory(
			gKernelArgs.arch_args.ioapic_phys, B_PAGE_SIZE, kDefaultPageFlags);
	}

	TRACE(("smp: apic = %p\n", gKernelArgs.arch_args.apic));
	TRACE(("smp: ioapic = %p\n", gKernelArgs.arch_args.ioapic));

	// calculate how fast the apic timer is
	calculate_apic_timer_conversion_factor();

	if (gKernelArgs.num_cpus < 2)
		return;

	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		// create a final stack the trampoline code will put the ap processor on
		gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL,
			KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
		gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
	}
}


/*!	Boots all application processors using the standard
	INIT - INIT - STARTUP inter-processor interrupt sequence. Each AP runs
	the real-mode trampoline copied below 1 MB, ends up in smp_cpu_ready(),
	and jumps into the kernel. Must run after smp_init_other_cpus().
*/
void
smp_boot_other_cpus(void)
{
	if (gKernelArgs.num_cpus < 2)
		return;

	TRACE(("trampolining other cpus\n"));

	// The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved
	// for this, or when PXE services are used 0x8b000-0x8cfff.

	// allocate a stack and a code area for the smp trampoline
	// (these have to be < 1M physical, 0xa0000-0xfffff is reserved by the BIOS,
	// and when PXE services are used, the 0x8d000-0x9ffff is also reserved)
#ifdef _PXE_ENV
	uint32 trampolineCode = 0x8b000;
	uint32 trampolineStack = 0x8c000;
#else
	uint32 trampolineCode = 0x9f000;
	uint32 trampolineStack = 0x9e000;
#endif

	// copy the trampoline code over
	memcpy((char *)trampolineCode, (const void*)&smp_trampoline,
		(uint32)&smp_trampoline_end - (uint32)&smp_trampoline);

	// boot the cpus
	for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) {
		uint32 *finalStack;
		uint32 *tempStack;
		uint32 config;
		uint32 numStartups;
		uint32 j;

		// set this stack up
		finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start;
		memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0,
			KERNEL_STACK_SIZE);
		// top slot of the final stack holds the AP's entry function; the
		// trampoline pops it and smp_cpu_ready() clears it (see wait below)
		tempStack = (finalStack
			+ (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
			/ sizeof(uint32)) - 1;
		*tempStack = (uint32)&smp_cpu_ready;

		// set the trampoline stack up
		tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4);
		// final location of the stack
		*tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE
			+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32);
		tempStack--;
		// page dir
		*tempStack = gKernelArgs.arch_args.phys_pgdir;

		// put a gdt descriptor at the bottom of the stack
		*((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT
		*((uint32 *)(trampolineStack + 2)) = trampolineStack + 8;

		// put the gdt at the bottom
		memcpy(&((uint32 *)trampolineStack)[2],
			(void *)gKernelArgs.arch_args.vir_gdt, 6 * 4);

		/* clear apic errors */
		// (only integrated local APICs — version 0x1x — have an error
		// status register; 82489DX external APICs do not)
		if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) {
			apic_write(APIC_ERROR_STATUS, 0);
			apic_read(APIC_ERROR_STATUS);
		}

		//dprintf("assert INIT\n");
		/* send (aka assert) INIT IPI */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT
			| APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		dprintf("deassert INIT\n");
		/* deassert INIT */
		config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
			| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
		apic_write(APIC_INTR_COMMAND_2, config);
		config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000)
			| APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT;
		apic_write(APIC_INTR_COMMAND_1, config);

		dprintf("wait for delivery\n");
		// wait for pending to end
		while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
			asm volatile ("pause;");

		/* wait 10ms */
		spin(10000);

		/* is this a local apic or an 82489dx ? */
		// integrated APICs need two STARTUP IPIs; 82489DX needs none
		numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0)
			? 2 : 0;
		dprintf("num startups = %ld\n", numStartups);
		for (j = 0; j < numStartups; j++) {
			/* it's a local apic, so send STARTUP IPIs */
			dprintf("send STARTUP\n");
			apic_write(APIC_ERROR_STATUS, 0);

			/* set target pe */
			config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK)
				| (gKernelArgs.arch_args.cpu_apic_id[i] << 24);
			apic_write(APIC_INTR_COMMAND_2, config);

			/* send the IPI */
			// the STARTUP vector is the page number of the trampoline code
			config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800)
				| APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12);
			apic_write(APIC_INTR_COMMAND_1, config);

			/* wait */
			spin(200);

			dprintf("wait for delivery\n");
			while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0)
				asm volatile ("pause;");
		}

		// Wait for the trampoline code to clear the final stack location.
		// This serves as a notification for us that it has loaded the address
		// and it is safe for us to overwrite it to trampoline the next CPU.
		tempStack++;
		while (*tempStack != 0)
			spin(1000);
	}

	TRACE(("done trampolining\n"));
}


/*!	Adds the SMP/APIC related items to the safemode boot menu; only items
	applicable to the detected hardware are shown.
*/
void
smp_add_safemode_menus(Menu *menu)
{
	MenuItem *item;
#if 0
	if (gKernelArgs.arch_args.ioapic_phys != 0) {
		// TODO: IOAPIC isn't yet used anywhere
		menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_IOAPIC);
		item->SetHelpText("Disables using the IO APIC for interrupt handling, "
			"forcing instead the use of the PIC.");
	}
#endif
	if (gKernelArgs.arch_args.apic_phys != 0) {
		menu->AddItem(item = new(nothrow) MenuItem("Disable LOCAL APIC"));
		item->SetType(MENU_ITEM_MARKABLE);
		item->SetData(B_SAFEMODE_DISABLE_APIC);
		item->SetHelpText("Disables using the LOCAL APIC for timekeeping.");
	}

	if (gKernelArgs.num_cpus < 2)
		return;

	item = new(nothrow) MenuItem("Disable SMP");
	menu->AddItem(item);
	item->SetData(B_SAFEMODE_DISABLE_SMP);
	item->SetType(MENU_ITEM_MARKABLE);
	item->SetHelpText("Disables all but one CPU core.");
}


/*!	Detects the MP configuration, trying ACPI first and falling back to
	the Intel MP specification tables. On failure (or with NO_SMP set)
	the machine is treated as single-CPU.
*/
void
smp_init(void)
{
#if NO_SMP
	gKernelArgs.num_cpus = 1;
	return;
#endif

	// first try to find ACPI tables to get MP configuration as it handles
	// physical as well as logical MP configurations as in multiple cpus,
	// multiple cores or hyper threading.
	if (smp_do_acpi_config() == B_OK)
		return;

	// then try to find MPS tables and do configuration based on them
	for (int32 i = 0; smp_scan_spots[i].length > 0; i++) {
		mp_floating_struct *floatingStruct = smp_mp_probe(
			smp_scan_spots[i].start, smp_scan_spots[i].stop);
		if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK)
			return;
	}

	// everything failed or we are not running an SMP system
	gKernelArgs.num_cpus = 1;
}