1 /* 2 * Copyright 2008, Dustin Howett, dustin.howett@gmail.com. All rights reserved. 3 * Copyright 2004-2010, Axel Dörfler, axeld@pinc-software.de. 4 * Distributed under the terms of the MIT License. 5 * 6 * Copyright 2001, Travis Geiselbrecht. All rights reserved. 7 * Distributed under the terms of the NewOS License. 8 */ 9 10 11 #include "smp.h" 12 13 #include <string.h> 14 15 #include <KernelExport.h> 16 17 #include <kernel.h> 18 #include <safemode.h> 19 #include <boot/stage2.h> 20 #include <boot/menu.h> 21 #include <arch/x86/apic.h> 22 #include <arch/x86/arch_cpu.h> 23 #include <arch/x86/arch_smp.h> 24 #include <arch/x86/arch_system_info.h> 25 #include <arch/x86/descriptors.h> 26 27 #include "mmu.h" 28 #include "acpi.h" 29 30 31 #define NO_SMP 0 32 33 #define TRACE_SMP 34 #ifdef TRACE_SMP 35 # define TRACE(x) dprintf x 36 #else 37 # define TRACE(x) ; 38 #endif 39 40 41 static struct scan_spots_struct smp_scan_spots[] = { 42 { 0x9fc00, 0xa0000, 0xa0000 - 0x9fc00 }, 43 { 0xf0000, 0x100000, 0x100000 - 0xf0000 }, 44 { 0, 0, 0 } 45 }; 46 47 extern "C" void execute_n_instructions(int count); 48 49 extern "C" void smp_trampoline(void); 50 extern "C" void smp_trampoline_end(void); 51 52 53 static uint32 54 apic_read(uint32 offset) 55 { 56 return *(volatile uint32 *)((addr_t)(void *)gKernelArgs.arch_args.apic + offset); 57 } 58 59 60 static void 61 apic_write(uint32 offset, uint32 data) 62 { 63 *(volatile uint32 *)((addr_t)(void *)gKernelArgs.arch_args.apic + offset) = data; 64 } 65 66 67 static mp_floating_struct * 68 smp_mp_probe(uint32 base, uint32 limit) 69 { 70 TRACE(("smp_mp_probe: entry base 0x%x, limit 0x%x\n", base, limit)); 71 for (uint32 *pointer = (uint32 *)base; (uint32)pointer < limit; pointer++) { 72 if (*pointer == MP_FLOATING_SIGNATURE) { 73 TRACE(("smp_mp_probe: found floating pointer structure at %p\n", 74 pointer)); 75 return (mp_floating_struct *)pointer; 76 } 77 } 78 79 return NULL; 80 } 81 82 83 static status_t 84 smp_do_mp_config(mp_floating_struct *floatingStruct) 85 { 86 if (floatingStruct->config_length != 1) { 87 TRACE(("smp: unsupported structure length of %" B_PRIu8 " units\n", 88 floatingStruct->config_length)); 89 return B_UNSUPPORTED; 90 } 91 92 TRACE(("smp: intel mp version %s, %s", 93 (floatingStruct->spec_revision == 1) ? "1.1" : "1.4", 94 (floatingStruct->mp_feature_2 & 0x80) 95 ? "imcr and pic compatibility mode.\n" 96 : "virtual wire compatibility mode.\n")); 97 98 if (floatingStruct->config_table == NULL) { 99 #if 1 100 // TODO: need to implement 101 TRACE(("smp: standard configuration %d unimplemented\n", 102 floatingStruct->mp_feature_1)); 103 gKernelArgs.num_cpus = 1; 104 return B_OK; 105 #else 106 // this system conforms to one of the default configurations 107 TRACE(("smp: standard configuration %d\n", floatingStruct->mp_feature_1)); 108 gKernelArgs.num_cpus = 2; 109 gKernelArgs.cpu_apic_id[0] = 0; 110 gKernelArgs.cpu_apic_id[1] = 1; 111 apic_phys = (unsigned int *)0xfee00000; 112 ioapic_phys = (unsigned int *)0xfec00000; 113 dprintf("smp: WARNING: standard configuration code is untested"); 114 return B_OK; 115 #endif 116 } 117 118 // We are not running in standard configuration, so we have to look through 119 // all of the mp configuration table crap to figure out how many processors 120 // we have, where our apics are, etc. 121 122 mp_config_table *config = floatingStruct->config_table; 123 gKernelArgs.num_cpus = 0; 124 125 if (config->signature != MP_CONFIG_TABLE_SIGNATURE) { 126 TRACE(("smp: invalid config table signature, aborting\n")); 127 return B_ERROR; 128 } 129 130 if (config->base_table_length < sizeof(mp_config_table)) { 131 TRACE(("smp: config table length %" B_PRIu16 132 " too short for structure, aborting\n", 133 config->base_table_length)); 134 return B_ERROR; 135 } 136 137 // print our new found configuration. 138 TRACE(("smp: oem id: %.8s product id: %.12s\n", config->oem, 139 config->product)); 140 TRACE(("smp: base table has %d entries, extended section %d bytes\n", 141 config->num_base_entries, config->ext_length)); 142 143 gKernelArgs.arch_args.apic_phys = (uint32)config->apic; 144 if ((gKernelArgs.arch_args.apic_phys % 4096) != 0) { 145 // MP specs mandate a 4K alignment for the local APIC(s) 146 TRACE(("smp: local apic %p has bad alignment, aborting\n", 147 (void *)gKernelArgs.arch_args.apic_phys)); 148 return B_ERROR; 149 } 150 151 char *pointer = (char *)((uint32)config + sizeof(struct mp_config_table)); 152 for (int32 i = 0; i < config->num_base_entries; i++) { 153 switch (*pointer) { 154 case MP_BASE_PROCESSOR: 155 { 156 struct mp_base_processor *processor 157 = (struct mp_base_processor *)pointer; 158 pointer += sizeof(struct mp_base_processor); 159 160 if (gKernelArgs.num_cpus == SMP_MAX_CPUS) { 161 TRACE(("smp: already reached maximum CPUs (%d)\n", 162 SMP_MAX_CPUS)); 163 continue; 164 } 165 166 // skip if the processor is not enabled. 167 if (!(processor->cpu_flags & 0x1)) { 168 TRACE(("smp: skip apic id %d: disabled\n", 169 processor->apic_id)); 170 continue; 171 } 172 173 gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus] 174 = processor->apic_id; 175 gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus] 176 = processor->apic_version; 177 178 #ifdef TRACE_SMP 179 const char *cpuFamily[] = { "", "", "", "", "Intel 486", 180 "Intel Pentium", "Intel Pentium Pro", "Intel Pentium II" }; 181 #endif 182 TRACE(("smp: cpu#%d: %s, apic id %d, version %d%s\n", 183 gKernelArgs.num_cpus, 184 cpuFamily[(processor->signature & 0xf00) >> 8], 185 processor->apic_id, processor->apic_version, 186 (processor->cpu_flags & 0x2) ? ", BSP" : "")); 187 188 gKernelArgs.num_cpus++; 189 break; 190 } 191 case MP_BASE_BUS: 192 { 193 struct mp_base_bus *bus = (struct mp_base_bus *)pointer; 194 pointer += sizeof(struct mp_base_bus); 195 196 TRACE(("smp: bus %d: %c%c%c%c%c%c\n", bus->bus_id, 197 bus->name[0], bus->name[1], bus->name[2], bus->name[3], 198 bus->name[4], bus->name[5])); 199 200 break; 201 } 202 case MP_BASE_IO_APIC: 203 { 204 struct mp_base_ioapic *io = (struct mp_base_ioapic *)pointer; 205 pointer += sizeof(struct mp_base_ioapic); 206 207 if (gKernelArgs.arch_args.ioapic_phys == 0) { 208 gKernelArgs.arch_args.ioapic_phys = (uint32)io->addr; 209 if (gKernelArgs.arch_args.ioapic_phys % 1024) { 210 // MP specs mandate a 1K alignment for the IO-APICs 211 TRACE(("smp: io apic %p has bad alignment, aborting\n", 212 (void *)gKernelArgs.arch_args.ioapic_phys)); 213 return B_ERROR; 214 } 215 } 216 217 TRACE(("smp: found io apic with apic id %d, version %d\n", 218 io->ioapic_id, io->ioapic_version)); 219 220 break; 221 } 222 case MP_BASE_IO_INTR: 223 case MP_BASE_LOCAL_INTR: 224 { 225 struct mp_base_interrupt *interrupt 226 = (struct mp_base_interrupt *)pointer; 227 pointer += sizeof(struct mp_base_interrupt); 228 229 dprintf("smp: %s int: type %d, source bus %d, irq %3d, dest " 230 "apic %d, int %3d, polarity %d, trigger mode %d\n", 231 interrupt->type == MP_BASE_IO_INTR ? "I/O" : "local", 232 interrupt->interrupt_type, interrupt->source_bus_id, 233 interrupt->source_bus_irq, interrupt->dest_apic_id, 234 interrupt->dest_apic_int, interrupt->polarity, 235 interrupt->trigger_mode); 236 break; 237 } 238 } 239 } 240 241 if (gKernelArgs.num_cpus == 0) { 242 TRACE(("smp: didn't find any processors, aborting\n")); 243 return B_ERROR; 244 } 245 246 dprintf("smp: apic @ %p, i/o apic @ %p, total %d processors detected\n", 247 (void *)gKernelArgs.arch_args.apic_phys, 248 (void *)gKernelArgs.arch_args.ioapic_phys, 249 gKernelArgs.num_cpus); 250 251 return B_OK; 252 } 253 254 255 static status_t 256 smp_do_acpi_config(void) 257 { 258 TRACE(("smp: using ACPI to detect MP configuration\n")); 259 260 // reset CPU count 261 gKernelArgs.num_cpus = 0; 262 263 acpi_madt *madt = (acpi_madt *)acpi_find_table(ACPI_MADT_SIGNATURE); 264 265 if (madt == NULL) { 266 TRACE(("smp: Failed to find MADT!\n")); 267 return B_ERROR; 268 } 269 270 gKernelArgs.arch_args.apic_phys = madt->local_apic_address; 271 TRACE(("smp: local apic address is 0x%x\n", madt->local_apic_address)); 272 273 acpi_apic *apic = (acpi_apic *)((uint8 *)madt + sizeof(acpi_madt)); 274 acpi_apic *end = (acpi_apic *)((uint8 *)madt + madt->header.length); 275 while (apic < end) { 276 switch (apic->type) { 277 case ACPI_MADT_LOCAL_APIC: 278 { 279 if (gKernelArgs.num_cpus == SMP_MAX_CPUS) { 280 TRACE(("smp: already reached maximum CPUs (%d)\n", 281 SMP_MAX_CPUS)); 282 break; 283 } 284 285 acpi_local_apic *localApic = (acpi_local_apic *)apic; 286 TRACE(("smp: found local APIC with id %u\n", 287 localApic->apic_id)); 288 if ((localApic->flags & ACPI_LOCAL_APIC_ENABLED) == 0) { 289 TRACE(("smp: APIC is disabled and will not be used\n")); 290 break; 291 } 292 293 gKernelArgs.arch_args.cpu_apic_id[gKernelArgs.num_cpus] 294 = localApic->apic_id; 295 // TODO: how to find out? putting 0x10 in to indicate a local apic 296 gKernelArgs.arch_args.cpu_apic_version[gKernelArgs.num_cpus] 297 = 0x10; 298 gKernelArgs.num_cpus++; 299 break; 300 } 301 302 case ACPI_MADT_IO_APIC: { 303 acpi_io_apic *ioApic = (acpi_io_apic *)apic; 304 TRACE(("smp: found io APIC with id %u and address 0x%x\n", 305 ioApic->io_apic_id, ioApic->io_apic_address)); 306 if (gKernelArgs.arch_args.ioapic_phys == 0) 307 gKernelArgs.arch_args.ioapic_phys = ioApic->io_apic_address; 308 break; 309 } 310 default: 311 break; 312 } 313 314 apic = (acpi_apic *)((uint8 *)apic + apic->length); 315 } 316 317 return gKernelArgs.num_cpus > 0 ? B_OK : B_ERROR; 318 } 319 320 321 static void 322 calculate_apic_timer_conversion_factor(void) 323 { 324 int64 t1, t2; 325 uint32 config; 326 uint32 count; 327 328 // setup the timer 329 config = apic_read(APIC_LVT_TIMER); 330 config = (config & APIC_LVT_TIMER_MASK) + APIC_LVT_MASKED; 331 // timer masked, vector 0 332 apic_write(APIC_LVT_TIMER, config); 333 334 config = (apic_read(APIC_TIMER_DIVIDE_CONFIG) & ~0x0000000f); 335 apic_write(APIC_TIMER_DIVIDE_CONFIG, config | APIC_TIMER_DIVIDE_CONFIG_1); 336 // divide clock by one 337 338 t1 = system_time(); 339 apic_write(APIC_INITIAL_TIMER_COUNT, 0xffffffff); // start the counter 340 341 execute_n_instructions(128 * 20000); 342 343 count = apic_read(APIC_CURRENT_TIMER_COUNT); 344 t2 = system_time(); 345 346 count = 0xffffffff - count; 347 348 gKernelArgs.arch_args.apic_time_cv_factor 349 = (uint32)((1000000.0/(t2 - t1)) * count); 350 351 TRACE(("APIC ticks/sec = %d\n", 352 gKernelArgs.arch_args.apic_time_cv_factor)); 353 } 354 355 356 // #pragma mark - 357 358 359 int 360 smp_get_current_cpu(void) 361 { 362 if (gKernelArgs.arch_args.apic == NULL) 363 return 0; 364 365 uint8 apicID = apic_read(APIC_ID) >> 24; 366 for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) { 367 if (gKernelArgs.arch_args.cpu_apic_id[i] == apicID) 368 return i; 369 } 370 371 return 0; 372 } 373 374 375 void 376 smp_init_other_cpus(void) 377 { 378 if (get_safemode_boolean(B_SAFEMODE_DISABLE_SMP, false)) { 379 // SMP has been disabled! 380 TRACE(("smp disabled per safemode setting\n")); 381 gKernelArgs.num_cpus = 1; 382 } 383 384 if (get_safemode_boolean(B_SAFEMODE_DISABLE_APIC, false)) { 385 TRACE(("local apic disabled per safemode setting, disabling smp\n")); 386 gKernelArgs.arch_args.apic_phys = 0; 387 gKernelArgs.num_cpus = 1; 388 } 389 390 if (gKernelArgs.arch_args.apic_phys == 0) 391 return; 392 393 TRACE(("smp: found %d cpu%s\n", gKernelArgs.num_cpus, 394 gKernelArgs.num_cpus != 1 ? "s" : "")); 395 TRACE(("smp: apic_phys = %p\n", (void *)gKernelArgs.arch_args.apic_phys)); 396 TRACE(("smp: ioapic_phys = %p\n", 397 (void *)gKernelArgs.arch_args.ioapic_phys)); 398 399 // map in the apic 400 gKernelArgs.arch_args.apic = (void *)mmu_map_physical_memory( 401 gKernelArgs.arch_args.apic_phys, B_PAGE_SIZE, kDefaultPageFlags); 402 403 TRACE(("smp: apic (mapped) = %p\n", (void *)gKernelArgs.arch_args.apic)); 404 405 // calculate how fast the apic timer is 406 calculate_apic_timer_conversion_factor(); 407 408 if (gKernelArgs.num_cpus < 2) 409 return; 410 411 for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) { 412 // create a final stack the trampoline code will put the ap processor on 413 gKernelArgs.cpu_kstack[i].start = (addr_t)mmu_allocate(NULL, 414 KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE); 415 gKernelArgs.cpu_kstack[i].size = KERNEL_STACK_SIZE 416 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE; 417 } 418 } 419 420 421 void 422 smp_boot_other_cpus(void (*entryFunc)(void)) 423 { 424 if (gKernelArgs.num_cpus < 2) 425 return; 426 427 TRACE(("trampolining other cpus\n")); 428 429 // The first 8 MB are identity mapped, either 0x9e000-0x9ffff is reserved 430 // for this, or when PXE services are used 0x8b000-0x8cfff. 431 432 // allocate a stack and a code area for the smp trampoline 433 // (these have to be < 1M physical, 0xa0000-0xfffff is reserved by the BIOS, 434 // and when PXE services are used, the 0x8d000-0x9ffff is also reserved) 435 #ifdef _PXE_ENV 436 uint32 trampolineCode = 0x8b000; 437 uint32 trampolineStack = 0x8c000; 438 #else 439 uint32 trampolineCode = 0x9f000; 440 uint32 trampolineStack = 0x9e000; 441 #endif 442 443 // copy the trampoline code over 444 memcpy((char *)trampolineCode, (const void*)&smp_trampoline, 445 (uint32)&smp_trampoline_end - (uint32)&smp_trampoline); 446 447 // boot the cpus 448 for (uint32 i = 1; i < gKernelArgs.num_cpus; i++) { 449 uint32 *finalStack; 450 uint32 *tempStack; 451 uint32 config; 452 uint32 numStartups; 453 uint32 j; 454 455 // set this stack up 456 finalStack = (uint32 *)gKernelArgs.cpu_kstack[i].start; 457 memset((uint8*)finalStack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE, 0, 458 KERNEL_STACK_SIZE); 459 tempStack = (finalStack 460 + (KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE) 461 / sizeof(uint32)) - 1; 462 *tempStack = (uint32)entryFunc; 463 464 // set the trampoline stack up 465 tempStack = (uint32 *)(trampolineStack + B_PAGE_SIZE - 4); 466 // final location of the stack 467 *tempStack = ((uint32)finalStack) + KERNEL_STACK_SIZE 468 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE - sizeof(uint32); 469 tempStack--; 470 // page dir 471 *tempStack = x86_read_cr3() & 0xfffff000; 472 473 // put a gdt descriptor at the bottom of the stack 474 *((uint16 *)trampolineStack) = 0x18 - 1; // LIMIT 475 *((uint32 *)(trampolineStack + 2)) = trampolineStack + 8; 476 477 // construct a temporary gdt at the bottom 478 segment_descriptor* tempGDT 479 = (segment_descriptor*)&((uint32 *)trampolineStack)[2]; 480 clear_segment_descriptor(&tempGDT[0]); 481 set_segment_descriptor(&tempGDT[1], 0, 0xffffffff, DT_CODE_READABLE, 482 DPL_KERNEL); 483 set_segment_descriptor(&tempGDT[2], 0, 0xffffffff, DT_DATA_WRITEABLE, 484 DPL_KERNEL); 485 486 /* clear apic errors */ 487 if (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) { 488 apic_write(APIC_ERROR_STATUS, 0); 489 apic_read(APIC_ERROR_STATUS); 490 } 491 492 //dprintf("assert INIT\n"); 493 /* send (aka assert) INIT IPI */ 494 config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK) 495 | (gKernelArgs.arch_args.cpu_apic_id[i] << 24); 496 apic_write(APIC_INTR_COMMAND_2, config); /* set target pe */ 497 config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000) 498 | APIC_TRIGGER_MODE_LEVEL | APIC_INTR_COMMAND_1_ASSERT 499 | APIC_DELIVERY_MODE_INIT; 500 apic_write(APIC_INTR_COMMAND_1, config); 501 502 dprintf("wait for delivery\n"); 503 // wait for pending to end 504 while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0) 505 asm volatile ("pause;"); 506 507 dprintf("deassert INIT\n"); 508 /* deassert INIT */ 509 config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK) 510 | (gKernelArgs.arch_args.cpu_apic_id[i] << 24); 511 apic_write(APIC_INTR_COMMAND_2, config); 512 config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff00000) 513 | APIC_TRIGGER_MODE_LEVEL | APIC_DELIVERY_MODE_INIT; 514 apic_write(APIC_INTR_COMMAND_1, config); 515 516 dprintf("wait for delivery\n"); 517 // wait for pending to end 518 while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0) 519 asm volatile ("pause;"); 520 521 /* wait 10ms */ 522 spin(10000); 523 524 /* is this a local apic or an 82489dx ? */ 525 numStartups = (gKernelArgs.arch_args.cpu_apic_version[i] & 0xf0) 526 ? 2 : 0; 527 dprintf("num startups = %d\n", numStartups); 528 for (j = 0; j < numStartups; j++) { 529 /* it's a local apic, so send STARTUP IPIs */ 530 dprintf("send STARTUP\n"); 531 apic_write(APIC_ERROR_STATUS, 0); 532 533 /* set target pe */ 534 config = (apic_read(APIC_INTR_COMMAND_2) & APIC_INTR_COMMAND_2_MASK) 535 | (gKernelArgs.arch_args.cpu_apic_id[i] << 24); 536 apic_write(APIC_INTR_COMMAND_2, config); 537 538 /* send the IPI */ 539 config = (apic_read(APIC_INTR_COMMAND_1) & 0xfff0f800) 540 | APIC_DELIVERY_MODE_STARTUP | (trampolineCode >> 12); 541 apic_write(APIC_INTR_COMMAND_1, config); 542 543 /* wait */ 544 spin(200); 545 546 dprintf("wait for delivery\n"); 547 while ((apic_read(APIC_INTR_COMMAND_1) & APIC_DELIVERY_STATUS) != 0) 548 asm volatile ("pause;"); 549 } 550 551 // Wait for the trampoline code to clear the final stack location. 552 // This serves as a notification for us that it has loaded the address 553 // and it is safe for us to overwrite it to trampoline the next CPU. 554 tempStack++; 555 while (*tempStack != 0) 556 spin(1000); 557 } 558 559 TRACE(("done trampolining\n")); 560 } 561 562 563 void 564 smp_add_safemode_menus(Menu *menu) 565 { 566 MenuItem *item; 567 568 if (gKernelArgs.arch_args.ioapic_phys != 0) { 569 menu->AddItem(item = new(nothrow) MenuItem("Disable IO-APIC")); 570 item->SetType(MENU_ITEM_MARKABLE); 571 item->SetData(B_SAFEMODE_DISABLE_IOAPIC); 572 item->SetHelpText("Disables using the IO APIC for interrupt routing, " 573 "forcing the use of the legacy PIC instead."); 574 } 575 576 if (gKernelArgs.arch_args.apic_phys != 0) { 577 menu->AddItem(item = new(nothrow) MenuItem("Disable local APIC")); 578 item->SetType(MENU_ITEM_MARKABLE); 579 item->SetData(B_SAFEMODE_DISABLE_APIC); 580 item->SetHelpText("Disables using the local APIC, also disables SMP."); 581 582 cpuid_info info; 583 if (get_current_cpuid(&info, 1, 0) == B_OK 584 && (info.regs.ecx & IA32_FEATURE_EXT_X2APIC) != 0) { 585 menu->AddItem(item = new(nothrow) MenuItem("Disable X2APIC")); 586 item->SetType(MENU_ITEM_MARKABLE); 587 item->SetData(B_SAFEMODE_DISABLE_X2APIC); 588 item->SetHelpText("Disables using X2APIC."); 589 } 590 591 get_current_cpuid(&info, 0, 0); 592 uint32 maxBasicLeaf = info.eax_0.max_eax; 593 if (maxBasicLeaf >= 7) { 594 if (get_current_cpuid(&info, 7, 0) == B_OK 595 && (info.regs.ebx & (IA32_FEATURE_SMEP 596 | IA32_FEATURE_SMAP)) != 0) { 597 menu->AddItem(item = new(nothrow) MenuItem( 598 "Disable SMEP and SMAP")); 599 item->SetType(MENU_ITEM_MARKABLE); 600 item->SetData(B_SAFEMODE_DISABLE_SMEP_SMAP); 601 item->SetHelpText("Disables using SMEP and SMAP."); 602 } 603 604 if (get_current_cpuid(&info, 7, 0) == B_OK 605 && (info.regs.ecx & IA32_FEATURE_LA57) != 0) { 606 menu->AddItem(item = new(nothrow) MenuItem( 607 "Ignore memory beyond 256 TiB")); 608 item->SetType(MENU_ITEM_MARKABLE); 609 item->SetData(B_SAFEMODE_256_TB_MEMORY_LIMIT); 610 item->SetHelpText("Ignores all memory beyond the 256 TiB " 611 "address limit, overriding the setting in the kernel " 612 "settings file."); 613 } 614 } 615 } 616 617 cpuid_info info; 618 if (get_current_cpuid(&info, 1, 0) == B_OK 619 && (info.regs.edx & IA32_FEATURE_PAT) != 0) { 620 menu->AddItem(item = new(nothrow) MenuItem("Disable PAT")); 621 item->SetType(MENU_ITEM_MARKABLE); 622 item->SetData(B_SAFEMODE_DISABLE_PAT); 623 item->SetHelpText("Disables using page attribute tables for memory " 624 "type setting, falling back to MTRRs."); 625 } 626 627 if (gKernelArgs.num_cpus < 2) 628 return; 629 630 item = new(nothrow) MenuItem("Disable SMP"); 631 menu->AddItem(item); 632 item->SetData(B_SAFEMODE_DISABLE_SMP); 633 item->SetType(MENU_ITEM_MARKABLE); 634 item->SetHelpText("Disables all but one CPU core."); 635 } 636 637 638 void 639 smp_init(void) 640 { 641 #if NO_SMP 642 gKernelArgs.num_cpus = 1; 643 return; 644 #endif 645 646 cpuid_info info; 647 if (get_current_cpuid(&info, 1, 0) != B_OK) 648 return; 649 650 if ((info.eax_1.features & IA32_FEATURE_APIC) == 0) { 651 // Local APICs aren't present; As they form the basis for all inter CPU 652 // communication and therefore SMP, we don't need to go any further. 653 dprintf("no local APIC present, not attempting SMP init\n"); 654 return; 655 } 656 657 // first try to find ACPI tables to get MP configuration as it handles 658 // physical as well as logical MP configurations as in multiple cpus, 659 // multiple cores or hyper threading. 660 if (smp_do_acpi_config() == B_OK) 661 return; 662 663 // then try to find MPS tables and do configuration based on them 664 for (int32 i = 0; smp_scan_spots[i].length > 0; i++) { 665 mp_floating_struct *floatingStruct = smp_mp_probe( 666 smp_scan_spots[i].start, smp_scan_spots[i].stop); 667 if (floatingStruct != NULL && smp_do_mp_config(floatingStruct) == B_OK) 668 return; 669 } 670 671 // Everything failed or we are not running an SMP system, reset anything 672 // that might have been set through an incomplete configuration attempt. 673 gKernelArgs.arch_args.apic_phys = 0; 674 gKernelArgs.arch_args.ioapic_phys = 0; 675 gKernelArgs.num_cpus = 1; 676 } 677