1 /* 2 * Copyright 2006-2018, Haiku, Inc. All Rights Reserved. 3 * Distributed under the terms of the MIT License. 4 * 5 * Authors: 6 * Axel Dörfler, axeld@pinc-software.de 7 * Alexander von Gluck IV, kallisti5@unixzen.com 8 * Adrien Destugues, pulkomandy@pulkomandy.tk 9 */ 10 11 12 #include "intel_extreme.h" 13 14 #include <unistd.h> 15 #include <stdio.h> 16 #include <string.h> 17 #include <errno.h> 18 19 #include <AreaKeeper.h> 20 #include <boot_item.h> 21 #include <driver_settings.h> 22 #include <util/kernel_cpp.h> 23 24 #include <vesa_info.h> 25 26 #include "driver.h" 27 #include "power.h" 28 #include "utility.h" 29 30 31 #define TRACE_INTELEXTREME 32 #ifdef TRACE_INTELEXTREME 33 # define TRACE(x...) dprintf("intel_extreme: " x) 34 #else 35 # define TRACE(x) ; 36 #endif 37 38 #define ERROR(x...) dprintf("intel_extreme: " x) 39 #define CALLED(x...) TRACE("intel_extreme: CALLED %s\n", __PRETTY_FUNCTION__) 40 41 42 static void 43 init_overlay_registers(overlay_registers* _registers) 44 { 45 user_memset(_registers, 0, B_PAGE_SIZE); 46 47 overlay_registers registers; 48 memset(®isters, 0, sizeof(registers)); 49 registers.contrast_correction = 0x48; 50 registers.saturation_cos_correction = 0x9a; 51 // this by-passes contrast and saturation correction 52 53 user_memcpy(_registers, ®isters, sizeof(overlay_registers)); 54 } 55 56 57 static void 58 read_settings(bool &hardwareCursor) 59 { 60 hardwareCursor = false; 61 62 void* settings = load_driver_settings("intel_extreme"); 63 if (settings != NULL) { 64 hardwareCursor = get_driver_boolean_parameter(settings, 65 "hardware_cursor", true, true); 66 67 unload_driver_settings(settings); 68 } 69 } 70 71 72 static int32 73 release_vblank_sem(intel_info &info) 74 { 75 int32 count; 76 if (get_sem_count(info.shared_info->vblank_sem, &count) == B_OK 77 && count < 0) { 78 release_sem_etc(info.shared_info->vblank_sem, -count, 79 B_DO_NOT_RESCHEDULE); 80 return B_INVOKE_SCHEDULER; 81 } 82 83 return B_HANDLED_INTERRUPT; 84 } 85 86 87 static void 88 gen8_enable_interrupts(intel_info& info, pipe_index pipe, bool enable) 89 { 90 ASSERT(pipe != INTEL_PIPE_ANY); 91 ASSERT(info.device_type.Generation() >= 12 || pipe != INTEL_PIPE_D); 92 93 const uint32 regMask = PCH_INTERRUPT_PIPE_MASK_BDW(pipe); 94 const uint32 regEnabled = PCH_INTERRUPT_PIPE_ENABLED_BDW(pipe); 95 const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(pipe); 96 const uint32 value = enable ? PCH_INTERRUPT_VBLANK_BDW : 0; 97 write32(info, regIdentity, ~0); 98 write32(info, regEnabled, value); 99 write32(info, regMask, ~value); 100 } 101 102 103 static uint32 104 gen11_enable_global_interrupts(intel_info& info, bool enable) 105 { 106 write32(info, GEN11_GFX_MSTR_IRQ, enable ? GEN11_MASTER_IRQ : 0); 107 return enable ? 0 : read32(info, GEN11_GFX_MSTR_IRQ); 108 } 109 110 111 static uint32 112 gen8_enable_global_interrupts(intel_info& info, bool enable) 113 { 114 write32(info, PCH_MASTER_INT_CTL_BDW, enable ? PCH_MASTER_INT_CTL_GLOBAL_BDW : 0); 115 return enable ? 0 : read32(info, PCH_MASTER_INT_CTL_BDW); 116 } 117 118 119 /*! 120 Checks interrupt status with provided master interrupt control register. 121 For Gen8 to Gen11. 122 */ 123 static int32 124 gen8_handle_interrupts(intel_info& info, uint32 interrupt) 125 { 126 int32 handled = B_HANDLED_INTERRUPT; 127 if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_A)) != 0) { 128 const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_A); 129 uint32 identity = read32(info, regIdentity); 130 if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) { 131 handled = release_vblank_sem(info); 132 write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW); 133 } else { 134 dprintf("gen8_handle_interrupts unhandled interrupt on pipe A\n"); 135 } 136 interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_A); 137 } 138 if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_B)) != 0) { 139 const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_B); 140 uint32 identity = read32(info, regIdentity); 141 if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) { 142 handled = release_vblank_sem(info); 143 write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW); 144 } else { 145 dprintf("gen8_handle_interrupts unhandled interrupt on pipe B\n"); 146 } 147 interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_B); 148 } 149 if ((interrupt & PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_C)) != 0) { 150 const uint32 regIdentity = PCH_INTERRUPT_PIPE_IDENTITY_BDW(INTEL_PIPE_C); 151 uint32 identity = read32(info, regIdentity); 152 if ((identity & PCH_INTERRUPT_VBLANK_BDW) != 0) { 153 handled = release_vblank_sem(info); 154 write32(info, regIdentity, identity | PCH_INTERRUPT_VBLANK_BDW); 155 } else { 156 dprintf("gen8_handle_interrupts unhandled interrupt on pipe C\n"); 157 } 158 interrupt &= ~PCH_MASTER_INT_CTL_PIPE_PENDING_BDW(INTEL_PIPE_C); 159 } 160 161 if ((interrupt & GEN8_DE_PORT_IRQ) != 0) { 162 uint32 iir = read32(info, GEN8_DE_PORT_IIR); 163 if (iir != 0) { 164 write32(info, GEN8_DE_PORT_IIR, iir); 165 } 166 interrupt &= ~GEN8_DE_PORT_IRQ; 167 } 168 169 if (info.device_type.Generation() >= 11 && (interrupt & GEN11_DE_HPD_IRQ) != 0) { 170 dprintf("gen8_handle_interrupts HPD\n"); 171 uint32 iir = read32(info, GEN11_DE_HPD_IIR); 172 if (iir != 0) { 173 dprintf("gen8_handle_interrupts HPD_IIR %" B_PRIx32 "\n", iir); 174 write32(info, GEN11_DE_HPD_IIR, iir); 175 } 176 interrupt &= ~GEN11_DE_HPD_IRQ; 177 } 178 179 if ((interrupt & GEN8_DE_PCH_IRQ) != 0) { 180 dprintf("gen8_handle_interrupts PCH\n"); 181 uint32 iir = read32(info, SDEIIR); 182 if (iir != 0) { 183 dprintf("gen8_handle_interrupts PCH_IIR %" B_PRIx32 "\n", iir); 184 write32(info, SDEIIR, iir); 185 if (info.shared_info->pch_info >= INTEL_PCH_ICP) { 186 uint32 ddiHotplug = read32(info, SHOTPLUG_CTL_DDI); 187 write32(info, SHOTPLUG_CTL_DDI, ddiHotplug); 188 dprintf("gen8_handle_interrupts PCH_IIR ddiHotplug %" B_PRIx32 "\n", ddiHotplug); 189 190 uint32 tcHotplug = read32(info, SHOTPLUG_CTL_TC); 191 write32(info, SHOTPLUG_CTL_TC, tcHotplug); 192 dprintf("gen8_handle_interrupts PCH_IIR tcHotplug %" B_PRIx32 "\n", tcHotplug); 193 } 194 } 195 interrupt &= ~GEN8_DE_PCH_IRQ; 196 } 197 198 interrupt &= ~PCH_MASTER_INT_CTL_GLOBAL_BDW; 199 if (interrupt != 0) 200 dprintf("gen8_handle_interrupts unhandled %" B_PRIx32 "\n", interrupt); 201 return handled; 202 } 203 204 205 206 /** Get the appropriate interrupt mask for enabling or testing interrupts on 207 * the given pipe. 208 * 209 * The bits to test or set are different depending on the hardware generation. 210 * 211 * \param info Intel_extreme driver information 212 * \param pipe pipe to use 213 * \param enable true to get the mask for enabling the interrupts, false to get 214 * the mask for testing them. 215 */ 216 static uint32 217 intel_get_interrupt_mask(intel_info& info, pipe_index pipe, bool enable) 218 { 219 uint32 mask = 0; 220 bool hasPCH = info.pch_info != INTEL_PCH_NONE; 221 222 // Intel changed the PCH register mapping between Sandy Bridge and the 223 // later generations (Ivy Bridge and up). 224 // The PCH register itself does not exist in pre-PCH platforms, and the 225 // previous interrupt register of course also had a different mapping. 226 227 if (pipe == INTEL_PIPE_A) { 228 if (info.device_type.InGroup(INTEL_GROUP_SNB) 229 || info.device_type.InGroup(INTEL_GROUP_ILK)) 230 mask |= PCH_INTERRUPT_VBLANK_PIPEA_SNB; 231 else if (hasPCH) 232 mask |= PCH_INTERRUPT_VBLANK_PIPEA; 233 else 234 mask |= INTERRUPT_VBLANK_PIPEA; 235 } 236 237 if (pipe == INTEL_PIPE_B) { 238 if (info.device_type.InGroup(INTEL_GROUP_SNB) 239 || info.device_type.InGroup(INTEL_GROUP_ILK)) 240 mask |= PCH_INTERRUPT_VBLANK_PIPEB_SNB; 241 else if (hasPCH) 242 mask |= PCH_INTERRUPT_VBLANK_PIPEB; 243 else 244 mask |= INTERRUPT_VBLANK_PIPEB; 245 } 246 247 #if 0 // FIXME enable when we support the 3rd pipe 248 if (pipe == INTEL_PIPE_C) { 249 // Older generations only had two pipes 250 if (hasPCH && info.device_type.Generation() > 6) 251 mask |= PCH_INTERRUPT_VBLANK_PIPEC; 252 } 253 #endif 254 255 // On SandyBridge, there is an extra "global enable" flag, which must also 256 // be set when enabling the interrupts (but not when testing for them). 257 if (enable && info.device_type.InFamily(INTEL_FAMILY_SER5)) 258 mask |= PCH_INTERRUPT_GLOBAL_SNB; 259 260 return mask; 261 } 262 263 264 static void 265 intel_enable_interrupts(intel_info& info, pipes which, bool enable) 266 { 267 uint32 finalMask = 0; 268 const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, true); 269 const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, true); 270 if (which.HasPipe(INTEL_PIPE_A)) 271 finalMask |= pipeAMask; 272 if (which.HasPipe(INTEL_PIPE_B)) 273 finalMask |= pipeBMask; 274 275 const uint32 value = enable ? finalMask : 0; 276 277 // Clear all the interrupts 278 write32(info, find_reg(info, INTEL_INTERRUPT_IDENTITY), ~0); 279 280 // enable interrupts - we only want VBLANK interrupts 281 write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), value); 282 write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~value); 283 } 284 285 286 static bool 287 intel_check_interrupt(intel_info& info, pipes& which) 288 { 289 which.ClearPipe(INTEL_PIPE_ANY); 290 const uint32 pipeAMask = intel_get_interrupt_mask(info, INTEL_PIPE_A, false); 291 const uint32 pipeBMask = intel_get_interrupt_mask(info, INTEL_PIPE_B, false); 292 const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY); 293 const uint32 interrupt = read32(info, regIdentity); 294 if ((interrupt & pipeAMask) != 0) 295 which.SetPipe(INTEL_PIPE_A); 296 if ((interrupt & pipeBMask) != 0) 297 which.SetPipe(INTEL_PIPE_B); 298 return which.HasPipe(INTEL_PIPE_ANY); 299 } 300 301 302 static void 303 g35_clear_interrupt_status(intel_info& info, pipe_index pipe) 304 { 305 // These registers do not exist on later GPUs. 306 if (info.device_type.Generation() > 4) 307 return; 308 309 const uint32 value = DISPLAY_PIPE_VBLANK_STATUS | DISPLAY_PIPE_VBLANK_ENABLED; 310 switch (pipe) { 311 case INTEL_PIPE_A: 312 write32(info, INTEL_DISPLAY_A_PIPE_STATUS, value); 313 break; 314 case INTEL_PIPE_B: 315 write32(info, INTEL_DISPLAY_B_PIPE_STATUS, value); 316 break; 317 default: 318 break; 319 } 320 } 321 322 323 static void 324 intel_clear_pipe_interrupt(intel_info& info, pipe_index pipe) 325 { 326 // On G35/G45, prior to clearing Display Pipe interrupt in IIR 327 // the corresponding interrupt status must first be cleared. 328 g35_clear_interrupt_status(info, pipe); 329 330 const uint32 regIdentity = find_reg(info, INTEL_INTERRUPT_IDENTITY); 331 const uint32 bit = intel_get_interrupt_mask(info, pipe, false); 332 const uint32 identity = read32(info, regIdentity); 333 write32(info, regIdentity, identity | bit); 334 } 335 336 337 /*! 338 Interrupt routine for Gen8 and Gen9. 339 See Gen12 Display Engine: Interrupt Service Routine chapter. 340 */ 341 static int32 342 gen8_interrupt_handler(void* data) 343 { 344 intel_info& info = *(intel_info*)data; 345 346 uint32 interrupt = gen8_enable_global_interrupts(info, false); 347 if (interrupt == 0) { 348 gen8_enable_global_interrupts(info, true); 349 return B_UNHANDLED_INTERRUPT; 350 } 351 352 int32 handled = gen8_handle_interrupts(info, interrupt); 353 354 gen8_enable_global_interrupts(info, true); 355 return handled; 356 } 357 358 359 /*! 360 Interrupt routine for Gen11. 361 See Gen12 Display Engine: Interrupt Service Routine chapter. 362 */ 363 static int32 364 gen11_interrupt_handler(void* data) 365 { 366 intel_info& info = *(intel_info*)data; 367 368 uint32 interrupt = gen11_enable_global_interrupts(info, false); 369 370 if (interrupt == 0) { 371 gen11_enable_global_interrupts(info, true); 372 return B_UNHANDLED_INTERRUPT; 373 } 374 375 int32 handled = B_HANDLED_INTERRUPT; 376 if ((interrupt & GEN11_DISPLAY_IRQ) != 0) 377 handled = gen8_handle_interrupts(info, read32(info, GEN11_DISPLAY_INT_CTL)); 378 379 gen11_enable_global_interrupts(info, true); 380 return handled; 381 } 382 383 384 static int32 385 intel_interrupt_handler(void* data) 386 { 387 intel_info &info = *(intel_info*)data; 388 389 pipes which; 390 bool shouldHandle = intel_check_interrupt(info, which); 391 392 if (!shouldHandle) 393 return B_UNHANDLED_INTERRUPT; 394 395 int32 handled = B_HANDLED_INTERRUPT; 396 397 while (shouldHandle) { 398 if (which.HasPipe(INTEL_PIPE_A)) { 399 handled = release_vblank_sem(info); 400 401 intel_clear_pipe_interrupt(info, INTEL_PIPE_A); 402 } 403 404 if (which.HasPipe(INTEL_PIPE_B)) { 405 handled = release_vblank_sem(info); 406 407 intel_clear_pipe_interrupt(info, INTEL_PIPE_B); 408 } 409 410 #if 0 411 // FIXME we don't have support for the 3rd pipe yet 412 if (which.HasPipe(INTEL_PIPE_C)) { 413 handled = release_vblank_sem(info); 414 415 intel_clear_pipe_interrupt(info, INTEL_PIPE_C); 416 } 417 #endif 418 419 shouldHandle = intel_check_interrupt(info, which); 420 } 421 422 return handled; 423 } 424 425 426 static void 427 init_interrupt_handler(intel_info &info) 428 { 429 info.shared_info->vblank_sem = create_sem(0, "intel extreme vblank"); 430 if (info.shared_info->vblank_sem < B_OK) 431 return; 432 433 status_t status = B_OK; 434 435 // We need to change the owner of the sem to the calling team (usually the 436 // app_server), because userland apps cannot acquire kernel semaphores 437 thread_id thread = find_thread(NULL); 438 thread_info threadInfo; 439 if (get_thread_info(thread, &threadInfo) != B_OK 440 || set_sem_owner(info.shared_info->vblank_sem, threadInfo.team) 441 != B_OK) { 442 status = B_ERROR; 443 } 444 445 // Find the right interrupt vector, using MSIs if available. 446 info.irq = 0xff; 447 info.use_msi = false; 448 if (info.pci->u.h0.interrupt_pin != 0x00) 449 info.irq = info.pci->u.h0.interrupt_line; 450 if (gPCIx86Module != NULL && gPCIx86Module->get_msi_count(info.pci->bus, 451 info.pci->device, info.pci->function) >= 1) { 452 uint8 msiVector = 0; 453 if (gPCIx86Module->configure_msi(info.pci->bus, info.pci->device, 454 info.pci->function, 1, &msiVector) == B_OK 455 && gPCIx86Module->enable_msi(info.pci->bus, info.pci->device, 456 info.pci->function) == B_OK) { 457 TRACE("using message signaled interrupts\n"); 458 info.irq = msiVector; 459 info.use_msi = true; 460 } 461 } 462 463 if (status == B_OK && info.irq != 0xff) { 464 // we've gotten an interrupt line for us to use 465 466 info.fake_interrupts = false; 467 468 if (info.device_type.Generation() >= 8) { 469 interrupt_handler handler = &gen8_interrupt_handler; 470 if (info.device_type.Generation() >= 11) 471 handler = &gen11_interrupt_handler; 472 status = install_io_interrupt_handler(info.irq, 473 handler, (void*)&info, 0); 474 if (status == B_OK) { 475 gen8_enable_interrupts(info, INTEL_PIPE_A, true); 476 gen8_enable_interrupts(info, INTEL_PIPE_B, true); 477 if (info.device_type.Generation() >= 11) 478 gen8_enable_interrupts(info, INTEL_PIPE_C, true); 479 gen8_enable_global_interrupts(info, true); 480 481 if (info.device_type.Generation() >= 11) { 482 if (info.shared_info->pch_info >= INTEL_PCH_ICP) { 483 read32(info, SDEIIR); 484 write32(info, SDEIER, 0xffffffff); 485 write32(info, SDEIMR, ~SDE_GMBUS_ICP); 486 read32(info, SDEIMR); 487 } 488 489 uint32 mask = GEN8_AUX_CHANNEL_A; 490 mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; 491 mask |= CNL_AUX_CHANNEL_F; 492 mask |= ICL_AUX_CHANNEL_E; 493 read32(info, GEN8_DE_PORT_IIR); 494 write32(info, GEN8_DE_PORT_IER, mask); 495 write32(info, GEN8_DE_PORT_IMR, ~mask); 496 read32(info, GEN8_DE_PORT_IMR); 497 498 read32(info, GEN8_DE_MISC_IIR); 499 write32(info, GEN8_DE_MISC_IER, GEN8_DE_EDP_PSR); 500 write32(info, GEN8_DE_MISC_IMR, ~GEN8_DE_EDP_PSR); 501 read32(info, GEN8_DE_MISC_IMR); 502 503 read32(info, GEN11_GU_MISC_IIR); 504 write32(info, GEN11_GU_MISC_IER, GEN11_GU_MISC_GSE); 505 write32(info, GEN11_GU_MISC_IMR, ~GEN11_GU_MISC_GSE); 506 read32(info, GEN11_GU_MISC_IMR); 507 508 read32(info, GEN11_DE_HPD_IIR); 509 write32(info, GEN11_DE_HPD_IER, 510 GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK); 511 write32(info, GEN11_DE_HPD_IMR, 0xffffffff); 512 read32(info, GEN11_DE_HPD_IMR); 513 514 write32(info, GEN11_TC_HOTPLUG_CTL, 0); 515 write32(info, GEN11_TBT_HOTPLUG_CTL, 0); 516 517 if (info.shared_info->pch_info >= INTEL_PCH_ICP) { 518 if (info.shared_info->pch_info <= INTEL_PCH_TGP) 519 write32(info, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 520 read32(info, SDEIMR); 521 write32(info, SDEIMR, 0x3f023f07); 522 read32(info, SDEIMR); 523 524 uint32 ctl = read32(info, SHOTPLUG_CTL_DDI); 525 // we enable everything, should come from the VBT 526 ctl |= SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) 527 | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) 528 | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) 529 | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D); 530 write32(info, SHOTPLUG_CTL_DDI, ctl); 531 ctl = read32(info, SHOTPLUG_CTL_TC); 532 // we enable everything, should come from the VBT 533 ctl |= SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC1) 534 | SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC2) 535 | SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC3) 536 | SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC4) 537 | SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC5) 538 | SHOTPLUG_CTL_TC_HPD_ENABLE(HPD_PORT_TC6); 539 write32(info, SHOTPLUG_CTL_TC, ctl); 540 } 541 542 gen11_enable_global_interrupts(info, true); 543 } 544 } 545 } else { 546 status = install_io_interrupt_handler(info.irq, 547 &intel_interrupt_handler, (void*)&info, 0); 548 if (status == B_OK) { 549 g35_clear_interrupt_status(info, INTEL_PIPE_A); 550 g35_clear_interrupt_status(info, INTEL_PIPE_B); 551 552 pipes which; 553 which.SetPipe(INTEL_PIPE_A); 554 which.SetPipe(INTEL_PIPE_B); 555 intel_enable_interrupts(info, which, true); 556 } 557 } 558 } 559 if (status < B_OK) { 560 // There is no interrupt reserved for us, or we couldn't install our 561 // interrupt handler, let's fake the vblank interrupt for our clients 562 // using a timer interrupt 563 info.fake_interrupts = true; 564 565 // TODO: fake interrupts! 566 ERROR("Fake interrupt mode (no PCI interrupt line assigned\n"); 567 status = B_ERROR; 568 } 569 570 if (status < B_OK) { 571 delete_sem(info.shared_info->vblank_sem); 572 info.shared_info->vblank_sem = B_ERROR; 573 } 574 } 575 576 577 // #pragma mark - 578 579 580 status_t 581 intel_free_memory(intel_info &info, addr_t base) 582 { 583 return gGART->free_memory(info.aperture, base); 584 } 585 586 587 status_t 588 intel_allocate_memory(intel_info &info, size_t size, size_t alignment, 589 uint32 flags, addr_t* _base, phys_addr_t* _physicalBase) 590 { 591 return gGART->allocate_memory(info.aperture, size, alignment, 592 flags, _base, _physicalBase); 593 } 594 595 596 status_t 597 intel_extreme_init(intel_info &info) 598 { 599 CALLED(); 600 info.aperture = gGART->map_aperture(info.pci->bus, info.pci->device, 601 info.pci->function, 0, &info.aperture_base); 602 if (info.aperture < B_OK) { 603 ERROR("error: could not map GART aperture! (%s)\n", 604 strerror(info.aperture)); 605 return info.aperture; 606 } 607 608 AreaKeeper sharedCreator; 609 info.shared_area = sharedCreator.Create("intel extreme shared info", 610 (void**)&info.shared_info, B_ANY_KERNEL_ADDRESS, 611 ROUND_TO_PAGE_SIZE(sizeof(intel_shared_info)) + 3 * B_PAGE_SIZE, 612 B_FULL_LOCK, 613 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA); 614 if (info.shared_area < B_OK) { 615 ERROR("error: could not create shared area!\n"); 616 gGART->unmap_aperture(info.aperture); 617 return info.shared_area; 618 } 619 620 // enable power 621 gPCI->set_powerstate(info.pci->bus, info.pci->device, info.pci->function, 622 PCI_pm_state_d0); 623 624 memset((void*)info.shared_info, 0, sizeof(intel_shared_info)); 625 626 int mmioIndex = 1; 627 if (info.device_type.Generation() >= 3) { 628 // For some reason Intel saw the need to change the order of the 629 // mappings with the introduction of the i9xx family 630 mmioIndex = 0; 631 } 632 633 // evaluate driver settings, if any 634 635 bool hardwareCursor; 636 read_settings(hardwareCursor); 637 638 // memory mapped I/O 639 640 // TODO: registers are mapped twice (by us and intel_gart), maybe we 641 // can share it between the drivers 642 643 phys_addr_t addr = info.pci->u.h0.base_registers[mmioIndex]; 644 uint64 barSize = info.pci->u.h0.base_register_sizes[mmioIndex]; 645 if ((info.pci->u.h0.base_register_flags[mmioIndex] & PCI_address_type) == PCI_address_type_64) { 646 addr |= (uint64)info.pci->u.h0.base_registers[mmioIndex + 1] << 32; 647 barSize |= (uint64)info.pci->u.h0.base_register_sizes[mmioIndex + 1] << 32; 648 } 649 AreaKeeper mmioMapper; 650 info.registers_area = mmioMapper.Map("intel extreme mmio", addr, barSize, 651 B_ANY_KERNEL_ADDRESS, 652 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_CLONEABLE_AREA, 653 (void**)&info.registers); 654 if (mmioMapper.InitCheck() < B_OK) { 655 ERROR("error: could not map memory I/O!\n"); 656 gGART->unmap_aperture(info.aperture); 657 return info.registers_area; 658 } 659 660 bool hasPCH = (info.pch_info != INTEL_PCH_NONE); 661 662 ERROR("Init Intel generation %d GPU %s PCH split.\n", 663 info.device_type.Generation(), hasPCH ? "with" : "without"); 664 665 uint32* blocks = info.shared_info->register_blocks; 666 blocks[REGISTER_BLOCK(REGS_FLAT)] = 0; 667 668 // setup the register blocks for the different architectures 669 if (hasPCH) { 670 // PCH based platforms (IronLake through ultra-low-power Broadwells) 671 blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)] 672 = PCH_NORTH_SHARED_REGISTER_BASE; 673 blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)] 674 = PCH_NORTH_PIPE_AND_PORT_REGISTER_BASE; 675 blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)] 676 = PCH_NORTH_PLANE_CONTROL_REGISTER_BASE; 677 blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] 678 = PCH_SOUTH_SHARED_REGISTER_BASE; 679 blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] 680 = PCH_SOUTH_TRANSCODER_AND_PORT_REGISTER_BASE; 681 } else { 682 // (G)MCH/ICH based platforms 683 blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)] 684 = MCH_SHARED_REGISTER_BASE; 685 blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)] 686 = MCH_PIPE_AND_PORT_REGISTER_BASE; 687 blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)] 688 = MCH_PLANE_CONTROL_REGISTER_BASE; 689 blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] 690 = ICH_SHARED_REGISTER_BASE; 691 blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] 692 = ICH_PORT_REGISTER_BASE; 693 } 694 695 // Everything in the display PRM gets +0x180000 696 if (info.device_type.InGroup(INTEL_GROUP_VLV)) { 697 // "I nearly got violent with the hw guys when they told me..." 698 blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)] += VLV_DISPLAY_BASE; 699 blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)] += VLV_DISPLAY_BASE; 700 } 701 702 TRACE("REGS_NORTH_SHARED: 0x%" B_PRIx32 "\n", 703 blocks[REGISTER_BLOCK(REGS_NORTH_SHARED)]); 704 TRACE("REGS_NORTH_PIPE_AND_PORT: 0x%" B_PRIx32 "\n", 705 blocks[REGISTER_BLOCK(REGS_NORTH_PIPE_AND_PORT)]); 706 TRACE("REGS_NORTH_PLANE_CONTROL: 0x%" B_PRIx32 "\n", 707 blocks[REGISTER_BLOCK(REGS_NORTH_PLANE_CONTROL)]); 708 TRACE("REGS_SOUTH_SHARED: 0x%" B_PRIx32 "\n", 709 blocks[REGISTER_BLOCK(REGS_SOUTH_SHARED)]); 710 TRACE("REGS_SOUTH_TRANSCODER_PORT: 0x%" B_PRIx32 "\n", 711 blocks[REGISTER_BLOCK(REGS_SOUTH_TRANSCODER_PORT)]); 712 713 // make sure bus master, memory-mapped I/O, and frame buffer is enabled 714 set_pci_config(info.pci, PCI_command, 2, get_pci_config(info.pci, 715 PCI_command, 2) | PCI_command_io | PCI_command_memory 716 | PCI_command_master); 717 718 // reserve ring buffer memory (currently, this memory is placed in 719 // the graphics memory), but this could bring us problems with 720 // write combining... 721 722 ring_buffer &primary = info.shared_info->primary_ring_buffer; 723 if (intel_allocate_memory(info, 16 * B_PAGE_SIZE, 0, 0, 724 (addr_t*)&primary.base) == B_OK) { 725 primary.register_base = INTEL_PRIMARY_RING_BUFFER; 726 primary.size = 16 * B_PAGE_SIZE; 727 primary.offset = (addr_t)primary.base - info.aperture_base; 728 } 729 730 // Enable clock gating 731 intel_en_gating(info); 732 733 // Enable automatic gpu downclocking if we can to save power 734 intel_en_downclock(info); 735 736 // no errors, so keep areas and mappings 737 sharedCreator.Detach(); 738 mmioMapper.Detach(); 739 740 aperture_info apertureInfo; 741 gGART->get_aperture_info(info.aperture, &apertureInfo); 742 743 info.shared_info->registers_area = info.registers_area; 744 info.shared_info->graphics_memory = (uint8*)info.aperture_base; 745 info.shared_info->physical_graphics_memory = apertureInfo.physical_base; 746 info.shared_info->graphics_memory_size = apertureInfo.size; 747 info.shared_info->frame_buffer = 0; 748 info.shared_info->dpms_mode = B_DPMS_ON; 749 info.shared_info->min_brightness = 2; 750 info.shared_info->internal_crt_support = true; 751 info.shared_info->pch_info = info.pch_info; 752 info.shared_info->device_type = info.device_type; 753 754 // Pull VBIOS info for later use 755 info.shared_info->got_vbt = parse_vbt_from_bios(info.shared_info); 756 757 /* at least 855gm can't drive more than one head at time */ 758 if (info.device_type.InFamily(INTEL_FAMILY_8xx)) 759 info.shared_info->single_head_locked = 1; 760 761 if (info.device_type.InFamily(INTEL_FAMILY_SER5)) { 762 info.shared_info->pll_info.reference_frequency = 120000;// 120 MHz 763 info.shared_info->pll_info.max_frequency = 350000; 764 // 350 MHz RAM DAC speed 765 info.shared_info->pll_info.min_frequency = 20000; // 20 MHz 766 } else if (info.device_type.InFamily(INTEL_FAMILY_9xx)) { 767 info.shared_info->pll_info.reference_frequency = 96000; // 96 MHz 768 info.shared_info->pll_info.max_frequency = 400000; 769 // 400 MHz RAM DAC speed 770 info.shared_info->pll_info.min_frequency = 20000; // 20 MHz 771 } else if (info.device_type.HasDDI() && (info.device_type.Generation() <= 8)) { 772 info.shared_info->pll_info.reference_frequency = 135000;// 135 MHz 773 info.shared_info->pll_info.max_frequency = 350000; 774 // 350 MHz RAM DAC speed 775 info.shared_info->pll_info.min_frequency = 25000; // 25 MHz 776 } else if ((info.device_type.Generation() >= 9) && 777 info.device_type.InGroup(INTEL_GROUP_SKY)) { 778 info.shared_info->pll_info.reference_frequency = 24000; // 24 MHz 779 info.shared_info->pll_info.max_frequency = 350000; 780 // 350 MHz RAM DAC speed 781 info.shared_info->pll_info.min_frequency = 25000; // 25 MHz 782 } else if (info.device_type.Generation() >= 9) { 783 uint32 refInfo = 784 (read32(info, ICL_DSSM) & ICL_DSSM_REF_FREQ_MASK) >> ICL_DSSM_REF_FREQ_SHIFT; 785 switch (refInfo) { 786 case ICL_DSSM_24000: 787 info.shared_info->pll_info.reference_frequency = 24000; // 24 MHz 788 break; 789 case ICL_DSSM_19200: 790 info.shared_info->pll_info.reference_frequency = 19200; // 19.2 MHz 791 break; 792 case ICL_DSSM_38400: 793 info.shared_info->pll_info.reference_frequency = 38400; // 38.4 MHz 794 break; 795 default: 796 ERROR("error: unknown ref. freq. strap, using 24Mhz! %" B_PRIx32 "\n", refInfo); 797 info.shared_info->pll_info.reference_frequency = 24000; // 24 MHz 798 break; 799 } 800 info.shared_info->pll_info.max_frequency = 350000; 801 // 350 MHz RAM DAC speed 802 info.shared_info->pll_info.min_frequency = 25000; // 25 MHz 803 } else { 804 info.shared_info->pll_info.reference_frequency = 48000; // 48 MHz 805 info.shared_info->pll_info.max_frequency = 350000; 806 // 350 MHz RAM DAC speed 807 info.shared_info->pll_info.min_frequency = 25000; // 25 MHz 808 } 809 810 info.shared_info->pll_info.divisor_register = INTEL_DISPLAY_A_PLL_DIVISOR_0; 811 812 #ifdef __HAIKU__ 813 strlcpy(info.shared_info->device_identifier, info.device_identifier, 814 sizeof(info.shared_info->device_identifier)); 815 #else 816 strcpy(info.shared_info->device_identifier, info.device_identifier); 817 #endif 818 819 // setup overlay registers 820 821 status_t status = intel_allocate_memory(info, B_PAGE_SIZE, 0, 822 intel_uses_physical_overlay(*info.shared_info) 823 ? B_APERTURE_NEED_PHYSICAL : 0, 824 (addr_t*)&info.overlay_registers, 825 &info.shared_info->physical_overlay_registers); 826 if (status == B_OK) { 827 info.shared_info->overlay_offset = (addr_t)info.overlay_registers 828 - info.aperture_base; 829 TRACE("Overlay registers mapped at 0x%" B_PRIx32 " = %p - %" 830 B_PRIxADDR " (%" B_PRIxPHYSADDR ")\n", 831 info.shared_info->overlay_offset, info.overlay_registers, 832 info.aperture_base, info.shared_info->physical_overlay_registers); 833 init_overlay_registers(info.overlay_registers); 834 } else { 835 ERROR("error: could not allocate overlay memory! %s\n", 836 strerror(status)); 837 } 838 839 // Allocate hardware status page and the cursor memory 840 TRACE("Allocating hardware status page"); 841 842 if (intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL, 843 (addr_t*)info.shared_info->status_page, 844 &info.shared_info->physical_status_page) == B_OK) { 845 // TODO: set status page 846 } 847 if (hardwareCursor) { 848 intel_allocate_memory(info, B_PAGE_SIZE, 0, B_APERTURE_NEED_PHYSICAL, 849 (addr_t*)&info.shared_info->cursor_memory, 850 &info.shared_info->physical_cursor_memory); 851 } 852 853 edid1_info* edidInfo = (edid1_info*)get_boot_item(VESA_EDID_BOOT_INFO, 854 NULL); 855 if (edidInfo != NULL) { 856 info.shared_info->has_vesa_edid_info = true; 857 memcpy(&info.shared_info->vesa_edid_info, edidInfo, sizeof(edid1_info)); 858 } 859 860 init_interrupt_handler(info); 861 862 if (hasPCH) { 863 if (info.device_type.Generation() == 5) { 864 info.shared_info->fdi_link_frequency = (read32(info, FDI_PLL_BIOS_0) 865 & FDI_PLL_FB_CLOCK_MASK) + 2; 866 info.shared_info->fdi_link_frequency *= 100; 867 } else { 868 info.shared_info->fdi_link_frequency = 2700; 869 } 870 if (info.shared_info->pch_info >= INTEL_PCH_CNP) { 871 // TODO read/write info.shared_info->hraw_clock 872 } else { 873 info.shared_info->hraw_clock = (read32(info, PCH_RAWCLK_FREQ) 874 & RAWCLK_FREQ_MASK) * 1000; 875 TRACE("%s: rawclk rate: %" B_PRIu32 " kHz\n", __func__, info.shared_info->hraw_clock); 876 } 877 } else { 878 // TODO read info.shared_info->hraw_clock 879 info.shared_info->fdi_link_frequency = 0; 880 } 881 882 if (info.device_type.InGroup(INTEL_GROUP_HAS)) { 883 uint32 lcpll = read32(info, LCPLL_CTL); 884 if ((lcpll & LCPLL_CD_SOURCE_FCLK) != 0) 885 info.shared_info->hw_cdclk = 800000; 886 else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) 887 info.shared_info->hw_cdclk = 450000; 888 /* ULT type is missing 889 else if (IS_ULT) 890 info.shared_info->hw_cdclk = 337500; 891 */ 892 else 893 info.shared_info->hw_cdclk = 540000; 894 } else if (info.device_type.InGroup(INTEL_GROUP_SNB) 895 || info.device_type.InGroup(INTEL_GROUP_IVB)) { 896 info.shared_info->hw_cdclk = 400000; 897 } else if (info.device_type.InGroup(INTEL_GROUP_ILK)) { 898 info.shared_info->hw_cdclk = 450000; 899 } 900 TRACE("%s: hw_cdclk: %" B_PRIu32 " kHz\n", __func__, info.shared_info->hw_cdclk); 901 902 TRACE("%s: completed successfully!\n", __func__); 903 return B_OK; 904 } 905 906 907 void 908 intel_extreme_uninit(intel_info &info) 909 { 910 CALLED(); 911 912 if (!info.fake_interrupts && info.shared_info->vblank_sem > 0) { 913 // disable interrupt generation 914 if (info.device_type.Generation() >= 8) { 915 if (info.device_type.Generation() >= 11) { 916 gen11_enable_global_interrupts(info, false); 917 } 918 gen8_enable_global_interrupts(info, false); 919 interrupt_handler handler = &gen8_interrupt_handler; 920 if (info.device_type.Generation() >= 11) 921 handler = &gen11_interrupt_handler; 922 remove_io_interrupt_handler(info.irq, handler, &info); 923 } else { 924 write32(info, find_reg(info, INTEL_INTERRUPT_ENABLED), 0); 925 write32(info, find_reg(info, INTEL_INTERRUPT_MASK), ~0); 926 remove_io_interrupt_handler(info.irq, intel_interrupt_handler, &info); 927 } 928 929 if (info.use_msi && gPCIx86Module != NULL) { 930 gPCIx86Module->disable_msi(info.pci->bus, 931 info.pci->device, info.pci->function); 932 gPCIx86Module->unconfigure_msi(info.pci->bus, 933 info.pci->device, info.pci->function); 934 } 935 } 936 937 gGART->unmap_aperture(info.aperture); 938 939 delete_area(info.registers_area); 940 delete_area(info.shared_area); 941 } 942 943