/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Copyright 2004, Marcus Overhagen. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


extern "C" {
#include "device.h"
}

#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>

extern "C" {
#include <compat/dev/pci/pcireg.h>
#include <compat/dev/pci/pcivar.h>
#include <compat/machine/resource.h>
#include <compat/sys/mutex.h>
#include <compat/machine/bus.h>
#include <compat/sys/rman.h>
#include <compat/sys/bus.h>
}

// private kernel header to get B_NO_HANDLED_INFO
#include <int.h>

#include <PCI_x86.h>


//#define DEBUG_BUS_SPACE_RW
#ifdef DEBUG_BUS_SPACE_RW
#	define TRACE_BUS_SPACE_RW(x) driver_printf x
#else
#	define TRACE_BUS_SPACE_RW(x)
#endif

//#define DEBUG_PCI
#ifdef DEBUG_PCI
#	define TRACE_PCI(dev, format, args...) device_printf(dev, format , ##args)
#else
#	define TRACE_PCI(dev, format, args...) do { } while (0)
#endif


#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))


struct internal_intr {
	device_t dev;
	driver_filter_t *filter;
	driver_intr_t *handler;
	void *arg;
	int irq;
	uint32 flags;

	thread_id thread;
	sem_id sem;
	int32 handling;
};

static int32 intr_wrapper(void *data);


static int
fls(int mask)
{
	int bit;
	if (mask == 0)
		return (0);
	for (bit = 1; mask != 1; bit++)
		mask = (unsigned int)mask >> 1;
	return (bit);
}


static area_id
map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 offset = _phy & (B_PAGE_SIZE - 1);
	phys_addr_t physicalAddr = _phy - offset;
	area_id area;

	size = ROUNDUP(size + offset, B_PAGE_SIZE);
	area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
		protection, virtualAddr);
	if (area < B_OK)
		return area;

	*virtualAddr = (uint8 *)(*virtualAddr) + offset;

	return area;
}
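

// Note on map_mem(): the physical address handed in by a BAR is not
// necessarily page aligned, so the helper above maps the containing,
// page-aligned range and returns a pointer advanced by the original
// offset. As an illustrative example (addresses are made up): a BAR at
// physical 0xfebc1010 with a 0x100 byte request maps the page starting
// at 0xfebc1000, rounds the size up to a multiple of B_PAGE_SIZE, and
// yields virtualAddr = area base + 0x10.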


static int
bus_alloc_irq_resource(device_t dev, struct resource *res)
{
	uint8 irq = pci_read_config(dev, PCI_interrupt_line, 1);
	if (irq == 0 || irq == 0xff)
		return -1;

	/* TODO: IRQ resources! */
	res->r_bustag = 0;
	res->r_bushandle = irq;

	return 0;
}


static int
bus_alloc_mem_resource(device_t dev, struct resource *res, int regid)
{
	uint32 addr = pci_read_config(dev, regid, 4) & PCI_address_memory_32_mask;
	uint32 size = 128 * 1024; /* XXX */
	void *virtualAddr;

	res->r_mapped_area = map_mem(&virtualAddr, addr, size, 0,
		"bus_alloc_resource(MEMORY)");
	if (res->r_mapped_area < B_OK)
		return -1;

	res->r_bustag = I386_BUS_SPACE_MEM;
	res->r_bushandle = (bus_space_handle_t)virtualAddr;
	return 0;
}


static int
bus_alloc_ioport_resource(device_t dev, struct resource *res, int regid)
{
	res->r_bustag = I386_BUS_SPACE_IO;
	res->r_bushandle = pci_read_config(dev, regid, 4) & PCI_address_io_mask;
	return 0;
}


struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, unsigned long start,
	unsigned long end, unsigned long count, uint32 flags)
{
	struct resource *res;
	int result = -1;

	if (type != SYS_RES_IRQ && type != SYS_RES_MEMORY
		&& type != SYS_RES_IOPORT)
		return NULL;

	device_printf(dev, "bus_alloc_resource(%i, [%i], 0x%lx, 0x%lx, 0x%lx,"
		"0x%" B_PRIx32 ")\n", type, *rid, start, end, count, flags);

	// maybe a local array of resources is enough
	res = (struct resource *)malloc(sizeof(struct resource));
	if (res == NULL)
		return NULL;

	if (type == SYS_RES_IRQ) {
		if (*rid == 0) {
			// pinned interrupt
			result = bus_alloc_irq_resource(dev, res);
		} else {
			// msi or msi-x interrupt at index *rid - 1
			pci_info *info;
			info = &((struct root_device_softc *)dev->root->softc)->pci_info;
			res->r_bustag = 1;
			res->r_bushandle = info->u.h0.interrupt_line + *rid - 1;
			result = 0;
		}
	} else if (type == SYS_RES_MEMORY)
		result = bus_alloc_mem_resource(dev, res, *rid);
	else if (type == SYS_RES_IOPORT)
		result = bus_alloc_ioport_resource(dev, res, *rid);

	if (result < 0) {
		free(res);
		return NULL;
	}

	res->r_type = type;
	return res;
}
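

// How the rid is interpreted for SYS_RES_IRQ in bus_alloc_resource()
// above: rid 0 requests the legacy, pinned interrupt line read from the
// device's config space, while rid >= 1 selects MSI/MSI-X vector
// "*rid - 1" relative to the start vector that pci_alloc_msi() /
// pci_alloc_msix() stored in interrupt_line. An illustrative, not
// verbatim, driver-side sequence (variable names are hypothetical):
//
//	int rid = 0;
//	struct resource *irqRes = bus_alloc_resource_any(dev, SYS_RES_IRQ,
//		&rid, RF_SHAREABLE | RF_ACTIVE);
//	// later: bus_setup_intr(dev, irqRes, 0, NULL, my_interrupt_handler,
//	//	softc, &softc->intrCookie);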


int
bus_release_resource(device_t dev, int type, int rid, struct resource *res)
{
	if (res->r_type != type)
		panic("bus_release_resource: mismatch");

	if (type == SYS_RES_MEMORY)
		delete_area(res->r_mapped_area);

	free(res);
	return 0;
}


int
bus_alloc_resources(device_t dev, struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		resources[i] = bus_alloc_resource_any(dev,
			resourceSpec[i].type, &resourceSpec[i].rid, resourceSpec[i].flags);
		if (resources[i] == NULL
			&& (resourceSpec[i].flags & RF_OPTIONAL) == 0) {
			for (++i; resourceSpec[i].type != -1; i++) {
				resources[i] = NULL;
			}

			bus_release_resources(dev, resourceSpec, resources);
			return ENXIO;
		}
	}
	return 0;
}


void
bus_release_resources(device_t dev, const struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		if (resources[i] == NULL)
			continue;

		bus_release_resource(dev, resourceSpec[i].type, resourceSpec[i].rid,
			resources[i]);
		resources[i] = NULL;
	}
}


bus_space_handle_t
rman_get_bushandle(struct resource *res)
{
	return res->r_bushandle;
}


bus_space_tag_t
rman_get_bustag(struct resource *res)
{
	return res->r_bustag;
}


int
rman_get_rid(struct resource *res)
{
	return 0;
}


// #pragma mark - Interrupt handling


static int32
intr_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	//device_printf(intr->dev, "in interrupt handler.\n");

	if (!HAIKU_CHECK_DISABLE_INTERRUPTS(intr->dev))
		return B_UNHANDLED_INTERRUPT;

	release_sem_etc(intr->sem, 1, B_DO_NOT_RESCHEDULE);
	return intr->handling ? B_HANDLED_INTERRUPT : B_INVOKE_SCHEDULER;
}


static int32
intr_fast_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	intr->handler(intr->arg);

	// We don't know if the interrupt has been handled.
	return B_UNHANDLED_INTERRUPT;
}


static int32
intr_handler(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;
	status_t status;

	while (1) {
		status = acquire_sem(intr->sem);
		if (status < B_OK)
			break;

		//device_printf(intr->dev, "in soft interrupt handler.\n");

		atomic_or(&intr->handling, 1);
		intr->handler(intr->arg);
		atomic_and(&intr->handling, 0);
		HAIKU_REENABLE_INTERRUPTS(intr->dev);
	}

	return 0;
}


static void
free_internal_intr(struct internal_intr *intr)
{
	if (intr->sem >= B_OK) {
		status_t status;
		delete_sem(intr->sem);
		wait_for_thread(intr->thread, &status);
	}

	free(intr);
}


int
bus_setup_intr(device_t dev, struct resource *res, int flags,
	driver_filter_t filter, driver_intr_t handler, void *arg, void **_cookie)
{
	/* TODO check MPSAFE etc */

	struct internal_intr *intr = (struct internal_intr *)malloc(
		sizeof(struct internal_intr));
	char semName[64];
	status_t status;

	if (intr == NULL)
		return B_NO_MEMORY;

	intr->dev = dev;
	intr->filter = filter;
	intr->handler = handler;
	intr->arg = arg;
	intr->irq = res->r_bushandle;
	intr->flags = flags;
	intr->sem = -1;
	intr->thread = -1;

	if (filter != NULL) {
		status = install_io_interrupt_handler(intr->irq,
			(interrupt_handler)intr->filter, intr->arg, 0);
	} else if ((flags & INTR_FAST) != 0) {
		status = install_io_interrupt_handler(intr->irq,
			intr_fast_wrapper, intr, B_NO_HANDLED_INFO);
	} else {
		snprintf(semName, sizeof(semName), "%s intr", dev->device_name);

		intr->sem = create_sem(0, semName);
		if (intr->sem < B_OK) {
			free(intr);
			return B_NO_MEMORY;
		}

		snprintf(semName, sizeof(semName), "%s intr handler", dev->device_name);

		intr->thread = spawn_kernel_thread(intr_handler, semName,
			B_REAL_TIME_DISPLAY_PRIORITY, intr);
		if (intr->thread < B_OK) {
			delete_sem(intr->sem);
			free(intr);
			return B_NO_MEMORY;
		}

		status = install_io_interrupt_handler(intr->irq,
			intr_wrapper, intr, B_NO_HANDLED_INFO);
	}

	if (status == B_OK && res->r_bustag == 1 && gPCIx86 != NULL) {
		// this is an msi, enable it
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		if (((struct root_device_softc *)dev->root->softc)->is_msi) {
			if (gPCIx86->enable_msi(info->bus, info->device,
					info->function) != B_OK) {
				device_printf(dev, "enabling msi failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		} else if (((struct root_device_softc *)dev->root->softc)->is_msix) {
			if (gPCIx86->enable_msix(info->bus, info->device,
					info->function) != B_OK) {
				device_printf(dev, "enabling msix failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		}
	}

	if (status < B_OK) {
		free_internal_intr(intr);
		return status;
	}

	resume_thread(intr->thread);

	*_cookie = intr;
	return 0;
}
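

// bus_setup_intr() above installs one of three delivery paths: a filter
// is installed directly and runs at interrupt time; INTR_FAST handlers
// are wrapped by intr_fast_wrapper() and also run directly; everything
// else takes the default path, where intr_wrapper() only releases a
// semaphore and the actual handler runs in a dedicated kernel thread
// (intr_handler()) at B_REAL_TIME_DISPLAY_PRIORITY. For MSI/MSI-X
// resources (r_bustag == 1) message generation is only enabled once the
// handler is in place, and is torn down again on failure.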


int
bus_teardown_intr(device_t dev, struct resource *res, void *arg)
{
	struct internal_intr *intr = (struct internal_intr *)arg;
	struct root_device_softc *root = (struct root_device_softc *)dev->root->softc;

	if ((root->is_msi || root->is_msix) && gPCIx86 != NULL) {
		// disable msi generation
		pci_info *info = &root->pci_info;
		gPCIx86->disable_msi(info->bus, info->device, info->function);
	}

	if (intr->filter != NULL) {
		remove_io_interrupt_handler(intr->irq, (interrupt_handler)intr->filter,
			intr->arg);
	} else if (intr->flags & INTR_FAST) {
		remove_io_interrupt_handler(intr->irq, intr_fast_wrapper, intr);
	} else {
		remove_io_interrupt_handler(intr->irq, intr_wrapper, intr);
	}

	free_internal_intr(intr);
	return 0;
}


// #pragma mark - bus functions


bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
	return NULL;
}


int
bus_generic_suspend(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_resume(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_shutdown(device_t dev)
{
	UNIMPLEMENTED();
}


int
bus_print_child_header(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_print_child_footer(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_print_child(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	UNIMPLEMENTED();
}


int
bus_child_present(device_t child)
{
	device_t parent = device_get_parent(child);
	if (parent == NULL)
		return 0;

	return bus_child_present(parent);
}


// #pragma mark - PCI functions


uint32_t
pci_read_config(device_t dev, int offset, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	uint32_t value = gPci->read_pci_config(info->bus, info->device,
		info->function, offset, size);
	TRACE_PCI(dev, "pci_read_config(%i, %i) = 0x%x\n", offset, size, value);
	return value;
}


void
pci_write_config(device_t dev, int offset, uint32_t value, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	TRACE_PCI(dev, "pci_write_config(%i, 0x%x, %i)\n", offset, value, size);

	gPci->write_pci_config(info->bus, info->device, info->function, offset,
		size, value);
}


uint16_t
pci_get_vendor(device_t dev)
{
	return pci_read_config(dev, PCI_vendor_id, 2);
}


uint16_t
pci_get_device(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2);
}


uint16_t
pci_get_subvendor(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_vendor_id, 2);
}


uint16_t
pci_get_subdevice(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_id, 2);
}


uint8_t
pci_get_revid(device_t dev)
{
	return pci_read_config(dev, PCI_revision, 1);
}


uint32_t
pci_get_domain(device_t dev)
{
	return 0;
}

uint32_t
pci_get_devid(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2) << 16 |
		pci_read_config(dev, PCI_vendor_id, 2);
}

uint8_t
pci_get_cachelnsz(device_t dev)
{
	return pci_read_config(dev, PCI_line_size, 1);
}

uint8_t *
pci_get_ether(device_t dev)
{
	/* used in if_dc to get the MAC from CardBus CIS for Xircom card */
	return NULL; /* NULL is handled in the caller correctly */
}

uint8_t
pci_get_bus(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->bus;
}


uint8_t
pci_get_slot(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->device;
}


uint8_t
pci_get_function(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->function;
}


device_t
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
{
	// We don't support that yet - if we want to support the multi port
	// feature of the Broadcom BCM 570x driver, we would have to change
	// that.
	return NULL;
}


static void
pci_set_command_bit(device_t dev, uint16_t bit)
{
	uint16_t command = pci_read_config(dev, PCI_command, 2);
	pci_write_config(dev, PCI_command, command | bit, 2);
}


int
pci_enable_busmaster(device_t dev)
{
	pci_set_command_bit(dev, PCI_command_master);
	return 0;
}


int
pci_enable_io(device_t dev, int space)
{
	/* adapted from FreeBSD's pci_enable_io_method */
	int bit = 0;

	switch (space) {
		case SYS_RES_IOPORT:
			bit = PCI_command_io;
			break;
		case SYS_RES_MEMORY:
			bit = PCI_command_memory;
			break;
		default:
			return EINVAL;
	}

	pci_set_command_bit(dev, bit);
	if (pci_read_config(dev, PCI_command, 2) & bit)
		return 0;

	device_printf(dev, "pci_enable_io(%d) failed.\n", space);

	return ENXIO;
}


int
pci_find_cap(device_t dev, int capability, int *capreg)
{
	return pci_find_extcap(dev, capability, capreg);
}


int
pci_find_extcap(device_t child, int capability, int *_capabilityRegister)
{
	uint8 capabilityPointer;
	uint8 headerType;
	uint16 status;

	status = pci_read_config(child, PCIR_STATUS, 2);
	if ((status & PCIM_STATUS_CAPPRESENT) == 0)
		return ENXIO;

	headerType = pci_read_config(child, PCI_header_type, 1);
	switch (headerType & PCIM_HDRTYPE) {
		case 0:
		case 1:
			capabilityPointer = PCIR_CAP_PTR;
			break;
		case 2:
			capabilityPointer = PCIR_CAP_PTR_2;
			break;
		default:
			return ENXIO;
	}
	capabilityPointer = pci_read_config(child, capabilityPointer, 1);

	while (capabilityPointer != 0) {
		if (pci_read_config(child, capabilityPointer + PCICAP_ID, 1)
				== capability) {
			if (_capabilityRegister != NULL)
				*_capabilityRegister = capabilityPointer;
			return 0;
		}
		capabilityPointer = pci_read_config(child,
			capabilityPointer + PCICAP_NEXTPTR, 1);
	}

	return ENOENT;
}
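

// pci_find_extcap() walks the standard PCI capability list: it checks the
// capabilities bit in the status register, picks the list head register
// based on the header type (PCIR_CAP_PTR, or PCIR_CAP_PTR_2 for CardBus
// bridges) and then follows the next pointers until the requested
// capability ID is found. An illustrative (hypothetical) caller:
//
//	int pcieCapOffset;
//	if (pci_find_extcap(dev, PCIY_EXPRESS, &pcieCapOffset) == 0) {
//		// dev is PCI Express; pcieCapOffset is the capability offset,
//		// as used by pci_get_max_read_req() below.
//	}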


int
pci_msi_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msi_count(info->bus, info->device, info->function);
}


int
pci_alloc_msi(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msi(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	((struct root_device_softc *)dev->root->softc)->is_msi = true;
	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_release_msi(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	gPCIx86->unconfigure_msi(info->bus, info->device, info->function);
	((struct root_device_softc *)dev->root->softc)->is_msi = false;
	((struct root_device_softc *)dev->root->softc)->is_msix = false;
	return EOK;
}


int
pci_msix_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msix_count(info->bus, info->device, info->function);
}


int
pci_alloc_msix(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msix(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	((struct root_device_softc *)dev->root->softc)->is_msix = true;
	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_get_max_read_req(device_t dev)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= PCIM_EXP_CTL_MAX_READ_REQUEST;
	val >>= 12;
	return (1 << (val + 7));
}


int
pci_set_max_read_req(device_t dev, int size)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	if (size < 128)
		size = 128;
	if (size > 4096)
		size = 4096;
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
	return (size);
}
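

// The PCIe maximum read request size is a small field starting at bit 12
// of the device control register; an encoded value v corresponds to
// 128 << v bytes, which is why pci_get_max_read_req() returns
// 1 << (val + 7) and pci_set_max_read_req() stores fls(size) - 8. As a
// worked example, a rounded size of 512 bytes gives
// fls(512) - 8 = 10 - 8 = 2, and decoding 2 yields 1 << (2 + 7) = 512
// again.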


int
pci_get_powerstate(device_t dev)
{
	int capabilityRegister;
	uint16 status;
	int powerState = PCI_POWERSTATE_D0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return powerState;

	status = pci_read_config(dev, capabilityRegister + PCIR_POWER_STATUS, 2);
	switch (status & PCI_pm_mask) {
		case PCI_pm_state_d0:
			break;
		case PCI_pm_state_d1:
			powerState = PCI_POWERSTATE_D1;
			break;
		case PCI_pm_state_d2:
			powerState = PCI_POWERSTATE_D2;
			break;
		case PCI_pm_state_d3:
			powerState = PCI_POWERSTATE_D3;
			break;
		default:
			powerState = PCI_POWERSTATE_UNKNOWN;
			break;
	}

	TRACE_PCI(dev, "%s: D%i\n", __func__, powerState);
	return powerState;
}


int
pci_set_powerstate(device_t dev, int newPowerState)
{
	int capabilityRegister;
	int oldPowerState;
	uint8 currentPowerManagementStatus;
	uint8 newPowerManagementStatus;
	uint16 powerManagementCapabilities;
	bigtime_t stateTransitionDelayInUs = 0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return EOPNOTSUPP;

	oldPowerState = pci_get_powerstate(dev);
	if (oldPowerState == newPowerState)
		return EOK;

	switch (std::max(oldPowerState, newPowerState)) {
		case PCI_POWERSTATE_D2:
			stateTransitionDelayInUs = 200;
			break;
		case PCI_POWERSTATE_D3:
			stateTransitionDelayInUs = 10000;
			break;
	}

	currentPowerManagementStatus = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_STATUS, 2);
	newPowerManagementStatus = currentPowerManagementStatus & ~PCI_pm_mask;
	powerManagementCapabilities = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_CAP, 2);

	switch (newPowerState) {
		case PCI_POWERSTATE_D0:
			newPowerManagementStatus |= PCIM_PSTAT_D0;
			break;
		case PCI_POWERSTATE_D1:
			if ((powerManagementCapabilities & PCI_pm_d1supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D1;
			break;
		case PCI_POWERSTATE_D2:
			if ((powerManagementCapabilities & PCI_pm_d2supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D2;
			break;
		case PCI_POWERSTATE_D3:
			newPowerManagementStatus |= PCIM_PSTAT_D3;
			break;
		default:
			return EINVAL;
	}

	TRACE_PCI(dev, "%s: D%i -> D%i\n", __func__, oldPowerState, newPowerState);
	pci_write_config(dev, capabilityRegister + PCIR_POWER_STATUS,
		newPowerManagementStatus, 2);
	if (stateTransitionDelayInUs != 0)
		snooze(stateTransitionDelayInUs);

	return EOK;
}