/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Copyright 2004, Marcus Overhagen. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


extern "C" {
#include "device.h"
}

#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>

extern "C" {
#include <compat/dev/pci/pcireg.h>
#include <compat/dev/pci/pcivar.h>
#include <compat/machine/resource.h>
#include <compat/sys/mutex.h>
#include <compat/machine/bus.h>
#include <compat/sys/rman.h>
#include <compat/sys/bus.h>
}

// private kernel header to get B_NO_HANDLED_INFO
#include <int.h>

#include <PCI_x86.h>


//#define DEBUG_BUS_SPACE_RW
#ifdef DEBUG_BUS_SPACE_RW
#	define TRACE_BUS_SPACE_RW(x) driver_printf x
#else
#	define TRACE_BUS_SPACE_RW(x)
#endif

//#define DEBUG_PCI
#ifdef DEBUG_PCI
#	define TRACE_PCI(dev, format, args...) device_printf(dev, format, ##args)
#else
#	define TRACE_PCI(dev, format, args...) do { } while (0)
#endif


struct internal_intr {
	device_t dev;
	driver_filter_t* filter;
	driver_intr_t *handler;
	void *arg;
	int irq;
	uint32 flags;

	thread_id thread;
	sem_id sem;
	int32 handling;
};

static int32 intr_wrapper(void *data);


static int
fls(int mask)
{
	int bit;
	if (mask == 0)
		return (0);
	for (bit = 1; mask != 1; bit++)
		mask = (unsigned int)mask >> 1;
	return (bit);
}


static area_id
map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 offset = _phy & (B_PAGE_SIZE - 1);
	phys_addr_t physicalAddr = _phy - offset;
	area_id area;

	size = roundup(size + offset, B_PAGE_SIZE);
	area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
		protection, virtualAddr);
	if (area < B_OK)
		return area;

	*virtualAddr = (uint8 *)(*virtualAddr) + offset;

	return area;
}


static int
bus_alloc_irq_resource(device_t dev, struct resource *res)
{
	uint8 irq = pci_read_config(dev, PCI_interrupt_line, 1);
	if (irq == 0 || irq == 0xff)
		return -1;

	/* TODO: IRQ resources! */
	res->r_bustag = 0;
	res->r_bushandle = irq;

	return 0;
}


static int
bus_alloc_mem_resource(device_t dev, struct resource *res, int regid)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	// check the offset really is of a BAR
	if (regid < PCI_base_registers || (regid % sizeof(uint32) != 0)
		|| (regid >= PCI_base_registers + 6 * (int)sizeof(uint32)))
		return -1;

	// turn offset into array index
	regid -= PCI_base_registers;
	regid /= sizeof(uint32);

	uint32 addr = info->u.h0.base_registers[regid];
	uint32 size = info->u.h0.base_register_sizes[regid];
	uchar flags = info->u.h0.base_register_flags[regid];

	// reject empty regions
	if (size == 0)
		return -1;

	// reject I/O space
	if (flags & PCI_address_space)
		return -1;

	// TODO: check flags & PCI_address_prefetchable ?

	// enable this memory resource
	if (pci_enable_io(dev, SYS_RES_MEMORY) != 0)
		return -1;

	void *virtualAddr;

	res->r_mapped_area = map_mem(&virtualAddr, addr, size, 0,
		"bus_alloc_resource(MEMORY)");
	if (res->r_mapped_area < B_OK)
		return -1;

	res->r_bustag = X86_BUS_SPACE_MEM;
	res->r_bushandle = (bus_space_handle_t)virtualAddr;
	return 0;
}


static int
bus_alloc_ioport_resource(device_t dev, struct resource *res, int regid)
{
	// enable this I/O resource
	if (pci_enable_io(dev, SYS_RES_IOPORT) != 0)
		return -1;

	res->r_bustag = X86_BUS_SPACE_IO;
	res->r_bushandle = pci_read_config(dev, regid, 4) & PCI_address_io_mask;
	return 0;
}


struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, unsigned long start,
	unsigned long end, unsigned long count, uint32 flags)
{
	struct resource *res;
	int result = -1;

	if (type != SYS_RES_IRQ && type != SYS_RES_MEMORY
		&& type != SYS_RES_IOPORT)
		return NULL;

	device_printf(dev, "bus_alloc_resource(%i, [%i], 0x%lx, 0x%lx, 0x%lx,"
		"0x%" B_PRIx32 ")\n", type, *rid, start, end, count, flags);

	// maybe a local array of resources is enough
	res = (struct resource *)malloc(sizeof(struct resource));
	if (res == NULL)
		return NULL;

	if (type == SYS_RES_IRQ) {
		if (*rid == 0) {
			// pinned interrupt
			result = bus_alloc_irq_resource(dev, res);
		} else {
			// msi or msi-x interrupt at index *rid - 1
			pci_info *info;
			info = &((struct root_device_softc *)dev->root->softc)->pci_info;
			res->r_bustag = 1;
			res->r_bushandle = info->u.h0.interrupt_line + *rid - 1;
			result = 0;
		}
	} else if (type == SYS_RES_MEMORY)
		result = bus_alloc_mem_resource(dev, res, *rid);
	else if (type == SYS_RES_IOPORT)
		result = bus_alloc_ioport_resource(dev, res, *rid);

	if (result < 0) {
		free(res);
		return NULL;
	}

	res->r_type = type;
	return res;
}


int
bus_release_resource(device_t dev, int type, int rid, struct resource *res)
{
	if (res->r_type != type)
		panic("bus_release_resource: mismatch");

	if (type == SYS_RES_MEMORY)
		delete_area(res->r_mapped_area);

	free(res);
	return 0;
}


int
bus_alloc_resources(device_t dev, struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		resources[i] = bus_alloc_resource_any(dev,
			resourceSpec[i].type, &resourceSpec[i].rid, resourceSpec[i].flags);
		if (resources[i] == NULL
			&& (resourceSpec[i].flags & RF_OPTIONAL) == 0) {
			for (++i; resourceSpec[i].type != -1; i++) {
				resources[i] = NULL;
			}

			bus_release_resources(dev, resourceSpec, resources);
			return ENXIO;
		}
	}
	return 0;
}


void
bus_release_resources(device_t dev, const struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		if (resources[i] == NULL)
			continue;

		bus_release_resource(dev, resourceSpec[i].type, resourceSpec[i].rid,
			resources[i]);
		resources[i] = NULL;
	}
}


bus_space_handle_t
rman_get_bushandle(struct resource *res)
{
	return res->r_bushandle;
}


bus_space_tag_t
rman_get_bustag(struct resource *res)
{
	return res->r_bustag;
}


int
rman_get_rid(struct resource *res)
{
	return 0;
}


void*
rman_get_virtual(struct resource *res)
{
	return NULL;
}


// #pragma mark - Interrupt handling


static int32
intr_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	//device_printf(intr->dev, "in interrupt handler.\n");

	if (!HAIKU_CHECK_DISABLE_INTERRUPTS(intr->dev))
		return B_UNHANDLED_INTERRUPT;

	release_sem_etc(intr->sem, 1, B_DO_NOT_RESCHEDULE);
	return intr->handling ? B_HANDLED_INTERRUPT : B_INVOKE_SCHEDULER;
}


static int32
intr_handler(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;
	status_t status;

	while (1) {
		status = acquire_sem(intr->sem);
		if (status < B_OK)
			break;

		//device_printf(intr->dev, "in soft interrupt handler.\n");

		atomic_or(&intr->handling, 1);
		intr->handler(intr->arg);
		atomic_and(&intr->handling, 0);
		HAIKU_REENABLE_INTERRUPTS(intr->dev);
	}

	return 0;
}


static void
free_internal_intr(struct internal_intr *intr)
{
	if (intr->sem >= B_OK) {
		status_t status;
		delete_sem(intr->sem);
		wait_for_thread(intr->thread, &status);
	}

	free(intr);
}


int
bus_setup_intr(device_t dev, struct resource *res, int flags,
	driver_filter_t* filter, driver_intr_t handler, void *arg, void **_cookie)
{
	/* TODO check MPSAFE etc */

	struct internal_intr *intr = (struct internal_intr *)malloc(
		sizeof(struct internal_intr));
	char semName[64];
	status_t status;

	if (intr == NULL)
		return B_NO_MEMORY;

	intr->dev = dev;
	intr->filter = filter;
	intr->handler = handler;
	intr->arg = arg;
	intr->irq = res->r_bushandle;
	intr->flags = flags;
	intr->sem = -1;
	intr->thread = -1;

	if (filter != NULL) {
		status = install_io_interrupt_handler(intr->irq,
			(interrupt_handler)intr->filter, intr->arg, 0);
	} else {
		snprintf(semName, sizeof(semName), "%s intr", dev->device_name);

		intr->sem = create_sem(0, semName);
		if (intr->sem < B_OK) {
			free(intr);
			return B_NO_MEMORY;
		}

		snprintf(semName, sizeof(semName), "%s intr handler", dev->device_name);

		intr->thread = spawn_kernel_thread(intr_handler, semName,
			B_REAL_TIME_DISPLAY_PRIORITY, intr);
		if (intr->thread < B_OK) {
			delete_sem(intr->sem);
			free(intr);
			return B_NO_MEMORY;
		}

		status = install_io_interrupt_handler(intr->irq,
			intr_wrapper, intr, 0);
	}

	if (status == B_OK && res->r_bustag == 1 && gPCIx86 != NULL) {
		// this is an msi, enable it
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		if (((struct root_device_softc *)dev->root->softc)->is_msi) {
			if (gPCIx86->enable_msi(info->bus, info->device,
					info->function) != B_OK) {
				device_printf(dev, "enabling msi failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		} else if (((struct root_device_softc *)dev->root->softc)->is_msix) {
			if (gPCIx86->enable_msix(info->bus, info->device,
					info->function) != B_OK) {
				device_printf(dev, "enabling msix failed\n");
				bus_teardown_intr(dev, res, intr);
				return ENODEV;
			}
		}
	}

	if (status < B_OK) {
		free_internal_intr(intr);
		return status;
	}

	resume_thread(intr->thread);

	*_cookie = intr;
	return 0;
}
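

/*
 * Usage sketch (not part of the compat layer itself): a FreeBSD-style driver
 * is expected to pair bus_setup_intr() with bus_teardown_intr() roughly as
 * below. The identifiers `sc`, `irq_res`, `irq_handle` and `my_interrupt`
 * are hypothetical driver-side names, shown only for illustration.
 *
 *	static void my_interrupt(void *arg);	// driver_intr_t
 *
 *	int rid = 0;
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *		RF_SHAREABLE | RF_ACTIVE);
 *	if (sc->irq_res == NULL
 *		|| bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *			NULL, my_interrupt, sc, &sc->irq_handle) != 0)
 *		return ENXIO;
 *
 *	// ... and on detach:
 *	bus_teardown_intr(dev, sc->irq_res, sc->irq_handle);
 *	bus_release_resource(dev, SYS_RES_IRQ, rid, sc->irq_res);
 */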


int
bus_teardown_intr(device_t dev, struct resource *res, void *arg)
{
	struct internal_intr *intr = (struct internal_intr *)arg;
	if (intr == NULL)
		return -1;

	struct root_device_softc *root
		= (struct root_device_softc *)dev->root->softc;

	if ((root->is_msi || root->is_msix) && gPCIx86 != NULL) {
		// disable msi generation
		pci_info *info = &root->pci_info;
		gPCIx86->disable_msi(info->bus, info->device, info->function);
	}

	if (intr->filter != NULL) {
		remove_io_interrupt_handler(intr->irq, (interrupt_handler)intr->filter,
			intr->arg);
	} else {
		remove_io_interrupt_handler(intr->irq, intr_wrapper, intr);
	}

	free_internal_intr(intr);
	return 0;
}


int
bus_bind_intr(device_t dev, struct resource *res, int cpu)
{
	if (dev->parent == NULL)
		return EINVAL;

	// TODO
	return 0;
}


int
bus_describe_intr(device_t dev, struct resource *irq, void *cookie,
	const char* fmt, ...)
{
	if (dev->parent == NULL)
		return EINVAL;

	// we don't really support names for interrupts
	return 0;
}


// #pragma mark - bus functions


bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
	return NULL;
}


int
bus_generic_suspend(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_resume(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_shutdown(device_t dev)
{
	UNIMPLEMENTED();
}


int
bus_print_child_header(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_print_child_footer(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_print_child(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	UNIMPLEMENTED();
}


int
bus_child_present(device_t child)
{
	device_t parent = device_get_parent(child);
	if (parent == NULL)
		return 0;

	return bus_child_present(parent);
}


void
bus_enumerate_hinted_children(device_t bus)
{
#if 0
	UNIMPLEMENTED();
#endif
}


// #pragma mark - PCI functions


uint32_t
pci_read_config(device_t dev, int offset, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	uint32_t value = gPci->read_pci_config(info->bus, info->device,
		info->function, offset, size);
	TRACE_PCI(dev, "pci_read_config(%i, %i) = 0x%x\n", offset, size, value);
	return value;
}


void
pci_write_config(device_t dev, int offset, uint32_t value, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	TRACE_PCI(dev, "pci_write_config(%i, 0x%x, %i)\n", offset, value, size);

	gPci->write_pci_config(info->bus, info->device, info->function, offset,
		size, value);
}


uint16_t
pci_get_vendor(device_t dev)
{
	return pci_read_config(dev, PCI_vendor_id, 2);
}


uint16_t
pci_get_device(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2);
}


uint16_t
pci_get_subvendor(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_vendor_id, 2);
}


uint16_t
pci_get_subdevice(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_id, 2);
}


uint8_t
pci_get_revid(device_t dev)
{
	return pci_read_config(dev, PCI_revision, 1);
}


uint32_t
pci_get_domain(device_t dev)
{
	return 0;
}


uint32_t
pci_get_devid(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2) << 16 |
		pci_read_config(dev, PCI_vendor_id, 2);
}


uint8_t
pci_get_cachelnsz(device_t dev)
{
	return pci_read_config(dev, PCI_line_size, 1);
}


uint8_t *
pci_get_ether(device_t dev)
{
	/* used in if_dc to get the MAC from CardBus CIS for Xircom card */
	return NULL; /* NULL is handled in the caller correctly */
}


uint8_t
pci_get_bus(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->bus;
}


uint8_t
pci_get_slot(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->device;
}


uint8_t
pci_get_function(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->function;
}


device_t
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
{
	// We don't support that yet - if we want to support the multi port
	// feature of the Broadcom BCM 570x driver, we would have to change
	// that.
	return NULL;
}


static void
pci_set_command_bit(device_t dev, uint16_t bit)
{
	uint16_t command = pci_read_config(dev, PCI_command, 2);
	pci_write_config(dev, PCI_command, command | bit, 2);
}


int
pci_enable_busmaster(device_t dev)
{
	pci_set_command_bit(dev, PCI_command_master);
	return 0;
}


int
pci_enable_io(device_t dev, int space)
{
	/* adapted from FreeBSD's pci_enable_io_method */
	int bit = 0;

	switch (space) {
		case SYS_RES_IOPORT:
			bit = PCI_command_io;
			break;
		case SYS_RES_MEMORY:
			bit = PCI_command_memory;
			break;
		default:
			return EINVAL;
	}

	pci_set_command_bit(dev, bit);
	if (pci_read_config(dev, PCI_command, 2) & bit)
		return 0;

	device_printf(dev, "pci_enable_io(%d) failed.\n", space);

	return ENXIO;
}


int
pci_find_cap(device_t dev, int capability, int *capreg)
{
	return pci_find_extcap(dev, capability, capreg);
}


int
pci_find_extcap(device_t child, int capability, int *_capabilityRegister)
{
	uint8 capabilityPointer;
	uint8 headerType;
	uint16 status;

	status = pci_read_config(child, PCIR_STATUS, 2);
	if ((status & PCIM_STATUS_CAPPRESENT) == 0)
		return ENXIO;

	headerType = pci_read_config(child, PCI_header_type, 1);
	switch (headerType & PCIM_HDRTYPE) {
		case 0:
		case 1:
			capabilityPointer = PCIR_CAP_PTR;
			break;
		case 2:
			capabilityPointer = PCIR_CAP_PTR_2;
			break;
		default:
			return ENXIO;
	}
	capabilityPointer = pci_read_config(child, capabilityPointer, 1);

	while (capabilityPointer != 0) {
		if (pci_read_config(child, capabilityPointer + PCICAP_ID, 1)
				== capability) {
			if (_capabilityRegister != NULL)
				*_capabilityRegister = capabilityPointer;
			return 0;
		}
		capabilityPointer = pci_read_config(child,
			capabilityPointer + PCICAP_NEXTPTR, 1);
	}

	return ENOENT;
}


int
pci_msi_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msi_count(info->bus, info->device, info->function);
}


int
pci_alloc_msi(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msi(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	((struct root_device_softc *)dev->root->softc)->is_msi = true;
	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_release_msi(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	gPCIx86->unconfigure_msi(info->bus, info->device, info->function);
	((struct root_device_softc *)dev->root->softc)->is_msi = false;
	((struct root_device_softc *)dev->root->softc)->is_msix = false;
	return EOK;
}


int
pci_msix_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msix_count(info->bus, info->device, info->function);
}


int
pci_alloc_msix(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msix(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	((struct root_device_softc *)dev->root->softc)->is_msix = true;
	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_get_max_read_req(device_t dev)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= PCIM_EXP_CTL_MAX_READ_REQUEST;
	val >>= 12;
	return (1 << (val + 7));
}


int
pci_set_max_read_req(device_t dev, int size)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	if (size < 128)
		size = 128;
	if (size > 4096)
		size = 4096;
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
	return (size);
}


int
pci_get_powerstate(device_t dev)
{
	int capabilityRegister;
	uint16 status;
	int powerState = PCI_POWERSTATE_D0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return powerState;

	status = pci_read_config(dev, capabilityRegister + PCIR_POWER_STATUS, 2);
	switch (status & PCI_pm_mask) {
		case PCI_pm_state_d0:
			break;
		case PCI_pm_state_d1:
			powerState = PCI_POWERSTATE_D1;
			break;
		case PCI_pm_state_d2:
			powerState = PCI_POWERSTATE_D2;
			break;
		case PCI_pm_state_d3:
			powerState = PCI_POWERSTATE_D3;
			break;
		default:
			powerState = PCI_POWERSTATE_UNKNOWN;
			break;
	}

	TRACE_PCI(dev, "%s: D%i\n", __func__, powerState);
	return powerState;
}


int
pci_set_powerstate(device_t dev, int newPowerState)
{
	int capabilityRegister;
	int oldPowerState;
	uint8 currentPowerManagementStatus;
	uint8 newPowerManagementStatus;
	uint16 powerManagementCapabilities;
	bigtime_t stateTransitionDelayInUs = 0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return EOPNOTSUPP;

	oldPowerState = pci_get_powerstate(dev);
	if (oldPowerState == newPowerState)
		return EOK;

	switch (std::max(oldPowerState, newPowerState)) {
		case PCI_POWERSTATE_D2:
			stateTransitionDelayInUs = 200;
			break;
		case PCI_POWERSTATE_D3:
			stateTransitionDelayInUs = 10000;
			break;
	}

	currentPowerManagementStatus = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_STATUS, 2);
	newPowerManagementStatus = currentPowerManagementStatus & ~PCI_pm_mask;
	powerManagementCapabilities = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_CAP, 2);

	switch (newPowerState) {
		case PCI_POWERSTATE_D0:
			newPowerManagementStatus |= PCIM_PSTAT_D0;
			break;
		case PCI_POWERSTATE_D1:
			if ((powerManagementCapabilities & PCI_pm_d1supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D1;
			break;
		case PCI_POWERSTATE_D2:
			if ((powerManagementCapabilities & PCI_pm_d2supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D2;
			break;
		case PCI_POWERSTATE_D3:
			newPowerManagementStatus |= PCIM_PSTAT_D3;
			break;
		default:
			return EINVAL;
	}

	TRACE_PCI(dev, "%s: D%i -> D%i\n", __func__, oldPowerState, newPowerState);
	// write back the updated power management status register, not the raw
	// power state constant
	pci_write_config(dev, capabilityRegister + PCIR_POWER_STATUS,
		newPowerManagementStatus, 2);
	if (stateTransitionDelayInUs != 0)
		snooze(stateTransitionDelayInUs);

	return EOK;
}
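

/*
 * Usage sketch (illustrative only): the MSI helpers above map onto the usual
 * FreeBSD allocation sequence. A driver would probe pci_msi_count(), allocate
 * the vectors, and then request SYS_RES_IRQ with a non-zero rid, which
 * bus_alloc_resource() translates to interrupt_line + rid - 1. The `sc`
 * fields below are hypothetical driver-side names.
 *
 *	int count = pci_msi_count(dev);
 *	int rid = 0;
 *	if (count == 1 && pci_alloc_msi(dev, &count) == 0)
 *		rid = 1;	// first (and only) MSI vector
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *		RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE));
 *
 *	// on detach, after bus_teardown_intr()/bus_release_resource():
 *	if (rid != 0)
 *		pci_release_msi(dev);
 */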