/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Copyright 2004, Marcus Overhagen. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


extern "C" {
#include "device.h"
}

#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>

extern "C" {
#include <compat/dev/pci/pcireg.h>
#include <compat/dev/pci/pcivar.h>
#include <compat/machine/resource.h>
#include <compat/sys/mutex.h>
#include <compat/machine/bus.h>
#include <compat/sys/rman.h>
#include <compat/sys/bus.h>
}

// private kernel header to get B_NO_HANDLED_INFO
#include <int.h>

#include <PCI_x86.h>


//#define DEBUG_BUS_SPACE_RW
#ifdef DEBUG_BUS_SPACE_RW
#	define TRACE_BUS_SPACE_RW(x) driver_printf x
#else
#	define TRACE_BUS_SPACE_RW(x)
#endif

//#define DEBUG_PCI
#ifdef DEBUG_PCI
#	define TRACE_PCI(dev, format, args...) device_printf(dev, format , ##args)
#else
#	define TRACE_PCI(dev, format, args...) do { } while (0)
#endif


#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))


struct internal_intr {
	device_t dev;
	driver_filter_t filter;
	driver_intr_t *handler;
	void *arg;
	int irq;
	uint32 flags;
	bool is_msi;

	thread_id thread;
	sem_id sem;
	int32 handling;
};

static int32 intr_wrapper(void *data);


/* fls(): index of the highest set bit, 1-based; returns 0 for mask == 0. */
static int
fls(int mask)
{
	int bit;
	if (mask == 0)
		return (0);
	for (bit = 1; mask != 1; bit++)
		mask = (unsigned int)mask >> 1;
	return (bit);
}


static area_id
map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 offset = _phy & (B_PAGE_SIZE - 1);
	phys_addr_t physicalAddr = _phy - offset;
	area_id area;

	size = ROUNDUP(size + offset, B_PAGE_SIZE);
	area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
		protection, virtualAddr);
	if (area < B_OK)
		return area;

	*virtualAddr = (uint8 *)(*virtualAddr) + offset;

	return area;
}


static int
bus_alloc_irq_resource(device_t dev, struct resource *res)
{
	uint8 irq = pci_read_config(dev, PCI_interrupt_line, 1);
	if (irq == 0 || irq == 0xff)
		return -1;

	/* TODO: IRQ resources! */
*/ 109 res->r_bustag = 0; 110 res->r_bushandle = irq; 111 112 return 0; 113 } 114 115 116 static int 117 bus_alloc_mem_resource(device_t dev, struct resource *res, int regid) 118 { 119 uint32 addr = pci_read_config(dev, regid, 4) & PCI_address_memory_32_mask; 120 uint32 size = 128 * 1024; /* XXX */ 121 void *virtualAddr; 122 123 res->r_mapped_area = map_mem(&virtualAddr, addr, size, 0, 124 "bus_alloc_resource(MEMORY)"); 125 if (res->r_mapped_area < B_OK) 126 return -1; 127 128 res->r_bustag = I386_BUS_SPACE_MEM; 129 res->r_bushandle = (bus_space_handle_t)virtualAddr; 130 return 0; 131 } 132 133 134 static int 135 bus_alloc_ioport_resource(device_t dev, struct resource *res, int regid) 136 { 137 res->r_bustag = I386_BUS_SPACE_IO; 138 res->r_bushandle = pci_read_config(dev, regid, 4) & PCI_address_io_mask; 139 return 0; 140 } 141 142 143 struct resource * 144 bus_alloc_resource(device_t dev, int type, int *rid, unsigned long start, 145 unsigned long end, unsigned long count, uint32 flags) 146 { 147 struct resource *res; 148 int result = -1; 149 150 if (type != SYS_RES_IRQ && type != SYS_RES_MEMORY 151 && type != SYS_RES_IOPORT) 152 return NULL; 153 154 device_printf(dev, "bus_alloc_resource(%i, [%i], 0x%lx, 0x%lx, 0x%lx," 155 "0x%" B_PRIx32 ")\n", type, *rid, start, end, count, flags); 156 157 // maybe a local array of resources is enough 158 res = (struct resource *)malloc(sizeof(struct resource)); 159 if (res == NULL) 160 return NULL; 161 162 if (type == SYS_RES_IRQ) { 163 if (*rid == 0) { 164 // pinned interrupt 165 result = bus_alloc_irq_resource(dev, res); 166 } else { 167 // msi or msi-x interrupt at index *rid - 1 168 pci_info *info; 169 info = &((struct root_device_softc *)dev->root->softc)->pci_info; 170 res->r_bustag = 1; 171 res->r_bushandle = info->u.h0.interrupt_line + *rid - 1; 172 result = 0; 173 174 // TODO: msi-x interrupts 175 } 176 } else if (type == SYS_RES_MEMORY) 177 result = bus_alloc_mem_resource(dev, res, *rid); 178 else if (type == SYS_RES_IOPORT) 179 result = bus_alloc_ioport_resource(dev, res, *rid); 180 181 if (result < 0) { 182 free(res); 183 return NULL; 184 } 185 186 res->r_type = type; 187 return res; 188 } 189 190 191 int 192 bus_release_resource(device_t dev, int type, int rid, struct resource *res) 193 { 194 if (res->r_type != type) 195 panic("bus_release_resource: mismatch"); 196 197 if (type == SYS_RES_MEMORY) 198 delete_area(res->r_mapped_area); 199 200 free(res); 201 return 0; 202 } 203 204 205 int 206 bus_alloc_resources(device_t dev, struct resource_spec *resourceSpec, 207 struct resource **resources) 208 { 209 int i; 210 211 for (i = 0; resourceSpec[i].type != -1; i++) { 212 resources[i] = bus_alloc_resource_any(dev, 213 resourceSpec[i].type, &resourceSpec[i].rid, resourceSpec[i].flags); 214 if (resources[i] == NULL 215 && (resourceSpec[i].flags & RF_OPTIONAL) == 0) { 216 for (++i; resourceSpec[i].type != -1; i++) { 217 resources[i] = NULL; 218 } 219 220 bus_release_resources(dev, resourceSpec, resources); 221 return ENXIO; 222 } 223 } 224 return 0; 225 } 226 227 228 void 229 bus_release_resources(device_t dev, const struct resource_spec *resourceSpec, 230 struct resource **resources) 231 { 232 int i; 233 234 for (i = 0; resourceSpec[i].type != -1; i++) { 235 if (resources[i] == NULL) 236 continue; 237 238 bus_release_resource(dev, resourceSpec[i].type, resourceSpec[i].rid, 239 resources[i]); 240 resources[i] = NULL; 241 } 242 } 243 244 245 bus_space_handle_t 246 rman_get_bushandle(struct resource *res) 247 { 248 return res->r_bushandle; 249 } 250 


bus_space_tag_t
rman_get_bustag(struct resource *res)
{
	return res->r_bustag;
}


int
rman_get_rid(struct resource *res)
{
	return 0;
}


// #pragma mark - Interrupt handling


// Non-fast interrupts are handled in two stages: intr_wrapper() runs in
// interrupt context, checks and disables the device's interrupts and releases
// a semaphore; intr_handler(), running in a dedicated kernel thread, then
// calls the driver's handler and re-enables the device's interrupts.
static int32
intr_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	//device_printf(intr->dev, "in interrupt handler.\n");

	if (!HAIKU_CHECK_DISABLE_INTERRUPTS(intr->dev))
		return B_UNHANDLED_INTERRUPT;

	release_sem_etc(intr->sem, 1, B_DO_NOT_RESCHEDULE);
	return intr->handling ? B_HANDLED_INTERRUPT : B_INVOKE_SCHEDULER;
}


static int32
intr_fast_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	intr->handler(intr->arg);

	// We don't know if the interrupt has been handled.
	return B_UNHANDLED_INTERRUPT;
}


static int32
intr_handler(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;
	status_t status;

	while (1) {
		status = acquire_sem(intr->sem);
		if (status < B_OK)
			break;

		//device_printf(intr->dev, "in soft interrupt handler.\n");

		atomic_or(&intr->handling, 1);
		intr->handler(intr->arg);
		atomic_and(&intr->handling, 0);
		HAIKU_REENABLE_INTERRUPTS(intr->dev);
	}

	return 0;
}


static void
free_internal_intr(struct internal_intr *intr)
{
	if (intr->sem >= B_OK) {
		status_t status;
		delete_sem(intr->sem);
		wait_for_thread(intr->thread, &status);
	}

	free(intr);
}


int
bus_setup_intr(device_t dev, struct resource *res, int flags,
	driver_filter_t filter, driver_intr_t handler, void *arg, void **_cookie)
{
	/* TODO check MPSAFE etc */

	struct internal_intr *intr = (struct internal_intr *)malloc(
		sizeof(struct internal_intr));
	char semName[64];
	status_t status;

	if (intr == NULL)
		return B_NO_MEMORY;

	intr->dev = dev;
	intr->filter = filter;
	intr->handler = handler;
	intr->arg = arg;
	intr->irq = res->r_bushandle;
	intr->flags = flags;
	intr->is_msi = false;
	intr->sem = -1;
	intr->thread = -1;

	if (filter != NULL) {
		status = install_io_interrupt_handler(intr->irq,
			(interrupt_handler)intr->filter, intr->arg, 0);
	} else if ((flags & INTR_FAST) != 0) {
		status = install_io_interrupt_handler(intr->irq,
			intr_fast_wrapper, intr, B_NO_HANDLED_INFO);
	} else {
		snprintf(semName, sizeof(semName), "%s intr", dev->device_name);

		intr->sem = create_sem(0, semName);
		if (intr->sem < B_OK) {
			free(intr);
			return B_NO_MEMORY;
		}

		snprintf(semName, sizeof(semName), "%s intr handler", dev->device_name);

		intr->thread = spawn_kernel_thread(intr_handler, semName,
			B_REAL_TIME_DISPLAY_PRIORITY, intr);
		if (intr->thread < B_OK) {
			delete_sem(intr->sem);
			free(intr);
			return B_NO_MEMORY;
		}

		status = install_io_interrupt_handler(intr->irq,
			intr_wrapper, intr, B_NO_HANDLED_INFO);
	}

	if (status == B_OK && res->r_bustag == 1 && gPCIx86 != NULL) {
		// this is an msi, enable it
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		if (gPCIx86->enable_msi(info->bus, info->device,
				info->function) != B_OK) {
			device_printf(dev, "enabling msi failed\n");
			bus_teardown_intr(dev, res, intr);
			return ENODEV;
		}

		intr->is_msi = true;
	}

	if (status < B_OK) {
		free_internal_intr(intr);
		return status;
	}

	resume_thread(intr->thread);

	*_cookie = intr;
	return 0;
}


int
bus_teardown_intr(device_t dev, struct resource *res, void *arg)
{
	struct internal_intr *intr = (struct internal_intr *)arg;

	if (intr->is_msi && gPCIx86 != NULL) {
		// disable msi generation
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		gPCIx86->disable_msi(info->bus, info->device, info->function);
	}

	if (intr->filter != NULL) {
		remove_io_interrupt_handler(intr->irq, (interrupt_handler)intr->filter,
			intr->arg);
	} else if (intr->flags & INTR_FAST) {
		remove_io_interrupt_handler(intr->irq, intr_fast_wrapper, intr);
	} else {
		remove_io_interrupt_handler(intr->irq, intr_wrapper, intr);
	}

	free_internal_intr(intr);
	return 0;
}


// #pragma mark - bus functions


bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
	return NULL;
}


int
bus_generic_suspend(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_resume(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_shutdown(device_t dev)
{
	UNIMPLEMENTED();
}


int
bus_print_child_header(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_print_child_footer(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_print_child(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	UNIMPLEMENTED();
}


#define BUS_SPACE_READ(size, type, fun) \
	type bus_space_read_##size(bus_space_tag_t tag, \
		bus_space_handle_t handle, bus_size_t offset) \
	{ \
		type value; \
		if (tag == I386_BUS_SPACE_IO) \
			value = fun(handle + offset); \
		else \
			value = *(volatile type *)(handle + offset); \
		if (tag == I386_BUS_SPACE_IO) \
			TRACE_BUS_SPACE_RW(("bus_space_read_%s(0x%lx, 0x%lx, 0x%lx) = 0x%lx\n", \
				#size, (uint32)tag, (uint32)handle, (uint32)offset, (uint32)value)); \
		return value; \
	}

#define BUS_SPACE_WRITE(size, type, fun) \
	void bus_space_write_##size(bus_space_tag_t tag, \
		bus_space_handle_t handle, bus_size_t offset, type value) \
	{ \
		if (tag == I386_BUS_SPACE_IO) \
			TRACE_BUS_SPACE_RW(("bus_space_write_%s(0x%lx, 0x%lx, 0x%lx, 0x%lx)\n", \
				#size, (uint32)tag, (uint32)handle, (uint32)offset, (uint32)value)); \
		if (tag == I386_BUS_SPACE_IO) \
			fun(value, handle + offset); \
		else \
			*(volatile type *)(handle + offset) = value; \
	}

BUS_SPACE_READ(1, uint8_t, in8)
BUS_SPACE_READ(2, uint16_t, in16)
BUS_SPACE_READ(4, uint32_t, in32)

BUS_SPACE_WRITE(1, uint8_t, out8)
BUS_SPACE_WRITE(2, uint16_t, out16)
BUS_SPACE_WRITE(4, uint32_t, out32)


int
bus_child_present(device_t child)
{
	device_t parent = device_get_parent(child);
	if (parent == NULL)
		return 0;

	return bus_child_present(parent);
}


// #pragma mark - PCI functions


uint32_t
pci_read_config(device_t dev, int offset, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	uint32_t value = gPci->read_pci_config(info->bus, info->device,
		info->function, offset, size);
	TRACE_PCI(dev, "pci_read_config(%i, %i) = 0x%x\n", offset, size, value);
	return value;
}


void
pci_write_config(device_t dev, int offset, uint32_t value, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	TRACE_PCI(dev, "pci_write_config(%i, 0x%x, %i)\n", offset, value, size);

	gPci->write_pci_config(info->bus, info->device, info->function, offset,
		size, value);
}


uint16_t
pci_get_vendor(device_t dev)
{
	return pci_read_config(dev, PCI_vendor_id, 2);
}


uint16_t
pci_get_device(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2);
}


uint16_t
pci_get_subvendor(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_vendor_id, 2);
}


uint16_t
pci_get_subdevice(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_id, 2);
}


uint8_t
pci_get_revid(device_t dev)
{
	return pci_read_config(dev, PCI_revision, 1);
}


uint32_t
pci_get_domain(device_t dev)
{
	return 0;
}


uint32_t
pci_get_devid(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2) << 16 |
		pci_read_config(dev, PCI_vendor_id, 2);
}


uint8_t
pci_get_cachelnsz(device_t dev)
{
	return pci_read_config(dev, PCI_line_size, 1);
}


uint8_t *
pci_get_ether(device_t dev)
{
	/* used in if_dc to get the MAC from CardBus CIS for Xircom card */
	return NULL; /* NULL is handled in the caller correctly */
}


uint8_t
pci_get_bus(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->bus;
}


uint8_t
pci_get_slot(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->device;
}


uint8_t
pci_get_function(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->function;
}


device_t
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
{
	// We don't support that yet - if we want to support the multi port
	// feature of the Broadcom BCM 570x driver, we would have to change
	// that.
	return NULL;
}


static void
pci_set_command_bit(device_t dev, uint16_t bit)
{
	uint16_t command = pci_read_config(dev, PCI_command, 2);
	pci_write_config(dev, PCI_command, command | bit, 2);
}


int
pci_enable_busmaster(device_t dev)
{
	pci_set_command_bit(dev, PCI_command_master);
	return 0;
}


int
pci_enable_io(device_t dev, int space)
{
	/* adapted from FreeBSD's pci_enable_io_method */
	int bit = 0;

	switch (space) {
		case SYS_RES_IOPORT:
			bit = PCI_command_io;
			break;
		case SYS_RES_MEMORY:
			bit = PCI_command_memory;
			break;
		default:
			return EINVAL;
	}

	pci_set_command_bit(dev, bit);
	if (pci_read_config(dev, PCI_command, 2) & bit)
		return 0;

	device_printf(dev, "pci_enable_io(%d) failed.\n", space);

	return ENXIO;
}


int
pci_find_cap(device_t dev, int capability, int *capreg)
{
	return pci_find_extcap(dev, capability, capreg);
}


int
pci_find_extcap(device_t child, int capability, int *_capabilityRegister)
{
	uint8 capabilityPointer;
	uint8 headerType;
	uint16 status;

	status = pci_read_config(child, PCIR_STATUS, 2);
	if ((status & PCIM_STATUS_CAPPRESENT) == 0)
		return ENXIO;

	headerType = pci_read_config(child, PCI_header_type, 1);
	switch (headerType & PCIM_HDRTYPE) {
		case 0:
		case 1:
			capabilityPointer = PCIR_CAP_PTR;
			break;
		case 2:
			capabilityPointer = PCIR_CAP_PTR_2;
			break;
		default:
			return ENXIO;
	}
	capabilityPointer = pci_read_config(child, capabilityPointer, 1);

	while (capabilityPointer != 0) {
		if (pci_read_config(child, capabilityPointer + PCICAP_ID, 1)
				== capability) {
			if (_capabilityRegister != NULL)
				*_capabilityRegister = capabilityPointer;
			return 0;
		}
		capabilityPointer = pci_read_config(child,
			capabilityPointer + PCICAP_NEXTPTR, 1);
	}

	return ENOENT;
}


int
pci_msi_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msi_count(info->bus, info->device, info->function);
}


int
pci_alloc_msi(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msi(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_release_msi(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	gPCIx86->unconfigure_msi(info->bus, info->device, info->function);
	return EOK;
}


int
pci_msix_count(device_t dev)
{
	return 0;
}


int
pci_alloc_msix(device_t dev, int *count)
{
	return ENODEV;
}


int
pci_get_max_read_req(device_t dev)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= PCIM_EXP_CTL_MAX_READ_REQUEST;
	val >>= 12;
	return (1 << (val + 7));
}


int
pci_set_max_read_req(device_t dev, int size)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	if (size < 128)
		size = 128;
	if (size > 4096)
		size = 4096;
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
	return (size);
}


int
pci_get_powerstate(device_t dev)
{
	int capabilityRegister;
	uint16 status;
	int powerState = PCI_POWERSTATE_D0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return powerState;

	status = pci_read_config(dev, capabilityRegister + PCIR_POWER_STATUS, 2);
	switch (status & PCI_pm_mask) {
		case PCI_pm_state_d0:
			break;
		case PCI_pm_state_d1:
			powerState = PCI_POWERSTATE_D1;
			break;
		case PCI_pm_state_d2:
			powerState = PCI_POWERSTATE_D2;
			break;
		case PCI_pm_state_d3:
			powerState = PCI_POWERSTATE_D3;
			break;
		default:
			powerState = PCI_POWERSTATE_UNKNOWN;
			break;
	}

	TRACE_PCI(dev, "%s: D%i\n", __func__, powerState);
	return powerState;
}


int
pci_set_powerstate(device_t dev, int newPowerState)
{
	int capabilityRegister;
	int oldPowerState;
	uint8 currentPowerManagementStatus;
	uint8 newPowerManagementStatus;
	uint16 powerManagementCapabilities;
	bigtime_t stateTransitionDelayInUs = 0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return EOPNOTSUPP;

	oldPowerState = pci_get_powerstate(dev);
	if (oldPowerState == newPowerState)
		return EOK;

	switch (std::max(oldPowerState, newPowerState)) {
		case PCI_POWERSTATE_D2:
			stateTransitionDelayInUs = 200;
			break;
		case PCI_POWERSTATE_D3:
			stateTransitionDelayInUs = 10000;
			break;
	}

	currentPowerManagementStatus = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_STATUS, 2);
	newPowerManagementStatus = currentPowerManagementStatus & ~PCI_pm_mask;
	powerManagementCapabilities = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_CAP, 2);

	switch (newPowerState) {
		case PCI_POWERSTATE_D0:
			newPowerManagementStatus |= PCIM_PSTAT_D0;
			break;
		case PCI_POWERSTATE_D1:
			if ((powerManagementCapabilities & PCI_pm_d1supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D1;
			break;
		case PCI_POWERSTATE_D2:
			if ((powerManagementCapabilities & PCI_pm_d2supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D2;
			break;
		case PCI_POWERSTATE_D3:
			newPowerManagementStatus |= PCIM_PSTAT_D3;
			break;
		default:
			return EINVAL;
	}

	TRACE_PCI(dev, "%s: D%i -> D%i\n", __func__, oldPowerState, newPowerState);
	// write back the status register with only the power state bits changed
	pci_write_config(dev, capabilityRegister + PCIR_POWER_STATUS,
		newPowerManagementStatus, 2);
	if (stateTransitionDelayInUs != 0)
		snooze(stateTransitionDelayInUs);

	return EOK;
}