/*
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Copyright 2004, Marcus Overhagen. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


extern "C" {
#include "device.h"
}

#include <stdlib.h>

#include <algorithm>

#include <arch/cpu.h>

extern "C" {
#include <compat/dev/pci/pcireg.h>
#include <compat/dev/pci/pcivar.h>
#include <compat/machine/resource.h>
#include <compat/sys/mutex.h>
#include <compat/machine/bus.h>
#include <compat/sys/rman.h>
#include <compat/sys/bus.h>
}

// private kernel header to get B_NO_HANDLED_INFO
#include <int.h>

#include <PCI_x86.h>


//#define DEBUG_BUS_SPACE_RW
#ifdef DEBUG_BUS_SPACE_RW
#	define TRACE_BUS_SPACE_RW(x) driver_printf x
#else
#	define TRACE_BUS_SPACE_RW(x)
#endif

//#define DEBUG_PCI
#ifdef DEBUG_PCI
#	define TRACE_PCI(dev, format, args...) device_printf(dev, format , ##args)
#else
#	define TRACE_PCI(dev, format, args...) do { } while (0)
#endif


#define ROUNDUP(a, b) (((a) + ((b)-1)) & ~((b)-1))


struct internal_intr {
	device_t		dev;
	driver_filter_t	filter;
	driver_intr_t	*handler;
	void			*arg;
	int				irq;
	uint32			flags;
	bool			is_msi;

	thread_id		thread;
	sem_id			sem;
	int32			handling;
};

static int32 intr_wrapper(void *data);


static int
fls(int mask)
{
	int bit;
	if (mask == 0)
		return (0);
	for (bit = 1; mask != 1; bit++)
		mask = (unsigned int)mask >> 1;
	return (bit);
}


static area_id
map_mem(void **virtualAddr, phys_addr_t _phy, size_t size, uint32 protection,
	const char *name)
{
	uint32 offset = _phy & (B_PAGE_SIZE - 1);
	phys_addr_t physicalAddr = _phy - offset;
	area_id area;

	size = ROUNDUP(size + offset, B_PAGE_SIZE);
	area = map_physical_memory(name, physicalAddr, size, B_ANY_KERNEL_ADDRESS,
		protection, virtualAddr);
	if (area < B_OK)
		return area;

	*virtualAddr = (uint8 *)(*virtualAddr) + offset;

	return area;
}


static int
bus_alloc_irq_resource(device_t dev, struct resource *res)
{
	uint8 irq = pci_read_config(dev, PCI_interrupt_line, 1);
	if (irq == 0 || irq == 0xff)
		return -1;

	/* TODO: IRQ resources! */
	res->r_bustag = 0;
	res->r_bushandle = irq;

	return 0;
}


static int
bus_alloc_mem_resource(device_t dev, struct resource *res, int regid)
{
	uint32 addr = pci_read_config(dev, regid, 4) & PCI_address_memory_32_mask;
	uint32 size = 128 * 1024; /* XXX */
	void *virtualAddr;

	res->r_mapped_area = map_mem(&virtualAddr, addr, size, 0,
		"bus_alloc_resource(MEMORY)");
	if (res->r_mapped_area < B_OK)
		return -1;

	res->r_bustag = I386_BUS_SPACE_MEM;
	res->r_bushandle = (bus_space_handle_t)virtualAddr;
	return 0;
}


static int
bus_alloc_ioport_resource(device_t dev, struct resource *res, int regid)
{
	res->r_bustag = I386_BUS_SPACE_IO;
	res->r_bushandle = pci_read_config(dev, regid, 4) & PCI_address_io_mask;
	return 0;
}


struct resource *
bus_alloc_resource(device_t dev, int type, int *rid, unsigned long start,
	unsigned long end, unsigned long count, uint32 flags)
{
	struct resource *res;
	int result = -1;

	if (type != SYS_RES_IRQ && type != SYS_RES_MEMORY
		&& type != SYS_RES_IOPORT)
		return NULL;

	device_printf(dev, "bus_alloc_resource(%i, [%i], 0x%lx, 0x%lx, 0x%lx,"
		"0x%" B_PRIx32 ")\n", type, *rid, start, end, count, flags);

	// maybe a local array of resources is enough
	res = (struct resource *)malloc(sizeof(struct resource));
	if (res == NULL)
		return NULL;

	if (type == SYS_RES_IRQ) {
		if (*rid == 0) {
			// pinned interrupt
			result = bus_alloc_irq_resource(dev, res);
		} else {
			// msi or msi-x interrupt at index *rid - 1
			pci_info *info;
			info = &((struct root_device_softc *)dev->root->softc)->pci_info;
			res->r_bustag = 1;
			res->r_bushandle = info->u.h0.interrupt_line + *rid - 1;
			result = 0;

			// TODO: msi-x interrupts
		}
	} else if (type == SYS_RES_MEMORY)
		result = bus_alloc_mem_resource(dev, res, *rid);
	else if (type == SYS_RES_IOPORT)
		result = bus_alloc_ioport_resource(dev, res, *rid);

	if (result < 0) {
		free(res);
		return NULL;
	}

	res->r_type = type;
	return res;
}


int
bus_release_resource(device_t dev, int type, int rid, struct resource *res)
{
	if (res->r_type != type)
		panic("bus_release_resource: mismatch");

	if (type == SYS_RES_MEMORY)
		delete_area(res->r_mapped_area);

	free(res);
	return 0;
}


int
bus_alloc_resources(device_t dev, struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		resources[i] = bus_alloc_resource_any(dev,
			resourceSpec[i].type, &resourceSpec[i].rid, resourceSpec[i].flags);
		if (resources[i] == NULL
			&& (resourceSpec[i].flags & RF_OPTIONAL) == 0) {
			for (++i; resourceSpec[i].type != -1; i++) {
				resources[i] = NULL;
			}

			bus_release_resources(dev, resourceSpec, resources);
			return ENXIO;
		}
	}
	return 0;
}


void
bus_release_resources(device_t dev, const struct resource_spec *resourceSpec,
	struct resource **resources)
{
	int i;

	for (i = 0; resourceSpec[i].type != -1; i++) {
		if (resources[i] == NULL)
			continue;

		bus_release_resource(dev, resourceSpec[i].type, resourceSpec[i].rid,
			resources[i]);
		resources[i] = NULL;
	}
}


bus_space_handle_t
rman_get_bushandle(struct resource *res)
{
	return res->r_bushandle;
}
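

/*
 * Illustrative sketch (not part of this file): how a ported FreeBSD driver
 * would typically combine bus_alloc_resource() with the rman accessors here
 * and the bus_space_*() helpers further below. The "foo" softc layout and
 * the register offset are hypothetical.
 *
 *	struct foo_softc {
 *		struct resource *mem_res;
 *		bus_space_tag_t mem_tag;
 *		bus_space_handle_t mem_handle;
 *	};
 *
 *	static int
 *	foo_map_registers(device_t dev)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)device_get_softc(dev);
 *		int rid = PCIR_BAR(0);
 *
 *		sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
 *			RF_ACTIVE);
 *		if (sc->mem_res == NULL)
 *			return ENXIO;
 *
 *		sc->mem_tag = rman_get_bustag(sc->mem_res);
 *		sc->mem_handle = rman_get_bushandle(sc->mem_res);
 *
 *		// read a (hypothetical) 32 bit status register at offset 0x08
 *		uint32_t status = bus_space_read_4(sc->mem_tag, sc->mem_handle, 0x08);
 *		(void)status;
 *		return 0;
 *	}
 */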


bus_space_tag_t
rman_get_bustag(struct resource *res)
{
	return res->r_bustag;
}


// #pragma mark - Interrupt handling


static int32
intr_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	//device_printf(intr->dev, "in interrupt handler.\n");

	if (!HAIKU_CHECK_DISABLE_INTERRUPTS(intr->dev))
		return B_UNHANDLED_INTERRUPT;

	release_sem_etc(intr->sem, 1, B_DO_NOT_RESCHEDULE);
	return intr->handling ? B_HANDLED_INTERRUPT : B_INVOKE_SCHEDULER;
}


static int32
intr_fast_wrapper(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;

	intr->handler(intr->arg);

	// We don't know if the interrupt has been handled.
	return B_UNHANDLED_INTERRUPT;
}


static int32
intr_handler(void *data)
{
	struct internal_intr *intr = (struct internal_intr *)data;
	status_t status;

	while (1) {
		status = acquire_sem(intr->sem);
		if (status < B_OK)
			break;

		//device_printf(intr->dev, "in soft interrupt handler.\n");

		atomic_or(&intr->handling, 1);
		intr->handler(intr->arg);
		atomic_and(&intr->handling, 0);
		HAIKU_REENABLE_INTERRUPTS(intr->dev);
	}

	return 0;
}


static void
free_internal_intr(struct internal_intr *intr)
{
	if (intr->sem >= B_OK) {
		status_t status;
		delete_sem(intr->sem);
		wait_for_thread(intr->thread, &status);
	}

	free(intr);
}


int
bus_setup_intr(device_t dev, struct resource *res, int flags,
	driver_filter_t filter, driver_intr_t handler, void *arg, void **_cookie)
{
	/* TODO check MPSAFE etc */

	struct internal_intr *intr = (struct internal_intr *)malloc(
		sizeof(struct internal_intr));
	char semName[64];
	status_t status;

	if (intr == NULL)
		return B_NO_MEMORY;

	intr->dev = dev;
	intr->filter = filter;
	intr->handler = handler;
	intr->arg = arg;
	intr->irq = res->r_bushandle;
	intr->flags = flags;
	intr->is_msi = false;
	intr->sem = -1;
	intr->thread = -1;

	if (filter != NULL) {
		status = install_io_interrupt_handler(intr->irq,
			(interrupt_handler)intr->filter, intr->arg, 0);
	} else if ((flags & INTR_FAST) != 0) {
		status = install_io_interrupt_handler(intr->irq,
			intr_fast_wrapper, intr, B_NO_HANDLED_INFO);
	} else {
		snprintf(semName, sizeof(semName), "%s intr", dev->device_name);

		intr->sem = create_sem(0, semName);
		if (intr->sem < B_OK) {
			free(intr);
			return B_NO_MEMORY;
		}

		snprintf(semName, sizeof(semName), "%s intr handler", dev->device_name);

		intr->thread = spawn_kernel_thread(intr_handler, semName,
			B_REAL_TIME_DISPLAY_PRIORITY, intr);
		if (intr->thread < B_OK) {
			delete_sem(intr->sem);
			free(intr);
			return B_NO_MEMORY;
		}

		status = install_io_interrupt_handler(intr->irq,
			intr_wrapper, intr, B_NO_HANDLED_INFO);
	}

	if (status == B_OK && res->r_bustag == 1 && gPCIx86 != NULL) {
		// this is an msi, enable it
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		if (gPCIx86->enable_msi(info->bus, info->device,
				info->function) != B_OK) {
			device_printf(dev, "enabling msi failed\n");
			bus_teardown_intr(dev, res, intr);
			return ENODEV;
		}

		intr->is_msi = true;
	}

	if (status < B_OK) {
		free_internal_intr(intr);
		return status;
	}

	resume_thread(intr->thread);

	*_cookie = intr;
	return 0;
}


int
bus_teardown_intr(device_t dev, struct resource *res, void *arg)
{
	struct internal_intr *intr = (struct internal_intr *)arg;

	if (intr->is_msi && gPCIx86 != NULL) {
		// disable msi generation
		pci_info *info
			= &((struct root_device_softc *)dev->root->softc)->pci_info;
		gPCIx86->disable_msi(info->bus, info->device, info->function);
	}

	if (intr->filter != NULL) {
		remove_io_interrupt_handler(intr->irq, (interrupt_handler)intr->filter,
			intr->arg);
	} else if (intr->flags & INTR_FAST) {
		remove_io_interrupt_handler(intr->irq, intr_fast_wrapper, intr);
	} else {
		remove_io_interrupt_handler(intr->irq, intr_wrapper, intr);
	}

	free_internal_intr(intr);
	return 0;
}


// #pragma mark - bus functions


bus_dma_tag_t
bus_get_dma_tag(device_t dev)
{
	return NULL;
}


int
bus_generic_suspend(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_resume(device_t dev)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_shutdown(device_t dev)
{
	UNIMPLEMENTED();
}


int
bus_print_child_header(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_print_child_footer(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


int
bus_generic_print_child(device_t dev, device_t child)
{
	UNIMPLEMENTED();
	return B_ERROR;
}


void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	UNIMPLEMENTED();
}


#define BUS_SPACE_READ(size, type, fun) \
	type bus_space_read_##size(bus_space_tag_t tag, \
		bus_space_handle_t handle, bus_size_t offset) \
	{ \
		type value; \
		if (tag == I386_BUS_SPACE_IO) \
			value = fun(handle + offset); \
		else \
			value = *(volatile type *)(handle + offset); \
		if (tag == I386_BUS_SPACE_IO) \
			TRACE_BUS_SPACE_RW(("bus_space_read_%s(0x%lx, 0x%lx, 0x%lx) = 0x%lx\n", \
				#size, (uint32)tag, (uint32)handle, (uint32)offset, (uint32)value)); \
		return value; \
	}

#define BUS_SPACE_WRITE(size, type, fun) \
	void bus_space_write_##size(bus_space_tag_t tag, \
		bus_space_handle_t handle, bus_size_t offset, type value) \
	{ \
		if (tag == I386_BUS_SPACE_IO) \
			TRACE_BUS_SPACE_RW(("bus_space_write_%s(0x%lx, 0x%lx, 0x%lx, 0x%lx)\n", \
				#size, (uint32)tag, (uint32)handle, (uint32)offset, (uint32)value)); \
		if (tag == I386_BUS_SPACE_IO) \
			fun(value, handle + offset); \
		else \
			*(volatile type *)(handle + offset) = value; \
	}

BUS_SPACE_READ(1, uint8_t, in8)
BUS_SPACE_READ(2, uint16_t, in16)
BUS_SPACE_READ(4, uint32_t, in32)

BUS_SPACE_WRITE(1, uint8_t, out8)
BUS_SPACE_WRITE(2, uint16_t, out16)
BUS_SPACE_WRITE(4, uint32_t, out32)

int
bus_child_present(device_t child)
{
	device_t parent = device_get_parent(child);
	if (parent == NULL)
		return 0;

	return bus_child_present(parent);
}


// #pragma mark - PCI functions


uint32_t
pci_read_config(device_t dev, int offset, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	uint32_t value = gPci->read_pci_config(info->bus, info->device,
		info->function, offset, size);
	TRACE_PCI(dev, "pci_read_config(%i, %i) = 0x%x\n", offset, size, value);
	return value;
}


void
pci_write_config(device_t dev, int offset, uint32_t value, int size)
{
	pci_info *info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	TRACE_PCI(dev, "pci_write_config(%i, 0x%x, %i)\n", offset, value, size);

	gPci->write_pci_config(info->bus, info->device, info->function, offset,
		size, value);
}


uint16_t
pci_get_vendor(device_t dev)
{
	return pci_read_config(dev, PCI_vendor_id, 2);
}


uint16_t
pci_get_device(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2);
}


uint16_t
pci_get_subvendor(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_vendor_id, 2);
}


uint16_t
pci_get_subdevice(device_t dev)
{
	return pci_read_config(dev, PCI_subsystem_id, 2);
}


uint8_t
pci_get_revid(device_t dev)
{
	return pci_read_config(dev, PCI_revision, 1);
}


uint32_t
pci_get_domain(device_t dev)
{
	return 0;
}

uint32_t
pci_get_devid(device_t dev)
{
	return pci_read_config(dev, PCI_device_id, 2) << 16 |
		pci_read_config(dev, PCI_vendor_id, 2);
}

uint8_t
pci_get_cachelnsz(device_t dev)
{
	return pci_read_config(dev, PCI_line_size, 1);
}

uint8_t *
pci_get_ether(device_t dev)
{
	/* used in if_dc to get the MAC from CardBus CIS for Xircom card */
	return NULL; /* NULL is handled in the caller correctly */
}

uint8_t
pci_get_bus(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->bus;
}


uint8_t
pci_get_slot(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->device;
}


uint8_t
pci_get_function(device_t dev)
{
	pci_info *info
		= &((struct root_device_softc *)dev->root->softc)->pci_info;
	return info->function;
}


device_t
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
{
	// We don't support that yet - if we want to support the multi port
	// feature of the Broadcom BCM 570x driver, we would have to change
	// that.
	return NULL;
}


static void
pci_set_command_bit(device_t dev, uint16_t bit)
{
	uint16_t command = pci_read_config(dev, PCI_command, 2);
	pci_write_config(dev, PCI_command, command | bit, 2);
}


int
pci_enable_busmaster(device_t dev)
{
	pci_set_command_bit(dev, PCI_command_master);
	return 0;
}


int
pci_enable_io(device_t dev, int space)
{
	/* adapted from FreeBSD's pci_enable_io_method */
	int bit = 0;

	switch (space) {
		case SYS_RES_IOPORT:
			bit = PCI_command_io;
			break;
		case SYS_RES_MEMORY:
			bit = PCI_command_memory;
			break;
		default:
			return EINVAL;
	}

	pci_set_command_bit(dev, bit);
	if (pci_read_config(dev, PCI_command, 2) & bit)
		return 0;

	device_printf(dev, "pci_enable_io(%d) failed.\n", space);

	return ENXIO;
}


int
pci_find_cap(device_t dev, int capability, int *capreg)
{
	return pci_find_extcap(dev, capability, capreg);
}


int
pci_find_extcap(device_t child, int capability, int *_capabilityRegister)
{
	uint8 capabilityPointer;
	uint8 headerType;
	uint16 status;

	status = pci_read_config(child, PCIR_STATUS, 2);
	if ((status & PCIM_STATUS_CAPPRESENT) == 0)
		return ENXIO;

	headerType = pci_read_config(child, PCI_header_type, 1);
	switch (headerType & PCIM_HDRTYPE) {
		case 0:
		case 1:
			capabilityPointer = PCIR_CAP_PTR;
			break;
		case 2:
			capabilityPointer = PCIR_CAP_PTR_2;
			break;
		default:
			return ENXIO;
	}
	capabilityPointer = pci_read_config(child, capabilityPointer, 1);

	while (capabilityPointer != 0) {
		if (pci_read_config(child, capabilityPointer + PCICAP_ID, 1)
				== capability) {
			if (_capabilityRegister != NULL)
				*_capabilityRegister = capabilityPointer;
			return 0;
		}
		capabilityPointer = pci_read_config(child,
			capabilityPointer + PCICAP_NEXTPTR, 1);
	}

	return ENOENT;
}


int
pci_msi_count(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return 0;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	return gPCIx86->get_msi_count(info->bus, info->device, info->function);
}


int
pci_alloc_msi(device_t dev, int *count)
{
	pci_info *info;
	uint8 startVector = 0;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;

	if (gPCIx86->configure_msi(info->bus, info->device, info->function, *count,
			&startVector) != B_OK) {
		return ENODEV;
	}

	info->u.h0.interrupt_line = startVector;
	return EOK;
}


int
pci_release_msi(device_t dev)
{
	pci_info *info;
	if (gPCIx86 == NULL)
		return ENODEV;

	info = &((struct root_device_softc *)dev->root->softc)->pci_info;
	gPCIx86->unconfigure_msi(info->bus, info->device, info->function);
	return EOK;
}


int
pci_msix_count(device_t dev)
{
	return 0;
}


int
pci_alloc_msix(device_t dev, int *count)
{
	return ENODEV;
}


int
pci_get_max_read_req(device_t dev)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= PCIM_EXP_CTL_MAX_READ_REQUEST;
	val >>= 12;
	return (1 << (val + 7));
}


int
pci_set_max_read_req(device_t dev, int size)
{
	int cap;
	uint16_t val;

	if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0)
		return (0);
	if (size < 128)
		size = 128;
	if (size > 4096)
		size = 4096;
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2);
	return (size);
}


int
pci_get_powerstate(device_t dev)
{
	int capabilityRegister;
	uint16 status;
	int powerState = PCI_POWERSTATE_D0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return powerState;

	status = pci_read_config(dev, capabilityRegister + PCIR_POWER_STATUS, 2);
	switch (status & PCI_pm_mask) {
		case PCI_pm_state_d0:
			break;
		case PCI_pm_state_d1:
			powerState = PCI_POWERSTATE_D1;
			break;
		case PCI_pm_state_d2:
			powerState = PCI_POWERSTATE_D2;
			break;
		case PCI_pm_state_d3:
			powerState = PCI_POWERSTATE_D3;
			break;
		default:
			powerState = PCI_POWERSTATE_UNKNOWN;
			break;
	}

	TRACE_PCI(dev, "%s: D%i\n", __func__, powerState);
	return powerState;
}


int
pci_set_powerstate(device_t dev, int newPowerState)
{
	int capabilityRegister;
	int oldPowerState;
	uint8 currentPowerManagementStatus;
	uint8 newPowerManagementStatus;
	uint16 powerManagementCapabilities;
	bigtime_t stateTransitionDelayInUs = 0;

	if (pci_find_extcap(dev, PCIY_PMG, &capabilityRegister) != EOK)
		return EOPNOTSUPP;

	oldPowerState = pci_get_powerstate(dev);
	if (oldPowerState == newPowerState)
		return EOK;

	switch (std::max(oldPowerState, newPowerState)) {
		case PCI_POWERSTATE_D2:
			stateTransitionDelayInUs = 200;
			break;
		case PCI_POWERSTATE_D3:
			stateTransitionDelayInUs = 10000;
			break;
	}

	currentPowerManagementStatus = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_STATUS, 2);
	newPowerManagementStatus = currentPowerManagementStatus & ~PCI_pm_mask;
	powerManagementCapabilities = pci_read_config(dev, capabilityRegister
		+ PCIR_POWER_CAP, 2);

	switch (newPowerState) {
		case PCI_POWERSTATE_D0:
			newPowerManagementStatus |= PCIM_PSTAT_D0;
			break;
		case PCI_POWERSTATE_D1:
			if ((powerManagementCapabilities & PCI_pm_d1supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D1;
			break;
		case PCI_POWERSTATE_D2:
			if ((powerManagementCapabilities & PCI_pm_d2supp) == 0)
				return EOPNOTSUPP;
			newPowerManagementStatus |= PCIM_PSTAT_D2;
			break;
		case PCI_POWERSTATE_D3:
			newPowerManagementStatus |= PCIM_PSTAT_D3;
			break;
		default:
			return EINVAL;
	}

	TRACE_PCI(dev, "%s: D%i -> D%i\n", __func__, oldPowerState, newPowerState);
	// write back the composed status value, not the raw power state number
	pci_write_config(dev, capabilityRegister + PCIR_POWER_STATUS,
		newPowerManagementStatus, 2);
	if (stateTransitionDelayInUs != 0)
		snooze(stateTransitionDelayInUs);

	return EOK;
}
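

/*
 * Illustrative sketch (not part of this file): typical interrupt setup in a
 * ported driver against the functions above, preferring a single MSI vector
 * (rid 1 selects the MSI resource in bus_alloc_resource()) and falling back
 * to the legacy INTx pin (rid 0). The foo_* names and softc fields are
 * hypothetical.
 *
 *	static void
 *	foo_intr(void *arg)
 *	{
 *		// arg is the softc passed to bus_setup_intr() below
 *	}
 *
 *	static int
 *	foo_setup_interrupt(device_t dev, struct foo_softc *sc)
 *	{
 *		int rid = 0;
 *		int count = 1;
 *
 *		if (pci_msi_count(dev) >= 1 && pci_alloc_msi(dev, &count) == 0)
 *			rid = 1;
 *
 *		sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *			RF_SHAREABLE | RF_ACTIVE);
 *		if (sc->irq_res == NULL)
 *			return ENXIO;
 *
 *		return bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *			NULL, foo_intr, sc, &sc->irq_handle);
 *	}
 */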