/*
 * Copyright 2011-2019, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 *		Jian Chiang <j.jian.chiang@gmail.com>
 *		Jérôme Duval <jerome.duval@gmail.com>
 *		Akshay Jaggi <akshay1994.leo@gmail.com>
 *		Michael Lotz <mmlr@mlotz.ch>
 */


#include <module.h>
#include <PCI.h>
#include <PCI_x86.h>
#include <USB3.h>
#include <KernelExport.h>

#include <util/AutoLock.h>

#include "xhci.h"

#define USB_MODULE_NAME "xhci"

pci_module_info *XHCI::sPCIModule = NULL;
pci_x86_module_info *XHCI::sPCIx86Module = NULL;


static int32
xhci_std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			TRACE_MODULE("xhci init module\n");
			return B_OK;
		case B_MODULE_UNINIT:
			TRACE_MODULE("xhci uninit module\n");
			return B_OK;
	}

	return EINVAL;
}


static const char*
xhci_error_string(uint32 error)
{
	switch (error) {
		case COMP_INVALID: return "Invalid";
		case COMP_SUCCESS: return "Success";
		case COMP_DATA_BUFFER: return "Data buffer";
		case COMP_BABBLE: return "Babble detected";
		case COMP_USB_TRANSACTION: return "USB transaction";
		case COMP_TRB: return "TRB";
		case COMP_STALL: return "Stall";
		case COMP_RESOURCE: return "Resource";
		case COMP_BANDWIDTH: return "Bandwidth";
		case COMP_NO_SLOTS: return "No slots";
		case COMP_INVALID_STREAM: return "Invalid stream";
		case COMP_SLOT_NOT_ENABLED: return "Slot not enabled";
		case COMP_ENDPOINT_NOT_ENABLED: return "Endpoint not enabled";
		case COMP_SHORT_PACKET: return "Short packet";
		case COMP_RING_UNDERRUN: return "Ring underrun";
		case COMP_RING_OVERRUN: return "Ring overrun";
		case COMP_VF_RING_FULL: return "VF Event Ring Full";
		case COMP_PARAMETER: return "Parameter";
		case COMP_BANDWIDTH_OVERRUN: return "Bandwidth overrun";
		case COMP_CONTEXT_STATE: return "Context state";
		case COMP_NO_PING_RESPONSE: return "No ping response";
		case COMP_EVENT_RING_FULL: return "Event ring full";
		case COMP_INCOMPATIBLE_DEVICE: return "Incompatible device";
		case COMP_MISSED_SERVICE: return "Missed service";
		case COMP_COMMAND_RING_STOPPED: return "Command ring stopped";
		case COMP_COMMAND_ABORTED: return "Command aborted";
		case COMP_STOPPED: return "Stopped";
		case COMP_LENGTH_INVALID: return "Length invalid";
		case COMP_MAX_EXIT_LATENCY: return "Max exit latency too large";
		case COMP_ISOC_OVERRUN: return "Isoch buffer overrun";
		case COMP_EVENT_LOST: return "Event lost";
		case COMP_UNDEFINED: return "Undefined";
		case COMP_INVALID_STREAM_ID: return "Invalid stream ID";
		case COMP_SECONDARY_BANDWIDTH: return "Secondary bandwidth";
		case COMP_SPLIT_TRANSACTION: return "Split transaction";

		default: return "Undefined";
	}
}


usb_host_controller_info xhci_module = {
	{
		"busses/usb/xhci",
		0,
		xhci_std_ops
	},
	NULL,
	XHCI::AddTo
};


module_info *modules[] = {
	(module_info *)&xhci_module,
	NULL
};


status_t
XHCI::AddTo(Stack *stack)
{
	if (!sPCIModule) {
		status_t status = get_module(B_PCI_MODULE_NAME,
			(module_info **)&sPCIModule);
		if (status < B_OK) {
			TRACE_MODULE_ERROR("getting pci module failed! 0x%08" B_PRIx32
				"\n", status);
			return status;
		}
	}

	TRACE_MODULE("searching devices\n");
	bool found = false;
	pci_info *item = new(std::nothrow) pci_info;
	if (item == NULL) {
		sPCIModule = NULL;
		put_module(B_PCI_MODULE_NAME);
		return B_NO_MEMORY;
	}

	// Try to get the PCI x86 module as well so we can enable possible MSIs.
	if (sPCIx86Module == NULL && get_module(B_PCI_X86_MODULE_NAME,
			(module_info **)&sPCIx86Module) != B_OK) {
		// If it isn't there, that's not critical though.
		TRACE_MODULE_ERROR("failed to get pci x86 module\n");
		sPCIx86Module = NULL;
	}

	for (int32 i = 0; sPCIModule->get_nth_pci_info(i, item) >= B_OK; i++) {
		if (item->class_base == PCI_serial_bus && item->class_sub == PCI_usb
			&& item->class_api == PCI_usb_xhci) {
			TRACE_MODULE("found device at PCI:%d:%d:%d\n",
				item->bus, item->device, item->function);
			XHCI *bus = new(std::nothrow) XHCI(item, stack);
			if (bus == NULL) {
				delete item;
				sPCIModule = NULL;
				put_module(B_PCI_MODULE_NAME);
				if (sPCIx86Module != NULL)
					put_module(B_PCI_X86_MODULE_NAME);
				return B_NO_MEMORY;
			}

			// The bus will put the PCI modules when it is destroyed, so get
			// them again to increase their reference count.
			get_module(B_PCI_MODULE_NAME, (module_info **)&sPCIModule);
			if (sPCIx86Module != NULL)
				get_module(B_PCI_X86_MODULE_NAME, (module_info **)&sPCIx86Module);

			if (bus->InitCheck() < B_OK) {
				TRACE_MODULE_ERROR("bus failed init check\n");
				delete bus;
				continue;
			}

			// The bus took ownership of the item; allocate a new one for the
			// next iteration.
			item = new(std::nothrow) pci_info;

			if (bus->Start() != B_OK) {
				delete bus;
				continue;
			}
			found = true;
		}
	}

	// The modules will have been re-acquired if we successfully initialized
	// a bus, so we should put our initial references here.
	put_module(B_PCI_MODULE_NAME);
	if (sPCIx86Module != NULL)
		put_module(B_PCI_X86_MODULE_NAME);

	if (!found)
		TRACE_MODULE_ERROR("no devices found\n");
	delete item;
	return found ? B_OK : ENODEV;
}


XHCI::XHCI(pci_info *info, Stack *stack)
	:	BusManager(stack),
		fRegisterArea(-1),
		fRegisters(NULL),
		fPCIInfo(info),
		fStack(stack),
		fIRQ(0),
		fUseMSI(false),
		fErstArea(-1),
		fDcbaArea(-1),
		fCmdCompSem(-1),
		fStopThreads(false),
		fRootHub(NULL),
		fRootHubAddress(0),
		fPortCount(0),
		fSlotCount(0),
		fScratchpadCount(0),
		fContextSizeShift(0),
		fFinishedHead(NULL),
		fFinishTransfersSem(-1),
		fFinishThread(-1),
		fEventSem(-1),
		fEventThread(-1),
		fEventIdx(0),
		fCmdIdx(0),
		fEventCcs(1),
		fCmdCcs(1)
{
	B_INITIALIZE_SPINLOCK(&fSpinlock);
	mutex_init(&fFinishedLock, "XHCI finished transfers");
	mutex_init(&fEventLock, "XHCI event handler");

	if (BusManager::InitCheck() < B_OK) {
		TRACE_ERROR("bus manager failed to init\n");
		return;
	}

	TRACE("constructing new XHCI host controller driver\n");
	fInitOK = false;

	// enable busmaster and memory mapped access
	uint16 command = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, PCI_command, 2);
	command &= ~(PCI_command_io | PCI_command_int_disable);
	command |= PCI_command_master | PCI_command_memory;

	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, PCI_command, 2, command);

	// map the registers (low + high for 64-bit when requested)
	phys_addr_t physicalAddress = fPCIInfo->u.h0.base_registers[0];
	physicalAddress &= PCI_address_memory_32_mask;
	if ((fPCIInfo->u.h0.base_register_flags[0] & 0xC) == PCI_address_type_64)
		physicalAddress += (phys_addr_t)fPCIInfo->u.h0.base_registers[1] << 32;

	size_t mapSize = fPCIInfo->u.h0.base_register_sizes[0];

	TRACE("map physical memory %08" B_PRIxPHYSADDR ", size: %" B_PRIuSIZE "\n",
		physicalAddress, mapSize);

	fRegisterArea = map_physical_memory("XHCI memory mapped registers",
		physicalAddress, mapSize, B_ANY_KERNEL_BLOCK_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void **)&fRegisters);
	if (fRegisterArea < B_OK) {
		TRACE_ERROR("failed to map register memory\n");
		return;
	}

	// determine the register offsets
	fCapabilityRegisterOffset = 0;
	fOperationalRegisterOffset = HCI_CAPLENGTH(ReadCapReg32(XHCI_HCI_CAPLENGTH));
	fRuntimeRegisterOffset = ReadCapReg32(XHCI_RTSOFF) & ~0x1F;
	fDoorbellRegisterOffset = ReadCapReg32(XHCI_DBOFF) & ~0x3;

	TRACE("mapped registers: %p\n", fRegisters);
	TRACE("operational register offset: %" B_PRId32 "\n", fOperationalRegisterOffset);
	TRACE("runtime register offset: %" B_PRId32 "\n", fRuntimeRegisterOffset);
	TRACE("doorbell register offset: %" B_PRId32 "\n", fDoorbellRegisterOffset);

	TRACE_ALWAYS("interface version: 0x%04" B_PRIx32 "\n",
		HCI_VERSION(ReadCapReg32(XHCI_HCI_VERSION)));
	TRACE_ALWAYS("structural parameters: 1:0x%08" B_PRIx32 " 2:0x%08"
		B_PRIx32 " 3:0x%08" B_PRIx32 "\n", ReadCapReg32(XHCI_HCSPARAMS1),
		ReadCapReg32(XHCI_HCSPARAMS2), ReadCapReg32(XHCI_HCSPARAMS3));

	uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS);
	if (cparams == 0xffffffff)
		return;
	TRACE_ALWAYS("capability params: 0x%08" B_PRIx32 "\n", cparams);

	// 1 if the controller uses 64-byte context structures, 0 for 32-byte ones
	fContextSizeShift = HCC_CSZ(cparams);

	// Assume ownership of the controller from the BIOS.
	uint32 eec = 0xffffffff;
	uint32 eecp = HCS0_XECP(cparams) << 2;
	for (; eecp != 0 && XECP_NEXT(eec); eecp += XECP_NEXT(eec) << 2) {
		TRACE("eecp register: 0x%08" B_PRIx32 "\n", eecp);

		eec = ReadCapReg32(eecp);
		if (XECP_ID(eec) != XHCI_LEGSUP_CAPID)
			continue;

		if (eec & XHCI_LEGSUP_BIOSOWNED) {
			TRACE_ALWAYS("the host controller is bios owned, claiming"
				" ownership\n");
			WriteCapReg32(eecp, eec | XHCI_LEGSUP_OSOWNED);

			for (int32 i = 0; i < 20; i++) {
				eec = ReadCapReg32(eecp);

				if ((eec & XHCI_LEGSUP_BIOSOWNED) == 0)
					break;

				TRACE_ALWAYS("controller is still bios owned, waiting\n");
				snooze(50000);
			}

			if (eec & XHCI_LEGSUP_BIOSOWNED) {
				TRACE_ERROR("bios won't give up control over the host "
					"controller (ignoring)\n");
			} else if (eec & XHCI_LEGSUP_OSOWNED) {
				TRACE_ALWAYS("successfully took ownership of the host "
					"controller\n");
			}

			// Force off the BIOS owned flag, and clear all SMIs. Some BIOSes
			// do indicate a successful handover but do not remove their SMIs
			// and then freeze the system when interrupts are generated.
			WriteCapReg32(eecp, eec & ~XHCI_LEGSUP_BIOSOWNED);
		}
		break;
	}
	uint32 legctlsts = ReadCapReg32(eecp + XHCI_LEGCTLSTS);
	legctlsts &= XHCI_LEGCTLSTS_DISABLE_SMI;
	legctlsts |= XHCI_LEGCTLSTS_EVENTS_SMI;
	WriteCapReg32(eecp + XHCI_LEGCTLSTS, legctlsts);

	// We need to explicitly take ownership of EHCI ports on earlier Intel chipsets.
	if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL) {
		switch (fPCIInfo->device_id) {
			case PCI_DEVICE_INTEL_PANTHER_POINT_XHCI:
			case PCI_DEVICE_INTEL_LYNX_POINT_XHCI:
			case PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI:
			case PCI_DEVICE_INTEL_BAYTRAIL_XHCI:
			case PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI:
			case PCI_DEVICE_INTEL_WILDCAT_POINT_LP_XHCI:
				_SwitchIntelPorts();
				break;
		}
	}

	// halt the host controller
	if (ControllerHalt() < B_OK) {
		return;
	}

	// reset the host controller
	if (ControllerReset() < B_OK) {
		TRACE_ERROR("host controller failed to reset\n");
		return;
	}

	fCmdCompSem = create_sem(0, "XHCI Command Complete");
	fFinishTransfersSem = create_sem(0, "XHCI Finish Transfers");
	fEventSem = create_sem(0, "XHCI Event");
	if (fFinishTransfersSem < B_OK || fCmdCompSem < B_OK || fEventSem < B_OK) {
		TRACE_ERROR("failed to create semaphores\n");
		return;
	}

	// create event handler thread
	fEventThread = spawn_kernel_thread(EventThread, "xhci event thread",
		B_URGENT_PRIORITY, (void *)this);
	resume_thread(fEventThread);

	// create finisher service thread
	fFinishThread = spawn_kernel_thread(FinishThread, "xhci finish thread",
		B_URGENT_PRIORITY - 1, (void *)this);
	resume_thread(fFinishThread);

	// Find the right interrupt vector, using MSIs if available.
	fIRQ = fPCIInfo->u.h0.interrupt_line;
	if (sPCIx86Module != NULL && sPCIx86Module->get_msi_count(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function) >= 1) {
		uint8 msiVector = 0;
		if (sPCIx86Module->configure_msi(fPCIInfo->bus, fPCIInfo->device,
				fPCIInfo->function, 1, &msiVector) == B_OK
			&& sPCIx86Module->enable_msi(fPCIInfo->bus, fPCIInfo->device,
				fPCIInfo->function) == B_OK) {
			TRACE_ALWAYS("using message signaled interrupts\n");
			fIRQ = msiVector;
			fUseMSI = true;
		}
	}

	if (fIRQ == 0 || fIRQ == 0xFF) {
		TRACE_MODULE_ERROR("device PCI:%d:%d:%d was assigned an invalid IRQ\n",
			fPCIInfo->bus, fPCIInfo->device, fPCIInfo->function);
		return;
	}

	// Install the interrupt handler
	TRACE("installing interrupt handler\n");
	install_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this, 0);

	memset(fPortSpeeds, 0, sizeof(fPortSpeeds));
	memset(fPortSlots, 0, sizeof(fPortSlots));
	memset(fDevices, 0, sizeof(fDevices));

	fInitOK = true;
	TRACE("XHCI host controller driver constructed\n");
}


XHCI::~XHCI()
{
	TRACE("tear down XHCI host controller driver\n");

	WriteOpReg(XHCI_CMD, 0);

	int32 result = 0;
	fStopThreads = true;
	delete_sem(fCmdCompSem);
	delete_sem(fFinishTransfersSem);
	delete_sem(fEventSem);
	wait_for_thread(fFinishThread, &result);
	wait_for_thread(fEventThread, &result);

	mutex_destroy(&fFinishedLock);
	mutex_destroy(&fEventLock);

	remove_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this);

	delete_area(fRegisterArea);
	delete_area(fErstArea);
	for (uint32 i = 0; i < fScratchpadCount; i++)
		delete_area(fScratchpadArea[i]);
	delete_area(fDcbaArea);

	if (fUseMSI && sPCIx86Module != NULL) {
		sPCIx86Module->disable_msi(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function);
		sPCIx86Module->unconfigure_msi(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function);
	}
	put_module(B_PCI_MODULE_NAME);
	if (sPCIx86Module != NULL)
		put_module(B_PCI_X86_MODULE_NAME);
}


void
XHCI::_SwitchIntelPorts()
{
	TRACE("Intel xHC Controller\n");
	TRACE("Looking for EHCI owned ports\n");
	uint32 ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3PRM, 4);
	TRACE("Superspeed Ports: 0x%" B_PRIx32 "\n", ports);
	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4, ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4);
	TRACE("Superspeed ports now under XHCI: 0x%" B_PRIx32 "\n", ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB2PRM, 4);
	TRACE("USB 2.0 Ports: 0x%" B_PRIx32 "\n", ports);
	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4, ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4);
	TRACE("USB 2.0 ports now under XHCI: 0x%" B_PRIx32 "\n", ports);
}


status_t
XHCI::Start()
{
	TRACE_ALWAYS("starting XHCI host controller\n");
	TRACE("usbcmd: 0x%08" B_PRIx32 "; usbsts: 0x%08" B_PRIx32 "\n",
		ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS));

	if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) {
		TRACE("Start() failed STS_CNR\n");
	}

	if ((ReadOpReg(XHCI_CMD) & CMD_RUN) != 0) {
		TRACE_ERROR("Start() warning: the XHCI controller is already running!\n");
	}

	if ((ReadOpReg(XHCI_PAGESIZE) & (1 << 0)) == 0) {
		TRACE_ERROR("Controller does not support 4K page size.\n");
		return B_ERROR;
	}

	// read port count from capability register
	uint32 capabilities = ReadCapReg32(XHCI_HCSPARAMS1);
	fPortCount = HCS_MAX_PORTS(capabilities);
	if (fPortCount == 0) {
		TRACE_ERROR("Invalid number of ports: %u\n", fPortCount);
		return B_ERROR;
	}

	fSlotCount = HCS_MAX_SLOTS(capabilities);
	if (fSlotCount > XHCI_MAX_DEVICES)
		fSlotCount = XHCI_MAX_DEVICES;
	WriteOpReg(XHCI_CONFIG, fSlotCount);

	// find out which protocol is used for each port
	uint8 portFound = 0;
	uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS);
	uint32 eec = 0xffffffff;
	uint32 eecp = HCS0_XECP(cparams) << 2;
	for (; eecp != 0 && XECP_NEXT(eec) && portFound < fPortCount;
			eecp += XECP_NEXT(eec) << 2) {
		eec = ReadCapReg32(eecp);
		if (XECP_ID(eec) != XHCI_SUPPORTED_PROTOCOLS_CAPID)
			continue;
		if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) > 3)
			continue;
		uint32 temp = ReadCapReg32(eecp + 8);
		uint32 offset = XHCI_SUPPORTED_PROTOCOLS_1_OFFSET(temp);
		uint32 count = XHCI_SUPPORTED_PROTOCOLS_1_COUNT(temp);
		if (offset == 0 || count == 0)
			continue;
		offset--;
		for (uint32 i = offset; i < offset + count; i++) {
			if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) == 0x3)
				fPortSpeeds[i] = USB_SPEED_SUPERSPEED;
			else
				fPortSpeeds[i] = USB_SPEED_HIGHSPEED;

			TRACE("speed for port %" B_PRId32 " is %s\n", i,
				fPortSpeeds[i] == USB_SPEED_SUPERSPEED ? "super" : "high");
		}
		portFound += count;
	}

	uint32 params2 = ReadCapReg32(XHCI_HCSPARAMS2);
	fScratchpadCount = HCS_MAX_SC_BUFFERS(params2);
	if (fScratchpadCount > XHCI_MAX_SCRATCHPADS) {
		TRACE_ERROR("Invalid number of scratchpads: %" B_PRIu32 "\n",
			fScratchpadCount);
		return B_ERROR;
	}

	uint32 params3 = ReadCapReg32(XHCI_HCSPARAMS3);
	fExitLatMax = HCS_U1_DEVICE_LATENCY(params3)
		+ HCS_U2_DEVICE_LATENCY(params3);

	// clear interrupts & disable device notifications
	WriteOpReg(XHCI_STS, ReadOpReg(XHCI_STS));
	WriteOpReg(XHCI_DNCTRL, 0);

	// allocate Device Context Base Address array
	phys_addr_t dmaAddress;
	fDcbaArea = fStack->AllocateArea((void **)&fDcba, &dmaAddress,
		sizeof(*fDcba), "DCBA Area");
	if (fDcbaArea < B_OK) {
		TRACE_ERROR("unable to create the DCBA area\n");
		return B_ERROR;
	}
	memset(fDcba, 0, sizeof(*fDcba));
	memset(fScratchpadArea, 0, sizeof(fScratchpadArea));
	memset(fScratchpad, 0, sizeof(fScratchpad));

	// setting the first address to the scratchpad array address
	fDcba->baseAddress[0] = dmaAddress
		+ offsetof(struct xhci_device_context_array, scratchpad);

	// fill up the scratchpad array with scratchpad pages
	for (uint32 i = 0; i < fScratchpadCount; i++) {
		phys_addr_t scratchDmaAddress;
		fScratchpadArea[i] = fStack->AllocateArea((void **)&fScratchpad[i],
			&scratchDmaAddress, B_PAGE_SIZE, "Scratchpad Area");
		if (fScratchpadArea[i] < B_OK) {
			TRACE_ERROR("unable to create the scratchpad area\n");
			return B_ERROR;
		}
		fDcba->scratchpad[i] = scratchDmaAddress;
	}

	TRACE("setting DCBAAP %" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteOpReg(XHCI_DCBAAP_LO, (uint32)dmaAddress);
	WriteOpReg(XHCI_DCBAAP_HI, (uint32)(dmaAddress >> 32));
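
	// Layout of the single area allocated below (sketch; the sizes come from
	// the AllocateArea call that follows):
	//
	//   [ xhci_erst_element ][ event ring: XHCI_MAX_EVENTS TRBs ]
	//                        [ command ring: XHCI_MAX_COMMANDS TRBs ]
	//
	// fErst points at the start, the event ring directly follows the ERST
	// element, and the command ring follows the event ring; dmaAddress is
	// advanced accordingly before being written to CRCR further down.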

	// allocate Event Ring Segment Table
	uint8 *addr;
	fErstArea = fStack->AllocateArea((void **)&addr, &dmaAddress,
		(XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb)
		+ sizeof(xhci_erst_element),
		"USB XHCI ERST CMD_RING and EVENT_RING Area");

	if (fErstArea < B_OK) {
		TRACE_ERROR("unable to create the ERST AND RING area\n");
		delete_area(fDcbaArea);
		return B_ERROR;
	}
	fErst = (xhci_erst_element *)addr;
	memset(fErst, 0, (XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb)
		+ sizeof(xhci_erst_element));

	// fill with Event Ring Segment Base Address and Event Ring Segment Size
	fErst->rs_addr = dmaAddress + sizeof(xhci_erst_element);
	fErst->rs_size = XHCI_MAX_EVENTS;
	fErst->rsvdz = 0;

	addr += sizeof(xhci_erst_element);
	fEventRing = (xhci_trb *)addr;
	addr += XHCI_MAX_EVENTS * sizeof(xhci_trb);
	fCmdRing = (xhci_trb *)addr;

	TRACE("setting ERST size\n");
	WriteRunReg32(XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	TRACE("setting ERDP addr = 0x%" B_PRIx64 "\n", fErst->rs_addr);
	WriteRunReg32(XHCI_ERDP_LO(0), (uint32)fErst->rs_addr);
	WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(fErst->rs_addr >> 32));

	TRACE("setting ERST base addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteRunReg32(XHCI_ERSTBA_LO(0), (uint32)dmaAddress);
	WriteRunReg32(XHCI_ERSTBA_HI(0), (uint32)(dmaAddress >> 32));

	dmaAddress += sizeof(xhci_erst_element) + XHCI_MAX_EVENTS
		* sizeof(xhci_trb);

	// Make sure the Command Ring is stopped
	if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) {
		TRACE_ALWAYS("Command Ring is running, send stop/cancel\n");
		WriteOpReg(XHCI_CRCR_LO, CRCR_CS);
		WriteOpReg(XHCI_CRCR_HI, 0);
		WriteOpReg(XHCI_CRCR_LO, CRCR_CA);
		WriteOpReg(XHCI_CRCR_HI, 0);
		snooze(1000);
		if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) {
			TRACE_ERROR("Command Ring still running after stop/cancel\n");
		}
	}
	TRACE("setting CRCR addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteOpReg(XHCI_CRCR_LO, (uint32)dmaAddress | CRCR_RCS);
	WriteOpReg(XHCI_CRCR_HI, (uint32)(dmaAddress >> 32));
	// link trb
	fCmdRing[XHCI_MAX_COMMANDS - 1].address = dmaAddress;

	TRACE("setting interrupt rate\n");

	// Setting IMOD below 0x3F8 on Intel Lynx Point can cause IRQ lockups
	if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL
		&& (fPCIInfo->device_id == PCI_DEVICE_INTEL_PANTHER_POINT_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_BAYTRAIL_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI)) {
		WriteRunReg32(XHCI_IMOD(0), 0x000003f8); // 4000 irq/s
	} else {
		WriteRunReg32(XHCI_IMOD(0), 0x000001f4); // 8000 irq/s
	}

	TRACE("enabling interrupt\n");
	WriteRunReg32(XHCI_IMAN(0), ReadRunReg32(XHCI_IMAN(0)) | IMAN_INTR_ENA);

	WriteOpReg(XHCI_CMD, CMD_RUN | CMD_INTE | CMD_HSEE);

	// wait for start up state
	if (WaitOpBits(XHCI_STS, STS_HCH, 0) != B_OK) {
		TRACE_ERROR("HCH start up timeout\n");
	}

	fRootHubAddress = AllocateAddress();
	fRootHub = new(std::nothrow) XHCIRootHub(RootObject(), fRootHubAddress);
	if (!fRootHub) {
		TRACE_ERROR("no memory to allocate root hub\n");
		return B_NO_MEMORY;
	}

	if (fRootHub->InitCheck() < B_OK) {
		TRACE_ERROR("root hub failed init check\n");
		return fRootHub->InitCheck();
	}

	SetRootHub(fRootHub);

	TRACE_ALWAYS("successfully started the controller\n");
#ifdef TRACE_USB
	TRACE("No-Op test...\n");
	status_t noopResult = Noop();
	TRACE("No-Op %ssuccessful\n", noopResult < B_OK ? "un" : "");
#endif

	//DumpRing(fCmdRing, (XHCI_MAX_COMMANDS - 1));

	return BusManager::Start();
}


status_t
XHCI::SubmitTransfer(Transfer *transfer)
{
	// short circuit the root hub
	if (transfer->TransferPipe()->DeviceAddress() == fRootHubAddress)
		return fRootHub->ProcessTransfer(this, transfer);

	TRACE("SubmitTransfer()\n");
	Pipe *pipe = transfer->TransferPipe();
	if ((pipe->Type() & USB_OBJECT_CONTROL_PIPE) != 0)
		return SubmitControlRequest(transfer);
	return SubmitNormalRequest(transfer);
}


status_t
XHCI::SubmitControlRequest(Transfer *transfer)
{
	Pipe *pipe = transfer->TransferPipe();
	usb_request_data *requestData = transfer->RequestData();
	bool directionIn = (requestData->RequestType & USB_REQTYPE_DEVICE_IN) != 0;

	TRACE("SubmitControlRequest() length %d\n", requestData->Length);

	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL) {
		TRACE_ERROR("invalid endpoint!\n");
		return B_BAD_VALUE;
	}
	status_t status = transfer->InitKernelAccess();
	if (status != B_OK)
		return status;

	xhci_td *descriptor = CreateDescriptor(3, 1, requestData->Length);
	if (descriptor == NULL)
		return B_NO_MEMORY;
	descriptor->transfer = transfer;

	// Setup Stage
	uint8 index = 0;
	memcpy(&descriptor->trbs[index].address, requestData,
		sizeof(usb_request_data));
	descriptor->trbs[index].status = TRB_2_IRQ(0) | TRB_2_BYTES(8);
	descriptor->trbs[index].flags
		= TRB_3_TYPE(TRB_TYPE_SETUP_STAGE) | TRB_3_IDT_BIT | TRB_3_CYCLE_BIT;
	if (requestData->Length > 0) {
		descriptor->trbs[index].flags |=
			directionIn ? TRB_3_TRT_IN : TRB_3_TRT_OUT;
	}

	index++;

	// Data Stage (if any)
	if (requestData->Length > 0) {
		descriptor->trbs[index].address = descriptor->buffer_addrs[0];
		descriptor->trbs[index].status = TRB_2_IRQ(0)
			| TRB_2_BYTES(requestData->Length)
			| TRB_2_TD_SIZE(0);
		descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_DATA_STAGE)
			| (directionIn ? (TRB_3_DIR_IN | TRB_3_ISP_BIT) : 0)
			| TRB_3_CYCLE_BIT;

		if (!directionIn) {
			transfer->PrepareKernelAccess();
			memcpy(descriptor->buffers[0],
				(uint8 *)transfer->Vector()[0].iov_base, requestData->Length);
		}

		index++;
	}

	// Status Stage
	descriptor->trbs[index].address = 0;
	descriptor->trbs[index].status = TRB_2_IRQ(0);
	descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_STATUS_STAGE)
		| ((directionIn && requestData->Length > 0) ? 0 : TRB_3_DIR_IN)
		| TRB_3_CHAIN_BIT | TRB_3_ENT_BIT | TRB_3_CYCLE_BIT;
		// Status Stage is an OUT transfer when the device is sending data
		// (XHCI 1.2 § 4.11.2.2 Table 4-7 p213), and the CHAIN bit must be
		// set when using an Event Data TRB (as _LinkDescriptorForPipe does)
		// (XHCI 1.2 § 6.4.1.2.3 Table 6-31 p472)

	descriptor->trb_used = index + 1;

	status = _LinkDescriptorForPipe(descriptor, endpoint);
	if (status != B_OK) {
		FreeDescriptor(descriptor);
		return status;
	}
	TRACE("SubmitControlRequest() request linked\n");

	return B_OK;
}


status_t
XHCI::SubmitNormalRequest(Transfer *transfer)
{
	TRACE("SubmitNormalRequest() length %ld\n", transfer->DataLength());

	Pipe *pipe = transfer->TransferPipe();
	usb_isochronous_data *isochronousData = transfer->IsochronousData();
	bool directionIn = (pipe->Direction() == Pipe::In);

	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL)
		return B_BAD_VALUE;

	status_t status = transfer->InitKernelAccess();
	if (status != B_OK)
		return status;

	// Compute the size to use for the TRBs, and then how many TRBs
	// of this size we will need. We always need at least 1, of course.
	size_t dataLength = transfer->DataLength(),
		packetSize = pipe->MaxPacketSize(),
		packetsPerTrb = 4;

	if (isochronousData != NULL) {
		if (isochronousData->packet_count == 0)
			return B_BAD_VALUE;

		// Isochronous transfers use more specifically sized packets.
		packetSize = transfer->DataLength() / isochronousData->packet_count;
		if (packetSize > pipe->MaxPacketSize() || packetSize
				!= (size_t)isochronousData->packet_descriptors[0].request_length)
			return B_BAD_VALUE;
		packetsPerTrb = 1;
	}

	// Now that we know packetSize & packetsPerTrb, compute TRB size and count.
	const size_t trbSize = packetsPerTrb * packetSize;
	const int32 trbCount = (dataLength + trbSize - 1) / trbSize;

	xhci_td *td = CreateDescriptor(trbCount, trbCount, trbSize);
	if (td == NULL)
		return B_NO_MEMORY;

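	// A worked example of the sizing above (hypothetical values, for
	// illustration only): a 16384-byte bulk transfer on a pipe with a
	// 512-byte max packet size gives trbSize = 4 * 512 = 2048 and
	// trbCount = 8. The "TD Size" countdown written below then starts at
	// (16384 - 2048) / 512 = 28 (it would be capped at 31 if larger) and
	// drops by packetsPerTrb (4) per TRB, reaching 0 on the final TRB.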
	// Normal Stage
	size_t remaining = dataLength;
	int32 remainingPackets = (remaining - trbSize) / packetSize;
	for (int32 i = 0; i < trbCount; i++) {
		// The "TD Size" field of a transfer TRB indicates the number of
		// remaining maximum-size *packets* in this TD, *not* including the
		// packets in the current TRB, and capped at 31 if there are more
		// than 31 packets remaining in the TD. (XHCI 1.1 § 4.11.2.4 p210.)
		int32 tdSize = remainingPackets > 31 ? 31 : remainingPackets;
		if (tdSize < 0)
			tdSize = 0;
		int32 trbLength = remaining < trbSize ? remaining : trbSize;

		td->trbs[i].address = td->buffer_addrs[i];
		td->trbs[i].status = TRB_2_IRQ(0)
			| TRB_2_BYTES(trbLength)
			| TRB_2_TD_SIZE(tdSize);
		td->trbs[i].flags = TRB_3_TYPE(TRB_TYPE_NORMAL)
			| TRB_3_CYCLE_BIT | TRB_3_CHAIN_BIT
			| (directionIn ? TRB_3_ISP_BIT : 0);

		td->trb_used++;
		remaining -= trbLength;
		remainingPackets -= packetsPerTrb;
	}

	// Isochronous-specific
	if (isochronousData != NULL) {
		// This is an isochronous transfer; we need to make the first TRB
		// an isochronous TRB.
		td->trbs[0].flags &= ~(TRB_3_TYPE(TRB_TYPE_NORMAL));
		td->trbs[0].flags |= TRB_3_TYPE(TRB_TYPE_ISOCH);

		// Isochronous pipes are scheduled in microframes, which are 125us
		// on USB 2 and above; USB 1 uses 1ms frames instead, so we need a
		// different frame delta in that case.
		uint8 frameDelta = 1;
		if (transfer->TransferPipe()->Speed() == USB_SPEED_FULLSPEED)
			frameDelta = 8;

		// TODO: We do not currently take Mult into account at all!
		// How are we supposed to do that here?

		// Determine the (starting) frame number: if ISO_ASAP is set,
		// we are queueing this "right away", and so want to reset
		// the starting_frame_number. Otherwise we use the passed one.
		uint32 frame;
		if ((isochronousData->flags & USB_ISO_ASAP) != 0
				|| isochronousData->starting_frame_number == NULL) {
			frame = ReadRunReg32(XHCI_MFINDEX) + 1;
				// TODO: The +1 comes from the XHCI spec; document that.
			td->trbs[0].flags |= TRB_3_ISO_SIA_BIT;
		} else {
			frame = *isochronousData->starting_frame_number;
			td->trbs[0].flags |= TRB_3_FRID(frame);
		}
		frame = (frame + frameDelta) % 2048;
		if (isochronousData->starting_frame_number != NULL)
			*isochronousData->starting_frame_number = frame;

		// TODO: The OHCI bus driver seems to also do this for inbound
		// isochronous transfers. Perhaps it should be moved into the stack?
		if (directionIn) {
			for (uint32 i = 0; i < isochronousData->packet_count; i++) {
				isochronousData->packet_descriptors[i].actual_length = 0;
				isochronousData->packet_descriptors[i].status = B_NO_INIT;
			}
		}
	}

	// Set the ENT (Evaluate Next TRB) bit, so that the HC will not switch
	// contexts before evaluating the Link TRB that _LinkDescriptorForPipe
	// will insert, as otherwise there would be a race between us freeing
	// and unlinking the descriptor, and the controller evaluating the Link TRB
	// and thus getting back onto the main ring and executing the Event Data
	// TRB that generates the interrupt for this transfer.
	//
	// Note that we *do not* unset the CHAIN bit in this TRB, thus including
	// the Link TRB in this TD formally, which is required when using the
	// ENT bit. (XHCI 1.2 § 4.12.3 p250.)
	td->trbs[td->trb_used - 1].flags |= TRB_3_ENT_BIT;

	if (!directionIn) {
		TRACE("copying out iov count %ld\n", transfer->VectorCount());
		status_t status = transfer->PrepareKernelAccess();
		if (status != B_OK) {
			FreeDescriptor(td);
			return status;
		}
		WriteDescriptor(td, transfer->Vector(), transfer->VectorCount());
	}

	td->transfer = transfer;
	status = _LinkDescriptorForPipe(td, endpoint);
	if (status != B_OK) {
		FreeDescriptor(td);
		return status;
	}
	TRACE("SubmitNormalRequest() request linked\n");

	return B_OK;
}


status_t
XHCI::CancelQueuedTransfers(Pipe *pipe, bool force)
{
	xhci_endpoint* endpoint = (xhci_endpoint*)pipe->ControllerCookie();
	if (endpoint == NULL || endpoint->trbs == NULL) {
		// Someone's de-allocated this pipe or endpoint in the meantime.
		// (Possibly AllocateDevice failed, and we were the temporary pipe.)
		return B_NO_INIT;
	}

	TRACE_ALWAYS("cancel queued transfers (%" B_PRId8 ") for pipe %p (%d)\n",
		endpoint->used, pipe, pipe->EndpointAddress());

	MutexLocker endpointLocker(endpoint->lock);

	if (endpoint->td_head == NULL) {
		// There aren't any currently pending transfers to cancel.
		return B_OK;
	}

	// Get the head TD from the endpoint.
	xhci_td* td_head = endpoint->td_head;
	endpoint->td_head = NULL;

	// We don't want to call the callbacks while holding the endpoint lock,
	// as they could potentially cause deadlocks, so we instead store
	// them in a pointer array. We need to do this separately from freeing
	// the TDs because, if we fail to stop the endpoint, we cancel the
	// transfers but do not free the TDs.
	Transfer* transfers[XHCI_MAX_TRANSFERS];
	int32 transfersCount = 0;

	// We can't cancel or delete transfers under "force", as they probably
	// are not safe to use anymore.
	for (xhci_td* td = td_head; td != NULL; td = td->next) {
		if (td->transfer == NULL)
			continue;
		if (!force) {
			transfers[transfersCount] = td->transfer;
			transfersCount++;
		}
		td->transfer = NULL;
	}

	status_t status = StopEndpoint(false, endpoint->id + 1,
		endpoint->device->slot);
	if (status == B_OK) {
		// Clear the endpoint's TRBs.
		memset(endpoint->trbs, 0, sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE);
		endpoint->used = 0;
		endpoint->current = 0;

		// Set dequeue pointer location to the beginning of the ring.
		SetTRDequeue(endpoint->trb_addr, 0, endpoint->id + 1,
			endpoint->device->slot);

		// We don't need to do anything else to restart the ring, as it will resume
		// operation as normal upon the next doorbell. (XHCI 1.1 § 4.6.9 p132.)
	} else {
		// We couldn't stop the endpoint. Most likely the device has been
		// removed and the endpoint was stopped by the hardware, or is
		// for some reason busy and cannot be stopped.
		TRACE_ERROR("cancel queued transfers: could not stop endpoint: %s!\n",
			strerror(status));

		// Instead of freeing the TDs, we want to leave them in the endpoint
		// so that when/if the hardware returns, they can be properly unlinked,
		// as otherwise the endpoint could get "stuck" as its "used" count
		// slowly accumulates due to "dead" transfers.
		endpoint->td_head = td_head;
		td_head = NULL;
	}

	endpointLocker.Unlock();

	for (int32 i = 0; i < transfersCount; i++) {
		transfers[i]->Finished(B_CANCELED, 0);
		delete transfers[i];
	}

	// This loop looks a bit strange because we need to store the "next"
	// pointer before freeing the descriptor.
	xhci_td* td;
	while ((td = td_head) != NULL) {
		td_head = td_head->next;
		FreeDescriptor(td);
	}

	return B_OK;
}


status_t
XHCI::StartDebugTransfer(Transfer *transfer)
{
	Pipe *pipe = transfer->TransferPipe();
	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL)
		return B_BAD_VALUE;

	// Check all locks that we are going to hit when running transfers.
	if (mutex_trylock(&endpoint->lock) != B_OK)
		return B_WOULD_BLOCK;
	if (mutex_trylock(&fFinishedLock) != B_OK) {
		mutex_unlock(&endpoint->lock);
		return B_WOULD_BLOCK;
	}
	if (mutex_trylock(&fEventLock) != B_OK) {
		mutex_unlock(&endpoint->lock);
		mutex_unlock(&fFinishedLock);
		return B_WOULD_BLOCK;
	}
	mutex_unlock(&endpoint->lock);
	mutex_unlock(&fFinishedLock);
	mutex_unlock(&fEventLock);

	status_t status = SubmitTransfer(transfer);
	if (status != B_OK)
		return status;

	// The endpoint's head TD is the TD of the just-submitted transfer.
	// Just like EHCI, abuse the callback cookie to hold the TD pointer.
	transfer->SetCallback(NULL, endpoint->td_head);

	return B_OK;
}


status_t
XHCI::CheckDebugTransfer(Transfer *transfer)
{
	xhci_td *transfer_td = (xhci_td *)transfer->CallbackCookie();
	if (transfer_td == NULL)
		return B_NO_INIT;

	// Process events once, and then look for it in the finished list.
	ProcessEvents();
	xhci_td *previous = NULL;
	for (xhci_td *td = fFinishedHead; td != NULL; td = td->next) {
		if (td != transfer_td) {
			previous = td;
			continue;
		}

		// We've found it!
		if (previous == NULL) {
			fFinishedHead = fFinishedHead->next;
		} else {
			previous->next = td->next;
		}

		bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out);
		status_t status = (td->trb_completion_code == COMP_SUCCESS
			|| td->trb_completion_code == COMP_SHORT_PACKET) ? B_OK : B_ERROR;

		if (status == B_OK && directionIn)
			ReadDescriptor(td, transfer->Vector(), transfer->VectorCount());

		FreeDescriptor(td);
		transfer->SetCallback(NULL, NULL);
		return status;
	}

	// We didn't find it.
	spin(75);
	return B_DEV_PENDING;
}


void
XHCI::CancelDebugTransfer(Transfer *transfer)
{
	while (CheckDebugTransfer(transfer) == B_DEV_PENDING)
		spin(100);
}


status_t
XHCI::NotifyPipeChange(Pipe *pipe, usb_change change)
{
	TRACE("pipe change %d for pipe %p (%d)\n", change, pipe,
		pipe->EndpointAddress());

	switch (change) {
		case USB_CHANGE_CREATED:
			return _InsertEndpointForPipe(pipe);
		case USB_CHANGE_DESTROYED:
			return _RemoveEndpointForPipe(pipe);

		case USB_CHANGE_PIPE_POLICY_CHANGED:
			// We don't care about these, at least for now.
			return B_OK;
	}

	TRACE_ERROR("unknown pipe change!\n");
	return B_UNSUPPORTED;
}


xhci_td *
XHCI::CreateDescriptor(uint32 trbCount, uint32 bufferCount, size_t bufferSize)
{
	const bool inKDL = debug_debugger_running();

	xhci_td *result;
	if (!inKDL) {
		result = (xhci_td*)calloc(1, sizeof(xhci_td));
	} else {
		// Just use the physical memory allocator while in KDL; it's less
		// secure than using the regular heap, but it's easier to deal with.
		phys_addr_t dummy;
		fStack->AllocateChunk((void **)&result, &dummy, sizeof(xhci_td));
	}

	if (result == NULL) {
		TRACE_ERROR("failed to allocate a transfer descriptor\n");
		return NULL;
	}

	// We always allocate 1 more TRB than requested, so that
	// _LinkDescriptorForPipe() has room to insert a link TRB.
	trbCount++;
	if (fStack->AllocateChunk((void **)&result->trbs, &result->trb_addr,
			(trbCount * sizeof(xhci_trb))) < B_OK) {
		TRACE_ERROR("failed to allocate TRBs\n");
		FreeDescriptor(result);
		return NULL;
	}
	result->trb_count = trbCount;
	result->trb_used = 0;

	if (bufferSize > 0) {
		// Due to how the USB stack allocates physical memory, we can't just
		// request one large chunk the size of the transfer, and so instead we
		// create a series of buffers as requested by our caller.

		// We store the buffer pointers and addresses in one memory block.
		if (!inKDL) {
			result->buffers = (void**)calloc(bufferCount,
				(sizeof(void*) + sizeof(phys_addr_t)));
		} else {
			phys_addr_t dummy;
			fStack->AllocateChunk((void **)&result->buffers, &dummy,
				bufferCount * (sizeof(void*) + sizeof(phys_addr_t)));
		}
		if (result->buffers == NULL) {
			TRACE_ERROR("unable to allocate space for buffer infos\n");
			FreeDescriptor(result);
			return NULL;
		}
		result->buffer_addrs = (phys_addr_t*)&result->buffers[bufferCount];

		// Optimization: If the requested total size of all buffers is less
		// than 32*B_PAGE_SIZE (the maximum size that the physical memory
		// allocator can handle), we allocate only one buffer and segment it.
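		// For example (illustrative numbers only): a request for 8 buffers of
		// 2048 bytes each totals 16 KiB, well below 32 * B_PAGE_SIZE, so a
		// single 16 KiB chunk is allocated and the buffer pointers and
		// physical addresses below simply point into it at 2048-byte steps.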
		size_t totalSize = bufferSize * bufferCount;
		if (totalSize < (32 * B_PAGE_SIZE)) {
			if (fStack->AllocateChunk(&result->buffers[0],
					&result->buffer_addrs[0], totalSize) < B_OK) {
				TRACE_ERROR("unable to allocate space for large buffer (size %ld)\n",
					totalSize);
				FreeDescriptor(result);
				return NULL;
			}
			for (uint32 i = 1; i < bufferCount; i++) {
				result->buffers[i] = (void*)((addr_t)(result->buffers[i - 1])
					+ bufferSize);
				result->buffer_addrs[i] = result->buffer_addrs[i - 1]
					+ bufferSize;
			}
		} else {
			// Otherwise, we allocate each buffer individually.
			for (uint32 i = 0; i < bufferCount; i++) {
				if (fStack->AllocateChunk(&result->buffers[i],
						&result->buffer_addrs[i], bufferSize) < B_OK) {
					TRACE_ERROR("unable to allocate space for the buffer (size %ld)\n",
						bufferSize);
					FreeDescriptor(result);
					return NULL;
				}
			}
		}
	} else {
		result->buffers = NULL;
		result->buffer_addrs = NULL;
	}
	result->buffer_size = bufferSize;
	result->buffer_count = bufferCount;

	// Initialize all other fields.
	result->transfer = NULL;
	result->trb_completion_code = 0;
	result->trb_left = 0;
	result->next = NULL;

	TRACE("CreateDescriptor allocated %p, buffer_size %ld, buffer_count %" B_PRIu32 "\n",
		result, result->buffer_size, result->buffer_count);

	return result;
}


void
XHCI::FreeDescriptor(xhci_td *descriptor)
{
	if (descriptor == NULL)
		return;

	const bool inKDL = debug_debugger_running();

	if (descriptor->trbs != NULL) {
		fStack->FreeChunk(descriptor->trbs, descriptor->trb_addr,
			(descriptor->trb_count * sizeof(xhci_trb)));
	}
	if (descriptor->buffers != NULL) {
		size_t totalSize = descriptor->buffer_size * descriptor->buffer_count;
		if (totalSize < (32 * B_PAGE_SIZE)) {
			// This was allocated as one contiguous buffer.
			fStack->FreeChunk(descriptor->buffers[0], descriptor->buffer_addrs[0],
				totalSize);
		} else {
			for (uint32 i = 0; i < descriptor->buffer_count; i++) {
				if (descriptor->buffers[i] == NULL)
					continue;
				fStack->FreeChunk(descriptor->buffers[i], descriptor->buffer_addrs[i],
					descriptor->buffer_size);
			}
		}

		if (!inKDL) {
			free(descriptor->buffers);
		} else {
			fStack->FreeChunk(descriptor->buffers, 0,
				descriptor->buffer_count * (sizeof(void*) + sizeof(phys_addr_t)));
		}
	}

	if (!inKDL)
		free(descriptor);
	else
		fStack->FreeChunk(descriptor, 0, sizeof(xhci_td));
}


size_t
XHCI::WriteDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount)
{
	size_t written = 0;

	size_t bufIdx = 0, bufUsed = 0;
	for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) {
		size_t length = vector[vecIdx].iov_len;

		while (length > 0 && bufIdx < descriptor->buffer_count) {
			size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed);
			memcpy((uint8 *)descriptor->buffers[bufIdx] + bufUsed,
				(uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length),
				toCopy);

			written += toCopy;
			bufUsed += toCopy;
			length -= toCopy;
			if (bufUsed == descriptor->buffer_size) {
				bufIdx++;
				bufUsed = 0;
			}
		}
	}

	TRACE("wrote descriptor (%" B_PRIuSIZE " bytes)\n", written);
	return written;
}


size_t
XHCI::ReadDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount)
{
	size_t read = 0;

	size_t bufIdx = 0, bufUsed = 0;
	for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) {
		size_t length = vector[vecIdx].iov_len;

		while (length > 0 && bufIdx < descriptor->buffer_count) {
			size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed);
			memcpy((uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length),
				(uint8 *)descriptor->buffers[bufIdx] + bufUsed, toCopy);

			read += toCopy;
			bufUsed += toCopy;
			length -= toCopy;
			if (bufUsed == descriptor->buffer_size) {
				bufIdx++;
				bufUsed = 0;
			}
		}
	}

	TRACE("read descriptor (%" B_PRIuSIZE " bytes)\n", read);
	return read;
}


Device *
XHCI::AllocateDevice(Hub *parent, int8 hubAddress, uint8 hubPort,
	usb_speed speed)
{
	TRACE("AllocateDevice hubAddress %d hubPort %d speed %d\n", hubAddress,
		hubPort, speed);

	uint8 slot = XHCI_MAX_SLOTS;
	if (EnableSlot(&slot) != B_OK) {
		TRACE_ERROR("AllocateDevice() failed enable slot\n");
		return NULL;
	}

	if (slot == 0 || slot > fSlotCount) {
		TRACE_ERROR("AllocateDevice() bad slot\n");
		return NULL;
	}

	if (fDevices[slot].state != XHCI_STATE_DISABLED) {
		TRACE_ERROR("AllocateDevice() slot already used\n");
		return NULL;
	}

	struct xhci_device *device = &fDevices[slot];
	memset(device, 0, sizeof(struct xhci_device));
	device->state = XHCI_STATE_ENABLED;
	device->slot = slot;

	device->input_ctx_area = fStack->AllocateArea((void **)&device->input_ctx,
		&device->input_ctx_addr, sizeof(*device->input_ctx) << fContextSizeShift,
		"XHCI input context");
	if (device->input_ctx_area < B_OK) {
		TRACE_ERROR("unable to create an input context area\n");
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	memset(device->input_ctx, 0, sizeof(*device->input_ctx) << fContextSizeShift);
	_WriteContext(&device->input_ctx->input.dropFlags, 0);
	_WriteContext(&device->input_ctx->input.addFlags, 3);

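	// Build the route string by walking up the chain of hubs towards the
	// root. Per the xHCI spec it has one 4-bit field per hub tier, with the
	// tier nearest the root hub in the least significant nibble; rhPort ends
	// up being the root hub port this device chain hangs off of.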
	uint32 route = 0;
	uint8 routePort = hubPort;
	uint8 rhPort = hubPort;
	for (Device *hubDevice = parent; hubDevice != RootObject();
			hubDevice = (Device *)hubDevice->Parent()) {

		rhPort = routePort;
		if (hubDevice->Parent() == RootObject())
			break;
		route *= 16;
		if (hubPort > 15)
			route += 15;
		else
			route += routePort;

		routePort = hubDevice->HubPort();
	}

	// Get the speed of the port, but only if the device is connected to a
	// root hub port; otherwise we have to rely on the value reported by the
	// Hub Explore thread.
	if (route == 0) {
		GetPortSpeed(hubPort - 1, &speed);
		TRACE("speed updated %d\n", speed);
	}

	uint32 dwslot0 = SLOT_0_NUM_ENTRIES(1) | SLOT_0_ROUTE(route);

	// add the speed
	switch (speed) {
		case USB_SPEED_LOWSPEED:
			dwslot0 |= SLOT_0_SPEED(2);
			break;
		case USB_SPEED_HIGHSPEED:
			dwslot0 |= SLOT_0_SPEED(3);
			break;
		case USB_SPEED_FULLSPEED:
			dwslot0 |= SLOT_0_SPEED(1);
			break;
		case USB_SPEED_SUPERSPEED:
			dwslot0 |= SLOT_0_SPEED(4);
			break;
		default:
			TRACE_ERROR("unknown usb speed\n");
			break;
	}

	_WriteContext(&device->input_ctx->slot.dwslot0, dwslot0);
	// TODO enable power save
	_WriteContext(&device->input_ctx->slot.dwslot1, SLOT_1_RH_PORT(rhPort));
	uint32 dwslot2 = SLOT_2_IRQ_TARGET(0);

	// If LS/FS device connected to non-root HS device
	if (route != 0 && parent->Speed() == USB_SPEED_HIGHSPEED
		&& (speed == USB_SPEED_LOWSPEED || speed == USB_SPEED_FULLSPEED)) {
		struct xhci_device *parenthub = (struct xhci_device *)
			parent->ControllerCookie();
		dwslot2 |= SLOT_2_PORT_NUM(hubPort);
		dwslot2 |= SLOT_2_TT_HUB_SLOT(parenthub->slot);
	}

	_WriteContext(&device->input_ctx->slot.dwslot2, dwslot2);

	_WriteContext(&device->input_ctx->slot.dwslot3, SLOT_3_SLOT_STATE(0)
		| SLOT_3_DEVICE_ADDRESS(0));

	TRACE("slot 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32
		"\n", _ReadContext(&device->input_ctx->slot.dwslot0),
		_ReadContext(&device->input_ctx->slot.dwslot1),
		_ReadContext(&device->input_ctx->slot.dwslot2),
		_ReadContext(&device->input_ctx->slot.dwslot3));

	device->device_ctx_area = fStack->AllocateArea((void **)&device->device_ctx,
		&device->device_ctx_addr, sizeof(*device->device_ctx) << fContextSizeShift,
		"XHCI device context");
	if (device->device_ctx_area < B_OK) {
		TRACE_ERROR("unable to create a device context area\n");
		delete_area(device->input_ctx_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}
	memset(device->device_ctx, 0, sizeof(*device->device_ctx) << fContextSizeShift);

	device->trb_area = fStack->AllocateArea((void **)&device->trbs,
		&device->trb_addr, sizeof(xhci_trb) * (XHCI_MAX_ENDPOINTS - 1)
		* XHCI_ENDPOINT_RING_SIZE, "XHCI endpoint trbs");
	if (device->trb_area < B_OK) {
		TRACE_ERROR("unable to create a device trbs area\n");
		delete_area(device->input_ctx_area);
		delete_area(device->device_ctx_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	// set up slot pointer to device context
	fDcba->baseAddress[slot] = device->device_ctx_addr;

	size_t maxPacketSize;
	switch (speed) {
		case USB_SPEED_LOWSPEED:
		case USB_SPEED_FULLSPEED:
			maxPacketSize = 8;
			break;
		case USB_SPEED_HIGHSPEED:
			maxPacketSize = 64;
			break;
		default:
			maxPacketSize = 512;
			break;
	}

	// configure the Control endpoint 0
	if (ConfigureEndpoint(slot, 0, USB_OBJECT_CONTROL_PIPE, false,
			device->trb_addr, 0, maxPacketSize, speed, 0, 0) != B_OK) {
		TRACE_ERROR("unable to configure default control endpoint\n");
		delete_area(device->input_ctx_area);
		delete_area(device->device_ctx_area);
		delete_area(device->trb_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	mutex_init(&device->endpoints[0].lock, "xhci endpoint lock");
	device->endpoints[0].device = device;
	device->endpoints[0].id = 0;
	device->endpoints[0].td_head = NULL;
	device->endpoints[0].used = 0;
	device->endpoints[0].current = 0;
	device->endpoints[0].trbs = device->trbs;
	device->endpoints[0].trb_addr = device->trb_addr;

	// device should get to addressed state (bsr = 0)
	if (SetAddress(device->input_ctx_addr, false, slot) != B_OK) {
		TRACE_ERROR("unable to set address\n");
		delete_area(device->input_ctx_area);
		delete_area(device->device_ctx_area);
		delete_area(device->trb_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	device->state = XHCI_STATE_ADDRESSED;
	device->address = SLOT_3_DEVICE_ADDRESS_GET(_ReadContext(
		&device->device_ctx->slot.dwslot3));

	TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", device->address,
		SLOT_3_SLOT_STATE_GET(_ReadContext(
			&device->device_ctx->slot.dwslot3)));
	TRACE("endpoint0 state 0x%08" B_PRIx32 "\n",
		ENDPOINT_0_STATE_GET(_ReadContext(
			&device->device_ctx->endpoints[0].dwendpoint0)));

	// Create a temporary pipe with the new address
	ControlPipe pipe(parent);
	pipe.SetControllerCookie(&device->endpoints[0]);
	pipe.InitCommon(device->address + 1, 0, speed, Pipe::Default, maxPacketSize, 0,
		hubAddress, hubPort);

	// Get the device descriptor.
	// Retrieve only the first 8 bytes of the descriptor: the minimum any
	// device must support, and enough for us because it includes the device
	// type.

	size_t actualLength = 0;
	usb_device_descriptor deviceDescriptor;

	TRACE("getting the device descriptor\n");
	status_t status = pipe.SendRequest(
		USB_REQTYPE_DEVICE_IN | USB_REQTYPE_STANDARD,	// type
		USB_REQUEST_GET_DESCRIPTOR,						// request
		USB_DESCRIPTOR_DEVICE << 8,						// value
		0,												// index
		8,												// length
		(void *)&deviceDescriptor,						// buffer
		8,												// buffer length
		&actualLength);									// actual length

	if (actualLength != 8) {
		TRACE_ERROR("error while getting the device descriptor: %s\n",
			strerror(status));
		delete_area(device->input_ctx_area);
		delete_area(device->device_ctx_area);
		delete_area(device->trb_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	TRACE("device_class: %d device_subclass %d device_protocol %d\n",
		deviceDescriptor.device_class, deviceDescriptor.device_subclass,
		deviceDescriptor.device_protocol);

	if (speed == USB_SPEED_FULLSPEED && deviceDescriptor.max_packet_size_0 != 8) {
		TRACE("Full speed device with different max packet size for Endpoint 0\n");
		uint32 dwendpoint1 = _ReadContext(
			&device->input_ctx->endpoints[0].dwendpoint1);
		dwendpoint1 &= ~ENDPOINT_1_MAXPACKETSIZE(0xffff);
		dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE(
			deviceDescriptor.max_packet_size_0);
		_WriteContext(&device->input_ctx->endpoints[0].dwendpoint1,
			dwendpoint1);
		_WriteContext(&device->input_ctx->input.dropFlags, 0);
		_WriteContext(&device->input_ctx->input.addFlags, (1 << 1));
		EvaluateContext(device->input_ctx_addr, device->slot);
	}

	Device *deviceObject = NULL;
	if (deviceDescriptor.device_class == 0x09) {
		TRACE("creating new Hub\n");
		TRACE("getting the hub descriptor\n");
		size_t actualLength = 0;
		usb_hub_descriptor hubDescriptor;
		status = pipe.SendRequest(
			USB_REQTYPE_DEVICE_IN | USB_REQTYPE_CLASS,	// type
			USB_REQUEST_GET_DESCRIPTOR,					// request
			USB_DESCRIPTOR_HUB << 8,					// value
			0,											// index
			sizeof(usb_hub_descriptor),					// length
			(void *)&hubDescriptor,						// buffer
			sizeof(usb_hub_descriptor),					// buffer length
			&actualLength);

		if (actualLength != sizeof(usb_hub_descriptor)) {
			TRACE_ERROR("error while getting the hub descriptor: %s\n",
				strerror(status));
			delete_area(device->input_ctx_area);
			delete_area(device->device_ctx_area);
			delete_area(device->trb_area);
			memset(device, 0, sizeof(xhci_device));
			device->state = XHCI_STATE_DISABLED;
			return NULL;
		}

		uint32 dwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0);
		dwslot0 |= SLOT_0_HUB_BIT;
		_WriteContext(&device->input_ctx->slot.dwslot0, dwslot0);
		uint32 dwslot1 = _ReadContext(&device->input_ctx->slot.dwslot1);
		dwslot1 |= SLOT_1_NUM_PORTS(hubDescriptor.num_ports);
		_WriteContext(&device->input_ctx->slot.dwslot1, dwslot1);
		if (speed == USB_SPEED_HIGHSPEED) {
			uint32 dwslot2 = _ReadContext(&device->input_ctx->slot.dwslot2);
			dwslot2 |= SLOT_2_TT_TIME(HUB_TTT_GET(hubDescriptor.characteristics));
			_WriteContext(&device->input_ctx->slot.dwslot2, dwslot2);
		}

		deviceObject = new(std::nothrow) Hub(parent, hubAddress, hubPort,
			deviceDescriptor, device->address + 1, speed, false, device);
	} else {
		TRACE("creating new device\n");
		deviceObject = new(std::nothrow) Device(parent, hubAddress, hubPort,
			deviceDescriptor, device->address + 1, speed, false, device);
	}
	if (deviceObject == NULL || deviceObject->InitCheck() != B_OK) {
		if (deviceObject == NULL) {
			TRACE_ERROR("no memory to allocate device\n");
		} else {
			TRACE_ERROR("device object failed to initialize\n");
		}
		delete_area(device->input_ctx_area);
		delete_area(device->device_ctx_area);
		delete_area(device->trb_area);
		memset(device, 0, sizeof(xhci_device));
		device->state = XHCI_STATE_DISABLED;
		return NULL;
	}

	// We don't want to disable the default endpoint, naturally, which would
	// otherwise happen when this Pipe object is destroyed.
	pipe.SetControllerCookie(NULL);

	fPortSlots[hubPort] = slot;
	TRACE("AllocateDevice() port %d slot %d\n", hubPort, slot);
	return deviceObject;
}


void
XHCI::FreeDevice(Device *device)
{
	uint8 hubPort = device->HubPort();
	uint8 slot = fPortSlots[hubPort];
	TRACE("FreeDevice() port %d slot %d\n", hubPort, slot);

	// Delete the device first, so it cleans up its pipes and tells us
	// what we need to destroy before we tear down our internal state.
	delete device;

	DisableSlot(slot);
	fDcba->baseAddress[slot] = 0;
	fPortSlots[hubPort] = 0;
	delete_area(fDevices[slot].trb_area);
	delete_area(fDevices[slot].input_ctx_area);
	delete_area(fDevices[slot].device_ctx_area);

	memset(&fDevices[slot], 0, sizeof(xhci_device));
	fDevices[slot].state = XHCI_STATE_DISABLED;
}


status_t
XHCI::_InsertEndpointForPipe(Pipe *pipe)
{
	TRACE("insert endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress());

	if (pipe->ControllerCookie() != NULL
			|| pipe->Parent()->Type() != USB_OBJECT_DEVICE) {
		// default pipe is already referenced
		return B_OK;
	}

	Device* usbDevice = (Device *)pipe->Parent();
	struct xhci_device *device = (struct xhci_device *)
		usbDevice->ControllerCookie();
	if (usbDevice->Parent() == RootObject())
		return B_OK;
	if (device == NULL) {
		panic("_InsertEndpointForPipe device is NULL\n");
		return B_NO_INIT;
	}

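	// The endpoint index computed below is the Device Context Index (DCI)
	// minus one. For example, endpoint 1 IN maps to id = 2 * 1 + 1 - 1 = 2,
	// so its DCI (used as "id + 1" for the doorbell and commands) is 3;
	// endpoint 1 OUT maps to id 1, DCI 2.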
1 : 0)) - 1; 1696 if (id >= XHCI_MAX_ENDPOINTS - 1) 1697 return B_BAD_VALUE; 1698 1699 if (id > 0) { 1700 uint32 devicedwslot0 = _ReadContext(&device->device_ctx->slot.dwslot0); 1701 if (SLOT_0_NUM_ENTRIES_GET(devicedwslot0) == 1) { 1702 uint32 inputdwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0); 1703 inputdwslot0 &= ~(SLOT_0_NUM_ENTRIES(0x1f)); 1704 inputdwslot0 |= SLOT_0_NUM_ENTRIES(XHCI_MAX_ENDPOINTS - 1); 1705 _WriteContext(&device->input_ctx->slot.dwslot0, inputdwslot0); 1706 EvaluateContext(device->input_ctx_addr, device->slot); 1707 } 1708 1709 mutex_init(&device->endpoints[id].lock, "xhci endpoint lock"); 1710 MutexLocker endpointLocker(device->endpoints[id].lock); 1711 1712 device->endpoints[id].device = device; 1713 device->endpoints[id].id = id; 1714 device->endpoints[id].td_head = NULL; 1715 device->endpoints[id].used = 0; 1716 device->endpoints[id].current = 0; 1717 1718 device->endpoints[id].trbs = device->trbs 1719 + id * XHCI_ENDPOINT_RING_SIZE; 1720 device->endpoints[id].trb_addr = device->trb_addr 1721 + id * XHCI_ENDPOINT_RING_SIZE * sizeof(xhci_trb); 1722 memset(device->endpoints[id].trbs, 0, 1723 sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE); 1724 1725 TRACE("_InsertEndpointForPipe trbs device %p endpoint %p\n", 1726 device->trbs, device->endpoints[id].trbs); 1727 TRACE("_InsertEndpointForPipe trb_addr device 0x%" B_PRIxPHYSADDR 1728 " endpoint 0x%" B_PRIxPHYSADDR "\n", device->trb_addr, 1729 device->endpoints[id].trb_addr); 1730 1731 uint8 endpoint = id + 1; 1732 1733 TRACE("trb_addr 0x%" B_PRIxPHYSADDR "\n", device->endpoints[id].trb_addr); 1734 1735 status_t status = ConfigureEndpoint(device->slot, id, pipe->Type(), 1736 pipe->Direction() == Pipe::In, device->endpoints[id].trb_addr, 1737 pipe->Interval(), pipe->MaxPacketSize(), usbDevice->Speed(), 1738 pipe->MaxBurst(), pipe->BytesPerInterval()); 1739 if (status != B_OK) { 1740 TRACE_ERROR("unable to configure endpoint\n"); 1741 return status; 1742 } 1743 1744 _WriteContext(&device->input_ctx->input.dropFlags, 0); 1745 _WriteContext(&device->input_ctx->input.addFlags, 1746 (1 << endpoint) | (1 << 0)); 1747 1748 if (endpoint > 1) 1749 ConfigureEndpoint(device->input_ctx_addr, false, device->slot); 1750 else 1751 EvaluateContext(device->input_ctx_addr, device->slot); 1752 1753 TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", 1754 device->address, SLOT_3_SLOT_STATE_GET(_ReadContext( 1755 &device->device_ctx->slot.dwslot3))); 1756 TRACE("endpoint[0] state 0x%08" B_PRIx32 "\n", 1757 ENDPOINT_0_STATE_GET(_ReadContext( 1758 &device->device_ctx->endpoints[0].dwendpoint0))); 1759 TRACE("endpoint[%d] state 0x%08" B_PRIx32 "\n", id, 1760 ENDPOINT_0_STATE_GET(_ReadContext( 1761 &device->device_ctx->endpoints[id].dwendpoint0))); 1762 1763 device->state = XHCI_STATE_CONFIGURED; 1764 } 1765 pipe->SetControllerCookie(&device->endpoints[id]); 1766 1767 TRACE("_InsertEndpointForPipe for pipe %p at id %d\n", pipe, id); 1768 1769 return B_OK; 1770 } 1771 1772 1773 status_t 1774 XHCI::_RemoveEndpointForPipe(Pipe *pipe) 1775 { 1776 TRACE("remove endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress()); 1777 1778 if (pipe->Parent()->Type() != USB_OBJECT_DEVICE) 1779 return B_OK; 1780 Device* usbDevice = (Device *)pipe->Parent(); 1781 if (usbDevice->Parent() == RootObject()) 1782 return B_BAD_VALUE; 1783 1784 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 1785 if (endpoint == NULL || endpoint->trbs == NULL) 1786 return B_NO_INIT; 1787 1788 xhci_device *device = endpoint->device; 1789 1790 if 
(endpoint->id > 0) { 1791 mutex_lock(&endpoint->lock); 1792 1793 uint8 epNumber = endpoint->id + 1; 1794 StopEndpoint(true, epNumber, device->slot); 1795 1796 // See comment in CancelQueuedTransfers. 1797 xhci_td* td; 1798 while ((td = endpoint->td_head) != NULL) { 1799 endpoint->td_head = endpoint->td_head->next; 1800 FreeDescriptor(td); 1801 } 1802 1803 mutex_destroy(&endpoint->lock); 1804 memset(endpoint, 0, sizeof(xhci_endpoint)); 1805 1806 _WriteContext(&device->input_ctx->input.dropFlags, (1 << epNumber)); 1807 _WriteContext(&device->input_ctx->input.addFlags, 0); 1808 1809 if (epNumber > 1) 1810 ConfigureEndpoint(device->input_ctx_addr, true, device->slot); 1811 else 1812 EvaluateContext(device->input_ctx_addr, device->slot); 1813 1814 device->state = XHCI_STATE_ADDRESSED; 1815 } 1816 pipe->SetControllerCookie(NULL); 1817 1818 return B_OK; 1819 } 1820 1821 1822 status_t 1823 XHCI::_LinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1824 { 1825 TRACE("_LinkDescriptorForPipe\n"); 1826 1827 // We must check this before we lock the endpoint, because if it is 1828 // NULL, the mutex is probably uninitialized, too. 1829 if (endpoint->device == NULL) { 1830 TRACE_ERROR("trying to submit a transfer to a non-existent endpoint!\n"); 1831 return B_NO_INIT; 1832 } 1833 1834 // Use mutex_trylock first, in case we are in KDL. 1835 if (mutex_trylock(&endpoint->lock) != B_OK) 1836 mutex_lock(&endpoint->lock); 1837 1838 // "used" refers to the number of currently linked TDs, not the number of 1839 // used TRBs on the ring (we use 2 TRBs on the ring per transfer.) 1840 if (endpoint->used >= (XHCI_MAX_TRANSFERS - 1)) { 1841 TRACE_ERROR("_LinkDescriptorForPipe max transfers count exceeded\n"); 1842 mutex_unlock(&endpoint->lock); 1843 return B_BAD_VALUE; 1844 } 1845 1846 endpoint->used++; 1847 descriptor->next = endpoint->td_head; 1848 endpoint->td_head = descriptor; 1849 1850 const uint8 current = endpoint->current, 1851 eventdata = current + 1; 1852 uint8 next = eventdata + 1; 1853 1854 TRACE("_LinkDescriptorForPipe current %d, next %d\n", current, next); 1855 1856 // Add a Link TRB to the end of the descriptor. 1857 addr_t addr = endpoint->trb_addr + eventdata * sizeof(xhci_trb); 1858 descriptor->trbs[descriptor->trb_used].address = addr; 1859 descriptor->trbs[descriptor->trb_used].status = TRB_2_IRQ(0); 1860 descriptor->trbs[descriptor->trb_used].flags = TRB_3_TYPE(TRB_TYPE_LINK) 1861 | TRB_3_CHAIN_BIT | TRB_3_CYCLE_BIT; 1862 // It is specified that (XHCI 1.2 § 4.12.3 Note 2 p251) if the TRB 1863 // following one with the ENT bit set is a Link TRB, the Link TRB 1864 // shall be evaluated *and* the subsequent TRB shall be. Thus a 1865 // TRB_3_ENT_BIT is unnecessary here; and from testing seems to 1866 // break all transfers on a (very) small number of controllers. 1867 1868 #if !B_HOST_IS_LENDIAN 1869 // Convert endianness. 1870 for (uint32 i = 0; i <= descriptor->trb_used; i++) { 1871 descriptor->trbs[i].address = 1872 B_HOST_TO_LENDIAN_INT64(descriptor->trbs[i].address); 1873 descriptor->trbs[i].status = 1874 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].status); 1875 descriptor->trbs[i].flags = 1876 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].flags); 1877 } 1878 #endif 1879 1880 // Link the descriptor. 
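	// Each _LinkDescriptorForPipe() call consumes two TRBs on the endpoint
	// ring: the Link TRB written at "current" below, which points into the
	// descriptor's own TRB buffer, and the Event Data TRB written at
	// "eventdata", which is what actually generates the completion interrupt.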
1881 endpoint->trbs[current].address = 1882 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr); 1883 endpoint->trbs[current].status = 1884 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1885 endpoint->trbs[current].flags = 1886 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK)); 1887 1888 // Set up the Event Data TRB (XHCI 1.2 § 4.11.5.2 p230.) 1889 // 1890 // We do this on the main ring for two reasons: first, to avoid a small 1891 // potential race between the interrupt and the controller evaluating 1892 // the link TRB to get back onto the ring; and second, because many 1893 // controllers throw errors if the target of a Link TRB is not valid 1894 // (i.e. does not have its Cycle Bit set.) 1895 // 1896 // We also set the "address" field, which the controller will copy 1897 // verbatim into the TRB it posts to the event ring, to be the last 1898 // "real" TRB in the TD; this will allow us to determine what transfer 1899 // the resulting Transfer Event TRB refers to. 1900 endpoint->trbs[eventdata].address = 1901 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr 1902 + (descriptor->trb_used - 1) * sizeof(xhci_trb)); 1903 endpoint->trbs[eventdata].status = 1904 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1905 endpoint->trbs[eventdata].flags = 1906 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_EVENT_DATA) 1907 | TRB_3_IOC_BIT | TRB_3_CYCLE_BIT); 1908 1909 if (next == (XHCI_ENDPOINT_RING_SIZE - 1)) { 1910 // We always use 2 TRBs per _Link..() call, so if "next" is the last 1911 // TRB in the ring, we need to generate a link TRB at "next", and 1912 // then wrap it to 0. 1913 endpoint->trbs[next].address = 1914 B_HOST_TO_LENDIAN_INT64(endpoint->trb_addr); 1915 endpoint->trbs[next].status = 1916 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1917 endpoint->trbs[next].flags = 1918 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK) | TRB_3_CYCLE_BIT); 1919 1920 next = 0; 1921 } 1922 1923 endpoint->trbs[next].address = 0; 1924 endpoint->trbs[next].status = 0; 1925 endpoint->trbs[next].flags = 0; 1926 1927 // Everything is ready, so write the cycle bit. 1928 endpoint->trbs[current].flags |= B_HOST_TO_LENDIAN_INT32(TRB_3_CYCLE_BIT); 1929 1930 TRACE("_LinkDescriptorForPipe pCurrent %p phys 0x%" B_PRIxPHYSADDR 1931 " 0x%" B_PRIxPHYSADDR " 0x%08" B_PRIx32 "\n", &endpoint->trbs[current], 1932 endpoint->trb_addr + current * sizeof(struct xhci_trb), 1933 endpoint->trbs[current].address, 1934 B_LENDIAN_TO_HOST_INT32(endpoint->trbs[current].flags)); 1935 1936 endpoint->current = next; 1937 mutex_unlock(&endpoint->lock); 1938 1939 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1940 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1941 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1942 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1943 1944 Ring(endpoint->device->slot, endpoint->id + 1); 1945 1946 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1947 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1948 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1949 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1950 1951 return B_OK; 1952 } 1953 1954 1955 status_t 1956 XHCI::_UnlinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1957 { 1958 TRACE("_UnlinkDescriptorForPipe\n"); 1959 // We presume that the caller has already locked or owns the endpoint. 
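	// (HandleTransferComplete, for instance, calls this with the endpoint
	// lock held and only releases it after the descriptor is unlinked.)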
1960 1961 endpoint->used--; 1962 if (descriptor == endpoint->td_head) { 1963 endpoint->td_head = descriptor->next; 1964 descriptor->next = NULL; 1965 return B_OK; 1966 } else { 1967 for (xhci_td *td = endpoint->td_head; td->next != NULL; td = td->next) { 1968 if (td->next == descriptor) { 1969 td->next = descriptor->next; 1970 descriptor->next = NULL; 1971 return B_OK; 1972 } 1973 } 1974 } 1975 1976 endpoint->used++; 1977 return B_ERROR; 1978 } 1979 1980 1981 status_t 1982 XHCI::ConfigureEndpoint(uint8 slot, uint8 number, uint8 type, bool directionIn, 1983 uint64 ringAddr, uint16 interval, uint16 maxPacketSize, usb_speed speed, 1984 uint8 maxBurst, uint16 bytesPerInterval) 1985 { 1986 struct xhci_device* device = &fDevices[slot]; 1987 1988 uint32 dwendpoint0 = 0; 1989 uint32 dwendpoint1 = 0; 1990 uint64 qwendpoint2 = 0; 1991 uint32 dwendpoint4 = 0; 1992 1993 // Compute and assign the endpoint type. (XHCI 1.1 § 6.2.3 Table 6-9 p429.) 1994 uint8 xhciType = 4; 1995 if ((type & USB_OBJECT_INTERRUPT_PIPE) != 0) 1996 xhciType = 3; 1997 if ((type & USB_OBJECT_BULK_PIPE) != 0) 1998 xhciType = 2; 1999 if ((type & USB_OBJECT_ISO_PIPE) != 0) 2000 xhciType = 1; 2001 xhciType |= directionIn ? (1 << 2) : 0; 2002 dwendpoint1 |= ENDPOINT_1_EPTYPE(xhciType); 2003 2004 // Compute and assign interval. (XHCI 1.1 § 6.2.3.6 p433.) 2005 uint16 calcInterval; 2006 if ((type & USB_OBJECT_BULK_PIPE) != 0 2007 || (type & USB_OBJECT_CONTROL_PIPE) != 0) { 2008 // Bulk and Control endpoints never issue NAKs. 2009 calcInterval = 0; 2010 } else { 2011 switch (speed) { 2012 case USB_SPEED_FULLSPEED: 2013 if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2014 // Convert 1-16 into 3-18. 2015 calcInterval = min_c(max_c(interval, 1), 16) + 2; 2016 break; 2017 } 2018 2019 // fall through 2020 case USB_SPEED_LOWSPEED: { 2021 // Convert 1ms-255ms into 3-10. 2022 2023 // Find the index of the highest set bit in "interval". 2024 uint32 temp = min_c(max_c(interval, 1), 255); 2025 for (calcInterval = 0; temp != 1; calcInterval++) 2026 temp = temp >> 1; 2027 calcInterval += 3; 2028 break; 2029 } 2030 2031 case USB_SPEED_HIGHSPEED: 2032 case USB_SPEED_SUPERSPEED: 2033 default: 2034 // Convert 1-16 into 0-15. 2035 calcInterval = min_c(max_c(interval, 1), 16) - 1; 2036 break; 2037 } 2038 } 2039 dwendpoint0 |= ENDPOINT_0_INTERVAL(calcInterval); 2040 2041 // For non-isochronous endpoints, we want the controller to retry failed 2042 // transfers, if possible. (XHCI 1.1 § 4.10.2.3 p189.) 2043 if ((type & USB_OBJECT_ISO_PIPE) == 0) 2044 dwendpoint1 |= ENDPOINT_1_CERR(3); 2045 2046 // Assign maximum burst size. For USB3 devices this is passed in; for 2047 // all other devices we compute it. (XHCI 1.1 § 4.8.2 p154.) 2048 if (speed == USB_SPEED_HIGHSPEED && (type & (USB_OBJECT_INTERRUPT_PIPE 2049 | USB_OBJECT_ISO_PIPE)) != 0) { 2050 maxBurst = (maxPacketSize & 0x1800) >> 11; 2051 } else if (speed != USB_SPEED_SUPERSPEED) { 2052 maxBurst = 0; 2053 } 2054 dwendpoint1 |= ENDPOINT_1_MAXBURST(maxBurst); 2055 2056 // Assign maximum packet size, set the ring address, and set the 2057 // "Dequeue Cycle State" bit. (XHCI 1.1 § 6.2.3 Table 6-10 p430.) 2058 dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE(maxPacketSize); 2059 qwendpoint2 |= ENDPOINT_2_DCS_BIT | ringAddr; 2060 2061 // Assign average TRB length. 2062 if ((type & USB_OBJECT_CONTROL_PIPE) != 0) { 2063 // Control pipes are a special case, as they rarely have 2064 // outbound transfers of any substantial size. 
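		// (An average of 8 bytes presumably reflects the 8-byte setup
		// packet that every control transfer carries.)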
2065 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(8); 2066 } else if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2067 // Isochronous pipes are another special case: the TRB size will be 2068 // one packet (which is normally smaller than the max packet size, 2069 // but we don't know what it is here.) 2070 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(maxPacketSize); 2071 } else { 2072 // Under all other circumstances, we put 4 packets in a TRB. 2073 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(maxPacketSize * 4); 2074 } 2075 2076 // Assign maximum ESIT payload. (XHCI 1.1 § 4.14.2 p250.) 2077 if ((type & (USB_OBJECT_INTERRUPT_PIPE | USB_OBJECT_ISO_PIPE)) != 0) { 2078 // TODO: For SuperSpeedPlus endpoints, there is yet another descriptor 2079 // for isochronous endpoints that specifies the maximum ESIT payload. 2080 // We don't fetch this yet, so just fall back to the USB2 computation 2081 // method if bytesPerInterval is 0. 2082 if (speed == USB_SPEED_SUPERSPEED && bytesPerInterval != 0) 2083 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD(bytesPerInterval); 2084 else if (speed >= USB_SPEED_HIGHSPEED) 2085 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD((maxBurst + 1) * maxPacketSize); 2086 } 2087 2088 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint0, 2089 dwendpoint0); 2090 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint1, 2091 dwendpoint1); 2092 _WriteContext(&device->input_ctx->endpoints[number].qwendpoint2, 2093 qwendpoint2); 2094 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint4, 2095 dwendpoint4); 2096 2097 TRACE("endpoint 0x%" B_PRIx32 " 0x%" B_PRIx32 " 0x%" B_PRIx64 " 0x%" 2098 B_PRIx32 "\n", 2099 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint0), 2100 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint1), 2101 _ReadContext(&device->input_ctx->endpoints[number].qwendpoint2), 2102 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint4)); 2103 2104 return B_OK; 2105 } 2106 2107 2108 status_t 2109 XHCI::GetPortSpeed(uint8 index, usb_speed* speed) 2110 { 2111 if (index >= fPortCount) 2112 return B_BAD_INDEX; 2113 2114 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2115 2116 switch (PS_SPEED_GET(portStatus)) { 2117 case 3: 2118 *speed = USB_SPEED_HIGHSPEED; 2119 break; 2120 case 2: 2121 *speed = USB_SPEED_LOWSPEED; 2122 break; 2123 case 1: 2124 *speed = USB_SPEED_FULLSPEED; 2125 break; 2126 case 4: 2127 *speed = USB_SPEED_SUPERSPEED; 2128 break; 2129 default: 2130 TRACE_ALWAYS("nonstandard port speed %" B_PRId32 ", assuming SuperSpeed\n", 2131 PS_SPEED_GET(portStatus)); 2132 *speed = USB_SPEED_SUPERSPEED; 2133 break; 2134 } 2135 2136 return B_OK; 2137 } 2138 2139 2140 status_t 2141 XHCI::GetPortStatus(uint8 index, usb_port_status* status) 2142 { 2143 if (index >= fPortCount) 2144 return B_BAD_INDEX; 2145 2146 status->status = status->change = 0; 2147 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2148 TRACE("port %" B_PRId8 " status=0x%08" B_PRIx32 "\n", index, portStatus); 2149 2150 // build the status 2151 switch (PS_SPEED_GET(portStatus)) { 2152 case 3: 2153 status->status |= PORT_STATUS_HIGH_SPEED; 2154 break; 2155 case 2: 2156 status->status |= PORT_STATUS_LOW_SPEED; 2157 break; 2158 default: 2159 break; 2160 } 2161 2162 if (portStatus & PS_CCS) 2163 status->status |= PORT_STATUS_CONNECTION; 2164 if (portStatus & PS_PED) 2165 status->status |= PORT_STATUS_ENABLE; 2166 if (portStatus & PS_OCA) 2167 status->status |= PORT_STATUS_OVER_CURRENT; 2168 if (portStatus & PS_PR) 2169 status->status |= PORT_STATUS_RESET; 2170 if (portStatus & PS_PP) 
{ 2171 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) 2172 status->status |= PORT_STATUS_SS_POWER; 2173 else 2174 status->status |= PORT_STATUS_POWER; 2175 } 2176 2177 // build the change 2178 if (portStatus & PS_CSC) 2179 status->change |= PORT_STATUS_CONNECTION; 2180 if (portStatus & PS_PEC) 2181 status->change |= PORT_STATUS_ENABLE; 2182 if (portStatus & PS_OCC) 2183 status->change |= PORT_STATUS_OVER_CURRENT; 2184 if (portStatus & PS_PRC) 2185 status->change |= PORT_STATUS_RESET; 2186 2187 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) { 2188 if (portStatus & PS_PLC) 2189 status->change |= PORT_CHANGE_LINK_STATE; 2190 if (portStatus & PS_WRC) 2191 status->change |= PORT_CHANGE_BH_PORT_RESET; 2192 } 2193 2194 return B_OK; 2195 } 2196 2197 2198 status_t 2199 XHCI::SetPortFeature(uint8 index, uint16 feature) 2200 { 2201 TRACE("set port feature index %u feature %u\n", index, feature); 2202 if (index >= fPortCount) 2203 return B_BAD_INDEX; 2204 2205 uint32 portRegister = XHCI_PORTSC(index); 2206 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2207 2208 switch (feature) { 2209 case PORT_SUSPEND: 2210 if ((portStatus & PS_PED) == 0 || (portStatus & PS_PR) 2211 || (portStatus & PS_PLS_MASK) >= PS_XDEV_U3) { 2212 TRACE_ERROR("USB core suspending device not in U0/U1/U2.\n"); 2213 return B_BAD_VALUE; 2214 } 2215 portStatus &= ~PS_PLS_MASK; 2216 WriteOpReg(portRegister, portStatus | PS_LWS | PS_XDEV_U3); 2217 break; 2218 2219 case PORT_RESET: 2220 WriteOpReg(portRegister, portStatus | PS_PR); 2221 break; 2222 2223 case PORT_POWER: 2224 WriteOpReg(portRegister, portStatus | PS_PP); 2225 break; 2226 default: 2227 return B_BAD_VALUE; 2228 } 2229 ReadOpReg(portRegister); 2230 return B_OK; 2231 } 2232 2233 2234 status_t 2235 XHCI::ClearPortFeature(uint8 index, uint16 feature) 2236 { 2237 TRACE("clear port feature index %u feature %u\n", index, feature); 2238 if (index >= fPortCount) 2239 return B_BAD_INDEX; 2240 2241 uint32 portRegister = XHCI_PORTSC(index); 2242 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2243 2244 switch (feature) { 2245 case PORT_SUSPEND: 2246 portStatus = ReadOpReg(portRegister); 2247 if (portStatus & PS_PR) 2248 return B_BAD_VALUE; 2249 if (portStatus & PS_XDEV_U3) { 2250 if ((portStatus & PS_PED) == 0) 2251 return B_BAD_VALUE; 2252 portStatus &= ~PS_PLS_MASK; 2253 WriteOpReg(portRegister, portStatus | PS_XDEV_U0 | PS_LWS); 2254 } 2255 break; 2256 case PORT_ENABLE: 2257 WriteOpReg(portRegister, portStatus | PS_PED); 2258 break; 2259 case PORT_POWER: 2260 WriteOpReg(portRegister, portStatus & ~PS_PP); 2261 break; 2262 case C_PORT_CONNECTION: 2263 WriteOpReg(portRegister, portStatus | PS_CSC); 2264 break; 2265 case C_PORT_ENABLE: 2266 WriteOpReg(portRegister, portStatus | PS_PEC); 2267 break; 2268 case C_PORT_OVER_CURRENT: 2269 WriteOpReg(portRegister, portStatus | PS_OCC); 2270 break; 2271 case C_PORT_RESET: 2272 WriteOpReg(portRegister, portStatus | PS_PRC); 2273 break; 2274 case C_PORT_BH_PORT_RESET: 2275 WriteOpReg(portRegister, portStatus | PS_WRC); 2276 break; 2277 case C_PORT_LINK_STATE: 2278 WriteOpReg(portRegister, portStatus | PS_PLC); 2279 break; 2280 default: 2281 return B_BAD_VALUE; 2282 } 2283 2284 ReadOpReg(portRegister); 2285 return B_OK; 2286 } 2287 2288 2289 status_t 2290 XHCI::ControllerHalt() 2291 { 2292 // Mask off run state 2293 WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) & ~CMD_RUN); 2294 2295 // wait for shutdown state 2296 if (WaitOpBits(XHCI_STS, STS_HCH, STS_HCH) != B_OK) { 2297 TRACE_ERROR("HCH shutdown timeout\n"); 2298 return 
B_ERROR;
	}
	return B_OK;
}


status_t
XHCI::ControllerReset()
{
	TRACE("ControllerReset() cmd: 0x%" B_PRIx32 " sts: 0x%" B_PRIx32 "\n",
		ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS));
	WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) | CMD_HCRST);

	if (WaitOpBits(XHCI_CMD, CMD_HCRST, 0) != B_OK) {
		TRACE_ERROR("ControllerReset() failed CMD_HCRST\n");
		return B_ERROR;
	}

	if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) {
		TRACE_ERROR("ControllerReset() failed STS_CNR\n");
		return B_ERROR;
	}

	return B_OK;
}


int32
XHCI::InterruptHandler(void* data)
{
	return ((XHCI*)data)->Interrupt();
}


int32
XHCI::Interrupt()
{
	SpinLocker _(&fSpinlock);

	uint32 status = ReadOpReg(XHCI_STS);
	uint32 temp = ReadRunReg32(XHCI_IMAN(0));
	WriteOpReg(XHCI_STS, status);
	WriteRunReg32(XHCI_IMAN(0), temp);

	int32 result = B_HANDLED_INTERRUPT;

	if ((status & STS_HCH) != 0) {
		TRACE_ERROR("Host Controller halted\n");
		return result;
	}
	if ((status & STS_HSE) != 0) {
		TRACE_ERROR("Host System Error\n");
		return result;
	}
	if ((status & STS_HCE) != 0) {
		TRACE_ERROR("Host Controller Error\n");
		return result;
	}

	if ((status & STS_EINT) == 0) {
		TRACE("STS: 0x%" B_PRIx32 " IRQ_PENDING: 0x%" B_PRIx32 "\n",
			status, temp);
		return B_UNHANDLED_INTERRUPT;
	}

	TRACE("Event Interrupt\n");
	release_sem_etc(fEventSem, 1, B_DO_NOT_RESCHEDULE);
	return B_INVOKE_SCHEDULER;
}


void
XHCI::Ring(uint8 slot, uint8 endpoint)
{
	TRACE("Ding Dong! slot:%d endpoint %d\n", slot, endpoint);
	if ((slot == 0 && endpoint > 0) || (slot > 0 && endpoint == 0))
		panic("Ring() invalid slot/endpoint combination\n");
	if (slot > fSlotCount || endpoint >= XHCI_MAX_ENDPOINTS)
		panic("Ring() invalid slot or endpoint\n");

	WriteDoorReg32(XHCI_DOORBELL(slot), XHCI_DOORBELL_TARGET(endpoint)
		| XHCI_DOORBELL_STREAMID(0));
	ReadDoorReg32(XHCI_DOORBELL(slot));
		// Flush PCI writes
}


void
XHCI::QueueCommand(xhci_trb* trb)
{
	uint8 i, j;
	uint32 temp;

	i = fCmdIdx;
	j = fCmdCcs;

	TRACE("command[%u] = %" B_PRId32 " (0x%016" B_PRIx64 ", 0x%08" B_PRIx32
		", 0x%08" B_PRIx32 ")\n", i, TRB_3_TYPE_GET(trb->flags), trb->address,
		trb->status, trb->flags);

	fCmdRing[i].address = trb->address;
	fCmdRing[i].status = trb->status;
	temp = trb->flags;

	if (j)
		temp |= TRB_3_CYCLE_BIT;
	else
		temp &= ~TRB_3_CYCLE_BIT;
	temp &= ~TRB_3_TC_BIT;
	fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp);

	fCmdAddr = fErst->rs_addr + (XHCI_MAX_EVENTS + i) * sizeof(xhci_trb);

	i++;

	if (i == (XHCI_MAX_COMMANDS - 1)) {
		temp = TRB_3_TYPE(TRB_TYPE_LINK) | TRB_3_TC_BIT;
		if (j)
			temp |= TRB_3_CYCLE_BIT;
		fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp);

		i = 0;
		j ^= 1;
	}

	fCmdIdx = i;
	fCmdCcs = j;
}


void
XHCI::HandleCmdComplete(xhci_trb* trb)
{
	TRACE("HandleCmdComplete trb %p\n", trb);

	if (fCmdAddr == trb->address) {
		TRACE("Received command event\n");
		fCmdResult[0] = trb->status;
		fCmdResult[1] = B_LENDIAN_TO_HOST_INT32(trb->flags);
		release_sem_etc(fCmdCompSem, 1, B_DO_NOT_RESCHEDULE);
	} else
		TRACE_ERROR("received command event for unknown command!\n");
}


void
XHCI::HandleTransferComplete(xhci_trb* trb)
{
	TRACE("HandleTransferComplete trb %p\n", trb);

	const uint32 flags = B_LENDIAN_TO_HOST_INT32(trb->flags);
	const uint8 endpointNumber = TRB_3_ENDPOINT_GET(flags),
		slot = TRB_3_SLOT_GET(flags);

	if (slot > fSlotCount)
		TRACE_ERROR("invalid slot\n");
	if (endpointNumber == 0 || endpointNumber >= XHCI_MAX_ENDPOINTS) {
		TRACE_ERROR("invalid endpoint\n");
		return;
	}

	xhci_device *device = &fDevices[slot];
	xhci_endpoint *endpoint = &device->endpoints[endpointNumber - 1];

	if (endpoint->trbs == NULL) {
		TRACE_ERROR("got TRB but endpoint is not allocated!\n");
		return;
	}

	// Use mutex_trylock first, in case we are in KDL.
	MutexLocker endpointLocker(endpoint->lock,
		mutex_trylock(&endpoint->lock) == B_OK);
	if (!endpointLocker.IsLocked()) {
		// We failed to get the lock. Most likely it was destroyed
		// while we were waiting for it.
		return;
	}

	// In the case of an Event Data TRB, the "transferred" field refers
	// to the actual number of bytes transferred across the whole TD.
	// (XHCI 1.2 § 6.4.2.1 Table 6-38 p478.)
	const uint8 completionCode = TRB_2_COMP_CODE_GET(trb->status);
	int32 transferred = TRB_2_REM_GET(trb->status), remainder = -1;

	TRACE("HandleTransferComplete: ed %d, code %d, transferred %d\n",
		(flags & TRB_3_EVENT_DATA_BIT), completionCode, transferred);

	if ((flags & TRB_3_EVENT_DATA_BIT) == 0) {
		TRACE("got an interrupt for a non-Event Data TRB!\n");
		remainder = transferred;
		transferred = -1;
	}

	if (completionCode != COMP_SUCCESS && completionCode != COMP_SHORT_PACKET) {
		TRACE_ALWAYS("transfer error on slot %" B_PRId8 " endpoint %" B_PRId8
			": %s\n", slot, endpointNumber, xhci_error_string(completionCode));
	}

	const phys_addr_t source = B_LENDIAN_TO_HOST_INT64(trb->address);
	for (xhci_td *td = endpoint->td_head; td != NULL; td = td->next) {
		int64 offset = (source - td->trb_addr) / sizeof(xhci_trb);
		if (offset < 0 || offset >= td->trb_count)
			continue;

		TRACE("HandleTransferComplete td %p trb %" B_PRId64 " found\n",
			td, offset);

		// The TRB at offset trb_used will be the link TRB, which we do not
		// care about (and should not generate an interrupt at all.) We really
		// care about the actual last TRB, at index "trb_used - 1", which the
		// Event Data TRB that _LinkDescriptorForPipe creates points to.
		//
		// But if we have an unsuccessful completion code, the transfer
		// likely failed midway; so just accept it anyway.
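		// (For example, in a TD with trb_used == 3, the data TRBs occupy
		// indices 0 through 2 and the chained Link TRB is written at index 3;
		// the Event Data TRB points at index 2, so a successful completion
		// should yield offset == trb_used - 1 == 2 here.)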
2512 if (offset == (td->trb_used - 1) || completionCode != COMP_SUCCESS) { 2513 _UnlinkDescriptorForPipe(td, endpoint); 2514 endpointLocker.Unlock(); 2515 2516 td->trb_completion_code = completionCode; 2517 td->td_transferred = transferred; 2518 td->trb_left = remainder; 2519 2520 // add descriptor to finished list 2521 if (mutex_trylock(&fFinishedLock) != B_OK) 2522 mutex_lock(&fFinishedLock); 2523 td->next = fFinishedHead; 2524 fFinishedHead = td; 2525 mutex_unlock(&fFinishedLock); 2526 2527 release_sem_etc(fFinishTransfersSem, 1, B_DO_NOT_RESCHEDULE); 2528 TRACE("HandleTransferComplete td %p done\n", td); 2529 } else { 2530 TRACE_ERROR("successful TRB 0x%" B_PRIxPHYSADDR " was found, but it wasn't " 2531 "the last in the TD!\n", source); 2532 } 2533 return; 2534 } 2535 TRACE_ERROR("TRB 0x%" B_PRIxPHYSADDR " was not found in the endpoint!\n", source); 2536 } 2537 2538 2539 void 2540 XHCI::DumpRing(xhci_trb *trbs, uint32 size) 2541 { 2542 if (!Lock()) { 2543 TRACE("Unable to get lock!\n"); 2544 return; 2545 } 2546 2547 for (uint32 i = 0; i < size; i++) { 2548 TRACE("command[%" B_PRId32 "] = %" B_PRId32 " (0x%016" B_PRIx64 "," 2549 " 0x%08" B_PRIx32 ", 0x%08" B_PRIx32 ")\n", i, 2550 TRB_3_TYPE_GET(B_LENDIAN_TO_HOST_INT32(trbs[i].flags)), 2551 trbs[i].address, trbs[i].status, trbs[i].flags); 2552 } 2553 2554 Unlock(); 2555 } 2556 2557 2558 status_t 2559 XHCI::DoCommand(xhci_trb* trb) 2560 { 2561 if (!Lock()) { 2562 TRACE("Unable to get lock!\n"); 2563 return B_ERROR; 2564 } 2565 2566 QueueCommand(trb); 2567 Ring(0, 0); 2568 2569 // Begin with a 50ms timeout. 2570 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 50 * 1000) != B_OK) { 2571 // We've hit the timeout. In some error cases, interrupts are not 2572 // generated; so here we force the event ring to be polled once. 2573 release_sem(fEventSem); 2574 2575 // Now try again, this time with a 750ms timeout. 2576 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 2577 750 * 1000) != B_OK) { 2578 TRACE("Unable to obtain fCmdCompSem!\n"); 2579 fCmdAddr = 0; 2580 Unlock(); 2581 return B_TIMED_OUT; 2582 } 2583 } 2584 2585 // eat up sems that have been released by multiple interrupts 2586 int32 semCount = 0; 2587 get_sem_count(fCmdCompSem, &semCount); 2588 if (semCount > 0) 2589 acquire_sem_etc(fCmdCompSem, semCount, B_RELATIVE_TIMEOUT, 0); 2590 2591 status_t status = B_OK; 2592 uint32 completionCode = TRB_2_COMP_CODE_GET(fCmdResult[0]); 2593 TRACE("Command Complete. 
Result: %" B_PRId32 "\n", completionCode); 2594 if (completionCode != COMP_SUCCESS) { 2595 TRACE_ERROR("unsuccessful command %" B_PRId32 ", error %s (%" B_PRId32 ")\n", 2596 TRB_3_TYPE_GET(trb->flags), xhci_error_string(completionCode), 2597 completionCode); 2598 status = B_IO_ERROR; 2599 } 2600 2601 trb->status = fCmdResult[0]; 2602 trb->flags = fCmdResult[1]; 2603 TRACE("Storing trb 0x%08" B_PRIx32 " 0x%08" B_PRIx32 "\n", trb->status, 2604 trb->flags); 2605 2606 fCmdAddr = 0; 2607 Unlock(); 2608 return status; 2609 } 2610 2611 2612 status_t 2613 XHCI::Noop() 2614 { 2615 TRACE("Issue No-Op\n"); 2616 xhci_trb trb; 2617 trb.address = 0; 2618 trb.status = 0; 2619 trb.flags = TRB_3_TYPE(TRB_TYPE_CMD_NOOP); 2620 2621 return DoCommand(&trb); 2622 } 2623 2624 2625 status_t 2626 XHCI::EnableSlot(uint8* slot) 2627 { 2628 TRACE("Enable Slot\n"); 2629 xhci_trb trb; 2630 trb.address = 0; 2631 trb.status = 0; 2632 trb.flags = TRB_3_TYPE(TRB_TYPE_ENABLE_SLOT); 2633 2634 status_t status = DoCommand(&trb); 2635 if (status != B_OK) 2636 return status; 2637 2638 *slot = TRB_3_SLOT_GET(trb.flags); 2639 return *slot != 0 ? B_OK : B_BAD_VALUE; 2640 } 2641 2642 2643 status_t 2644 XHCI::DisableSlot(uint8 slot) 2645 { 2646 TRACE("Disable Slot\n"); 2647 xhci_trb trb; 2648 trb.address = 0; 2649 trb.status = 0; 2650 trb.flags = TRB_3_TYPE(TRB_TYPE_DISABLE_SLOT) | TRB_3_SLOT(slot); 2651 2652 return DoCommand(&trb); 2653 } 2654 2655 2656 status_t 2657 XHCI::SetAddress(uint64 inputContext, bool bsr, uint8 slot) 2658 { 2659 TRACE("Set Address\n"); 2660 xhci_trb trb; 2661 trb.address = inputContext; 2662 trb.status = 0; 2663 trb.flags = TRB_3_TYPE(TRB_TYPE_ADDRESS_DEVICE) | TRB_3_SLOT(slot); 2664 2665 if (bsr) 2666 trb.flags |= TRB_3_BSR_BIT; 2667 2668 return DoCommand(&trb); 2669 } 2670 2671 2672 status_t 2673 XHCI::ConfigureEndpoint(uint64 inputContext, bool deconfigure, uint8 slot) 2674 { 2675 TRACE("Configure Endpoint\n"); 2676 xhci_trb trb; 2677 trb.address = inputContext; 2678 trb.status = 0; 2679 trb.flags = TRB_3_TYPE(TRB_TYPE_CONFIGURE_ENDPOINT) | TRB_3_SLOT(slot); 2680 2681 if (deconfigure) 2682 trb.flags |= TRB_3_DCEP_BIT; 2683 2684 return DoCommand(&trb); 2685 } 2686 2687 2688 status_t 2689 XHCI::EvaluateContext(uint64 inputContext, uint8 slot) 2690 { 2691 TRACE("Evaluate Context\n"); 2692 xhci_trb trb; 2693 trb.address = inputContext; 2694 trb.status = 0; 2695 trb.flags = TRB_3_TYPE(TRB_TYPE_EVALUATE_CONTEXT) | TRB_3_SLOT(slot); 2696 2697 return DoCommand(&trb); 2698 } 2699 2700 2701 status_t 2702 XHCI::ResetEndpoint(bool preserve, uint8 endpoint, uint8 slot) 2703 { 2704 TRACE("Reset Endpoint\n"); 2705 xhci_trb trb; 2706 trb.address = 0; 2707 trb.status = 0; 2708 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_ENDPOINT) 2709 | TRB_3_SLOT(slot) | TRB_3_ENDPOINT(endpoint); 2710 if (preserve) 2711 trb.flags |= TRB_3_PRSV_BIT; 2712 2713 return DoCommand(&trb); 2714 } 2715 2716 2717 status_t 2718 XHCI::StopEndpoint(bool suspend, uint8 endpoint, uint8 slot) 2719 { 2720 TRACE("Stop Endpoint\n"); 2721 xhci_trb trb; 2722 trb.address = 0; 2723 trb.status = 0; 2724 trb.flags = TRB_3_TYPE(TRB_TYPE_STOP_ENDPOINT) 2725 | TRB_3_SLOT(slot) | TRB_3_ENDPOINT(endpoint); 2726 if (suspend) 2727 trb.flags |= TRB_3_SUSPEND_ENDPOINT_BIT; 2728 2729 return DoCommand(&trb); 2730 } 2731 2732 2733 status_t 2734 XHCI::SetTRDequeue(uint64 dequeue, uint16 stream, uint8 endpoint, uint8 slot) 2735 { 2736 TRACE("Set TR Dequeue\n"); 2737 xhci_trb trb; 2738 trb.address = dequeue | ENDPOINT_2_DCS_BIT; 2739 // The DCS bit is copied from the address field 
as in ConfigureEndpoint. 2740 // (XHCI 1.1 § 4.6.10 p142.) 2741 trb.status = TRB_2_STREAM(stream); 2742 trb.flags = TRB_3_TYPE(TRB_TYPE_SET_TR_DEQUEUE) 2743 | TRB_3_SLOT(slot) | TRB_3_ENDPOINT(endpoint); 2744 2745 return DoCommand(&trb); 2746 } 2747 2748 2749 status_t 2750 XHCI::ResetDevice(uint8 slot) 2751 { 2752 TRACE("Reset Device\n"); 2753 xhci_trb trb; 2754 trb.address = 0; 2755 trb.status = 0; 2756 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_DEVICE) | TRB_3_SLOT(slot); 2757 2758 return DoCommand(&trb); 2759 } 2760 2761 2762 int32 2763 XHCI::EventThread(void* data) 2764 { 2765 ((XHCI *)data)->CompleteEvents(); 2766 return B_OK; 2767 } 2768 2769 2770 void 2771 XHCI::CompleteEvents() 2772 { 2773 while (!fStopThreads) { 2774 if (acquire_sem(fEventSem) < B_OK) 2775 continue; 2776 2777 // eat up sems that have been released by multiple interrupts 2778 int32 semCount = 0; 2779 get_sem_count(fEventSem, &semCount); 2780 if (semCount > 0) 2781 acquire_sem_etc(fEventSem, semCount, B_RELATIVE_TIMEOUT, 0); 2782 2783 ProcessEvents(); 2784 } 2785 } 2786 2787 2788 void 2789 XHCI::ProcessEvents() 2790 { 2791 // Use mutex_trylock first, in case we are in KDL. 2792 MutexLocker locker(fEventLock, mutex_trylock(&fEventLock) == B_OK); 2793 if (!locker.IsLocked()) { 2794 // We failed to get the lock. This really should not happen. 2795 TRACE_ERROR("failed to acquire event lock!\n"); 2796 return; 2797 } 2798 2799 uint16 i = fEventIdx; 2800 uint8 j = fEventCcs; 2801 uint8 t = 2; 2802 2803 while (1) { 2804 uint32 temp = B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags); 2805 uint8 event = TRB_3_TYPE_GET(temp); 2806 TRACE("event[%u] = %u (0x%016" B_PRIx64 " 0x%08" B_PRIx32 " 0x%08" 2807 B_PRIx32 ")\n", i, event, fEventRing[i].address, 2808 fEventRing[i].status, B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags)); 2809 uint8 k = (temp & TRB_3_CYCLE_BIT) ? 1 : 0; 2810 if (j != k) 2811 break; 2812 2813 switch (event) { 2814 case TRB_TYPE_COMMAND_COMPLETION: 2815 HandleCmdComplete(&fEventRing[i]); 2816 break; 2817 case TRB_TYPE_TRANSFER: 2818 HandleTransferComplete(&fEventRing[i]); 2819 break; 2820 case TRB_TYPE_PORT_STATUS_CHANGE: 2821 TRACE("port change detected\n"); 2822 break; 2823 default: 2824 TRACE_ERROR("Unhandled event = %u\n", event); 2825 break; 2826 } 2827 2828 i++; 2829 if (i == XHCI_MAX_EVENTS) { 2830 i = 0; 2831 j ^= 1; 2832 if (!--t) 2833 break; 2834 } 2835 } 2836 2837 fEventIdx = i; 2838 fEventCcs = j; 2839 2840 uint64 addr = fErst->rs_addr + i * sizeof(xhci_trb); 2841 WriteRunReg32(XHCI_ERDP_LO(0), (uint32)addr | ERDP_BUSY); 2842 WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(addr >> 32)); 2843 } 2844 2845 2846 int32 2847 XHCI::FinishThread(void* data) 2848 { 2849 ((XHCI *)data)->FinishTransfers(); 2850 return B_OK; 2851 } 2852 2853 2854 void 2855 XHCI::FinishTransfers() 2856 { 2857 while (!fStopThreads) { 2858 if (acquire_sem(fFinishTransfersSem) < B_OK) 2859 continue; 2860 2861 // eat up sems that have been released by multiple interrupts 2862 int32 semCount = 0; 2863 get_sem_count(fFinishTransfersSem, &semCount); 2864 if (semCount > 0) 2865 acquire_sem_etc(fFinishTransfersSem, semCount, B_RELATIVE_TIMEOUT, 0); 2866 2867 mutex_lock(&fFinishedLock); 2868 TRACE("finishing transfers\n"); 2869 while (fFinishedHead != NULL) { 2870 xhci_td* td = fFinishedHead; 2871 fFinishedHead = td->next; 2872 td->next = NULL; 2873 mutex_unlock(&fFinishedLock); 2874 2875 TRACE("finishing transfer td %p\n", td); 2876 2877 Transfer* transfer = td->transfer; 2878 if (transfer == NULL) { 2879 // No transfer? Quick way out. 
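				// (Presumably the transfer was already finished or detached
				// elsewhere, e.g. while cancelling queued transfers.)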
2880 FreeDescriptor(td); 2881 mutex_lock(&fFinishedLock); 2882 continue; 2883 } 2884 2885 bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out); 2886 2887 status_t callbackStatus = B_OK; 2888 switch (td->trb_completion_code) { 2889 case COMP_SHORT_PACKET: 2890 case COMP_SUCCESS: 2891 callbackStatus = B_OK; 2892 break; 2893 case COMP_DATA_BUFFER: 2894 callbackStatus = directionIn ? B_DEV_DATA_OVERRUN 2895 : B_DEV_DATA_UNDERRUN; 2896 break; 2897 case COMP_BABBLE: 2898 callbackStatus = directionIn ? B_DEV_FIFO_OVERRUN 2899 : B_DEV_FIFO_UNDERRUN; 2900 break; 2901 case COMP_USB_TRANSACTION: 2902 callbackStatus = B_DEV_CRC_ERROR; 2903 break; 2904 case COMP_STALL: 2905 callbackStatus = B_DEV_STALLED; 2906 break; 2907 default: 2908 callbackStatus = B_DEV_STALLED; 2909 break; 2910 } 2911 2912 size_t actualLength = transfer->DataLength(); 2913 if (td->trb_completion_code != COMP_SUCCESS) { 2914 actualLength = td->td_transferred; 2915 if (td->td_transferred == -1) 2916 actualLength = transfer->DataLength() - td->trb_left; 2917 TRACE("transfer not successful, actualLength=%" B_PRIuSIZE "\n", 2918 actualLength); 2919 } 2920 2921 usb_isochronous_data* isochronousData = transfer->IsochronousData(); 2922 if (isochronousData != NULL) { 2923 size_t packetSize = transfer->DataLength() / isochronousData->packet_count, 2924 left = actualLength; 2925 for (uint32 i = 0; i < isochronousData->packet_count; i++) { 2926 size_t size = min_c(packetSize, left); 2927 isochronousData->packet_descriptors[i].actual_length = size; 2928 isochronousData->packet_descriptors[i].status = (size > 0) 2929 ? B_OK : B_DEV_FIFO_UNDERRUN; 2930 left -= size; 2931 } 2932 } 2933 2934 if (callbackStatus == B_OK && directionIn && actualLength > 0) { 2935 TRACE("copying in iov count %ld\n", transfer->VectorCount()); 2936 status_t status = transfer->PrepareKernelAccess(); 2937 if (status == B_OK) { 2938 ReadDescriptor(td, transfer->Vector(), 2939 transfer->VectorCount()); 2940 } else { 2941 callbackStatus = status; 2942 } 2943 } 2944 transfer->Finished(callbackStatus, actualLength); 2945 delete transfer; 2946 FreeDescriptor(td); 2947 mutex_lock(&fFinishedLock); 2948 } 2949 mutex_unlock(&fFinishedLock); 2950 } 2951 } 2952 2953 2954 inline void 2955 XHCI::WriteOpReg(uint32 reg, uint32 value) 2956 { 2957 *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg) = value; 2958 } 2959 2960 2961 inline uint32 2962 XHCI::ReadOpReg(uint32 reg) 2963 { 2964 return *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg); 2965 } 2966 2967 2968 inline status_t 2969 XHCI::WaitOpBits(uint32 reg, uint32 mask, uint32 expected) 2970 { 2971 int loops = 0; 2972 uint32 value = ReadOpReg(reg); 2973 while ((value & mask) != expected) { 2974 snooze(1000); 2975 value = ReadOpReg(reg); 2976 if (loops == 100) { 2977 TRACE("delay waiting on reg 0x%" B_PRIX32 " match 0x%" B_PRIX32 2978 " (0x%" B_PRIX32 ")\n", reg, expected, mask); 2979 } else if (loops > 250) { 2980 TRACE_ERROR("timeout waiting on reg 0x%" B_PRIX32 2981 " match 0x%" B_PRIX32 " (0x%" B_PRIX32 ")\n", reg, expected, 2982 mask); 2983 return B_ERROR; 2984 } 2985 loops++; 2986 } 2987 return B_OK; 2988 } 2989 2990 2991 inline uint32 2992 XHCI::ReadCapReg32(uint32 reg) 2993 { 2994 return *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg); 2995 } 2996 2997 2998 inline void 2999 XHCI::WriteCapReg32(uint32 reg, uint32 value) 3000 { 3001 *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg) = value; 3002 } 3003 3004 3005 inline uint32 3006 
XHCI::ReadRunReg32(uint32 reg) 3007 { 3008 return *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg); 3009 } 3010 3011 3012 inline void 3013 XHCI::WriteRunReg32(uint32 reg, uint32 value) 3014 { 3015 *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg) = value; 3016 } 3017 3018 3019 inline uint32 3020 XHCI::ReadDoorReg32(uint32 reg) 3021 { 3022 return *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg); 3023 } 3024 3025 3026 inline void 3027 XHCI::WriteDoorReg32(uint32 reg, uint32 value) 3028 { 3029 *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg) = value; 3030 } 3031 3032 3033 inline addr_t 3034 XHCI::_OffsetContextAddr(addr_t p) 3035 { 3036 if (fContextSizeShift == 1) { 3037 // each structure is page aligned, each pointer is 32 bits aligned 3038 uint32 offset = p & ((B_PAGE_SIZE - 1) & ~31U); 3039 p += offset; 3040 } 3041 return p; 3042 } 3043 3044 inline uint32 3045 XHCI::_ReadContext(uint32* p) 3046 { 3047 p = (uint32*)_OffsetContextAddr((addr_t)p); 3048 return *p; 3049 } 3050 3051 3052 inline void 3053 XHCI::_WriteContext(uint32* p, uint32 value) 3054 { 3055 p = (uint32*)_OffsetContextAddr((addr_t)p); 3056 *p = value; 3057 } 3058 3059 3060 inline uint64 3061 XHCI::_ReadContext(uint64* p) 3062 { 3063 p = (uint64*)_OffsetContextAddr((addr_t)p); 3064 return *p; 3065 } 3066 3067 3068 inline void 3069 XHCI::_WriteContext(uint64* p, uint64 value) 3070 { 3071 p = (uint64*)_OffsetContextAddr((addr_t)p); 3072 *p = value; 3073 } 3074
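

/*
 * Worked example for _OffsetContextAddr() above (a sketch, assuming the
 * context structures in xhci.h are declared with 32-byte entries and that
 * fContextSizeShift is 1 when the controller uses 64-byte contexts):
 * input_ctx->endpoints[0] sits at page offset 0x40 in the 32-byte layout
 * (input control context + slot context). With 64-byte contexts its real
 * offset is 0x80; the function recovers this by adding the 32-byte-granular
 * page offset (0x40) back onto the pointer, effectively doubling it.
 */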