/*
 * Copyright 2011-2021, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Augustin Cavalier <waddlesplash>
 *		Jian Chiang <j.jian.chiang@gmail.com>
 *		Jérôme Duval <jerome.duval@gmail.com>
 *		Akshay Jaggi <akshay1994.leo@gmail.com>
 *		Michael Lotz <mmlr@mlotz.ch>
 *		Alexander von Gluck <kallisti5@unixzen.com>
 */


#include <module.h>
#include <PCI.h>
#include <PCI_x86.h>
#include <USB3.h>
#include <KernelExport.h>

#include <ByteOrder.h>
#include <util/AutoLock.h>

#include "xhci.h"

#define USB_MODULE_NAME	"xhci"

pci_module_info *XHCI::sPCIModule = NULL;
pci_x86_module_info *XHCI::sPCIx86Module = NULL;


static int32
xhci_std_ops(int32 op, ...)
{
	switch (op) {
		case B_MODULE_INIT:
			TRACE_MODULE("xhci init module\n");
			return B_OK;
		case B_MODULE_UNINIT:
			TRACE_MODULE("xhci uninit module\n");
			return B_OK;
	}

	return EINVAL;
}


static const char*
xhci_error_string(uint32 error)
{
	switch (error) {
		case COMP_INVALID: return "Invalid";
		case COMP_SUCCESS: return "Success";
		case COMP_DATA_BUFFER: return "Data buffer";
		case COMP_BABBLE: return "Babble detected";
		case COMP_USB_TRANSACTION: return "USB transaction";
		case COMP_TRB: return "TRB";
		case COMP_STALL: return "Stall";
		case COMP_RESOURCE: return "Resource";
		case COMP_BANDWIDTH: return "Bandwidth";
		case COMP_NO_SLOTS: return "No slots";
		case COMP_INVALID_STREAM: return "Invalid stream";
		case COMP_SLOT_NOT_ENABLED: return "Slot not enabled";
		case COMP_ENDPOINT_NOT_ENABLED: return "Endpoint not enabled";
		case COMP_SHORT_PACKET: return "Short packet";
		case COMP_RING_UNDERRUN: return "Ring underrun";
		case COMP_RING_OVERRUN: return "Ring overrun";
		case COMP_VF_RING_FULL: return "VF Event Ring Full";
		case COMP_PARAMETER: return "Parameter";
		case COMP_BANDWIDTH_OVERRUN: return "Bandwidth overrun";
		case COMP_CONTEXT_STATE: return "Context state";
		case COMP_NO_PING_RESPONSE: return "No ping response";
		case COMP_EVENT_RING_FULL: return "Event ring full";
		case COMP_INCOMPATIBLE_DEVICE: return "Incompatible device";
		case COMP_MISSED_SERVICE: return "Missed service";
		case COMP_COMMAND_RING_STOPPED: return "Command ring stopped";
		case COMP_COMMAND_ABORTED: return "Command aborted";
		case COMP_STOPPED: return "Stopped";
		case COMP_LENGTH_INVALID: return "Length invalid";
		case COMP_MAX_EXIT_LATENCY: return "Max exit latency too large";
		case COMP_ISOC_OVERRUN: return "Isoch buffer overrun";
		case COMP_EVENT_LOST: return "Event lost";
		case COMP_UNDEFINED: return "Undefined";
		case COMP_INVALID_STREAM_ID: return "Invalid stream ID";
		case COMP_SECONDARY_BANDWIDTH: return "Secondary bandwidth";
		case COMP_SPLIT_TRANSACTION: return "Split transaction";

		default: return "Undefined";
	}
}


usb_host_controller_info xhci_module = {
	{
		"busses/usb/xhci",
		0,
		xhci_std_ops
	},
	NULL,
	XHCI::AddTo
};


module_info *modules[] = {
	(module_info *)&xhci_module,
	NULL
};


status_t
XHCI::AddTo(Stack *stack)
{
	if (!sPCIModule) {
		status_t status = get_module(B_PCI_MODULE_NAME,
			(module_info **)&sPCIModule);
		if (status < B_OK) {
			TRACE_MODULE_ERROR("getting pci module failed! 0x%08" B_PRIx32
				"\n", status);
			return status;
		}
	}

	TRACE_MODULE("searching devices\n");
	bool found = false;
	pci_info *item = new(std::nothrow) pci_info;
	if (item == NULL) {
		sPCIModule = NULL;
		put_module(B_PCI_MODULE_NAME);
		return B_NO_MEMORY;
	}

	// Try to get the PCI x86 module as well so we can enable possible MSIs.
	if (sPCIx86Module == NULL && get_module(B_PCI_X86_MODULE_NAME,
			(module_info **)&sPCIx86Module) != B_OK) {
		// If it isn't there, that's not critical though.
		TRACE_MODULE_ERROR("failed to get pci x86 module\n");
		sPCIx86Module = NULL;
	}

	for (int32 i = 0; sPCIModule->get_nth_pci_info(i, item) >= B_OK; i++) {
		if (item->class_base == PCI_serial_bus && item->class_sub == PCI_usb
			&& item->class_api == PCI_usb_xhci) {
			TRACE_MODULE("found device at PCI:%d:%d:%d\n",
				item->bus, item->device, item->function);
			XHCI *bus = new(std::nothrow) XHCI(item, stack);
			if (bus == NULL) {
				delete item;
				sPCIModule = NULL;
				put_module(B_PCI_MODULE_NAME);
				if (sPCIx86Module != NULL)
					put_module(B_PCI_X86_MODULE_NAME);
				return B_NO_MEMORY;
			}

			// The bus will put the PCI modules when it is destroyed, so get
			// them again to increase their reference count.
			get_module(B_PCI_MODULE_NAME, (module_info **)&sPCIModule);
			if (sPCIx86Module != NULL)
				get_module(B_PCI_X86_MODULE_NAME, (module_info **)&sPCIx86Module);

			if (bus->InitCheck() < B_OK) {
				TRACE_MODULE_ERROR("bus failed init check\n");
				delete bus;
				continue;
			}

			// the bus took it away
			item = new(std::nothrow) pci_info;

			if (bus->Start() != B_OK) {
				delete bus;
				continue;
			}
			found = true;
		}
	}

	// The modules will have been gotten again if we successfully
	// initialized a bus, so we should put them here.
	put_module(B_PCI_MODULE_NAME);
	if (sPCIx86Module != NULL)
		put_module(B_PCI_X86_MODULE_NAME);

	if (!found)
		TRACE_MODULE_ERROR("no devices found\n");
	delete item;
	return found ? B_OK : ENODEV;
}
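

// The constructor maps the controller's MMIO registers, takes ownership of
// the controller from the BIOS if necessary, halts and resets it, and
// installs the interrupt handler; Start() later brings the controller into
// the running state.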
XHCI::XHCI(pci_info *info, Stack *stack)
	:	BusManager(stack),
	fRegisterArea(-1),
	fRegisters(NULL),
	fPCIInfo(info),
	fStack(stack),
	fIRQ(0),
	fUseMSI(false),
	fErstArea(-1),
	fDcbaArea(-1),
	fCmdCompSem(-1),
	fStopThreads(false),
	fRootHub(NULL),
	fPortCount(0),
	fSlotCount(0),
	fScratchpadCount(0),
	fContextSizeShift(0),
	fFinishedHead(NULL),
	fFinishTransfersSem(-1),
	fFinishThread(-1),
	fEventSem(-1),
	fEventThread(-1),
	fEventIdx(0),
	fCmdIdx(0),
	fEventCcs(1),
	fCmdCcs(1)
{
	B_INITIALIZE_SPINLOCK(&fSpinlock);
	mutex_init(&fFinishedLock, "XHCI finished transfers");
	mutex_init(&fEventLock, "XHCI event handler");

	if (BusManager::InitCheck() < B_OK) {
		TRACE_ERROR("bus manager failed to init\n");
		return;
	}

	TRACE("constructing new XHCI host controller driver\n");
	fInitOK = false;

	// enable busmaster and memory mapped access
	uint16 command = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, PCI_command, 2);
	command &= ~(PCI_command_io | PCI_command_int_disable);
	command |= PCI_command_master | PCI_command_memory;

	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, PCI_command, 2, command);

	// map the registers (low + high for 64-bit when requested)
	phys_addr_t physicalAddress = fPCIInfo->u.h0.base_registers[0];
	if ((fPCIInfo->u.h0.base_register_flags[0] & PCI_address_type)
			== PCI_address_type_64) {
		physicalAddress |= (uint64)fPCIInfo->u.h0.base_registers[1] << 32;
	}

	size_t mapSize = fPCIInfo->u.h0.base_register_sizes[0];

	TRACE("map registers %08" B_PRIxPHYSADDR ", size: %" B_PRIuSIZE "\n",
		physicalAddress, mapSize);

	fRegisterArea = map_physical_memory("XHCI memory mapped registers",
		physicalAddress, mapSize, B_ANY_KERNEL_BLOCK_ADDRESS,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		(void **)&fRegisters);
	if (fRegisterArea < B_OK) {
		TRACE_ERROR("failed to map register memory\n");
		return;
	}

	// determine the register offsets
	fCapabilityRegisterOffset = 0;
	fOperationalRegisterOffset = HCI_CAPLENGTH(ReadCapReg32(XHCI_HCI_CAPLENGTH));
	fRuntimeRegisterOffset = ReadCapReg32(XHCI_RTSOFF) & ~0x1F;
	fDoorbellRegisterOffset = ReadCapReg32(XHCI_DBOFF) & ~0x3;

	TRACE("mapped registers: %p\n", fRegisters);
	TRACE("operational register offset: %" B_PRId32 "\n", fOperationalRegisterOffset);
	TRACE("runtime register offset: %" B_PRId32 "\n", fRuntimeRegisterOffset);
	TRACE("doorbell register offset: %" B_PRId32 "\n", fDoorbellRegisterOffset);

	int32 interfaceVersion = HCI_VERSION(ReadCapReg32(XHCI_HCI_VERSION));
	if (interfaceVersion < 0x0090 || interfaceVersion > 0x0120) {
		TRACE_ERROR("unsupported interface version: 0x%04" B_PRIx32 "\n",
			interfaceVersion);
		return;
	}
	TRACE_ALWAYS("interface version: 0x%04" B_PRIx32 "\n", interfaceVersion);

	TRACE_ALWAYS("structural parameters: 1:0x%08" B_PRIx32 " 2:0x%08"
		B_PRIx32 " 3:0x%08" B_PRIx32 "\n", ReadCapReg32(XHCI_HCSPARAMS1),
		ReadCapReg32(XHCI_HCSPARAMS2), ReadCapReg32(XHCI_HCSPARAMS3));

	uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS);
	if (cparams == 0xffffffff)
		return;
	TRACE_ALWAYS("capability parameters: 0x%08" B_PRIx32 "\n", cparams);

	// if 64 bytes context structures, then 1
	fContextSizeShift = HCC_CSZ(cparams);
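
	// The loop below walks the controller's Extended Capabilities list to
	// find the USB Legacy Support capability.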
	// Assume ownership of the controller from the BIOS.
	uint32 eec = 0xffffffff;
	uint32 eecp = HCS0_XECP(cparams) << 2;
	for (; eecp != 0 && XECP_NEXT(eec); eecp += XECP_NEXT(eec) << 2) {
		TRACE("eecp register: 0x%08" B_PRIx32 "\n", eecp);

		eec = ReadCapReg32(eecp);
		if (XECP_ID(eec) != XHCI_LEGSUP_CAPID)
			continue;

		if (eec & XHCI_LEGSUP_BIOSOWNED) {
			TRACE_ALWAYS("the host controller is bios owned, claiming"
				" ownership\n");
			WriteCapReg32(eecp, eec | XHCI_LEGSUP_OSOWNED);

			for (int32 i = 0; i < 20; i++) {
				eec = ReadCapReg32(eecp);

				if ((eec & XHCI_LEGSUP_BIOSOWNED) == 0)
					break;

				TRACE_ALWAYS("controller is still bios owned, waiting\n");
				snooze(50000);
			}

			if (eec & XHCI_LEGSUP_BIOSOWNED) {
				TRACE_ERROR("bios won't give up control over the host "
					"controller (ignoring)\n");
			} else if (eec & XHCI_LEGSUP_OSOWNED) {
				TRACE_ALWAYS("successfully took ownership of the host "
					"controller\n");
			}

			// Force off the BIOS owned flag, and clear all SMIs. Some BIOSes
			// do indicate a successful handover but do not remove their SMIs
			// and then freeze the system when interrupts are generated.
			WriteCapReg32(eecp, eec & ~XHCI_LEGSUP_BIOSOWNED);
		}
		break;
	}
	uint32 legctlsts = ReadCapReg32(eecp + XHCI_LEGCTLSTS);
	legctlsts &= XHCI_LEGCTLSTS_DISABLE_SMI;
	legctlsts |= XHCI_LEGCTLSTS_EVENTS_SMI;
	WriteCapReg32(eecp + XHCI_LEGCTLSTS, legctlsts);

	// We need to explicitly take ownership of EHCI ports on earlier Intel chipsets.
	if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL) {
		switch (fPCIInfo->device_id) {
			case PCI_DEVICE_INTEL_PANTHER_POINT_XHCI:
			case PCI_DEVICE_INTEL_LYNX_POINT_XHCI:
			case PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI:
			case PCI_DEVICE_INTEL_BAYTRAIL_XHCI:
			case PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI:
			case PCI_DEVICE_INTEL_WILDCAT_POINT_LP_XHCI:
				_SwitchIntelPorts();
				break;
		}
	}

	// halt the host controller
	if (ControllerHalt() < B_OK) {
		return;
	}

	// reset the host controller
	if (ControllerReset() < B_OK) {
		TRACE_ERROR("host controller failed to reset\n");
		return;
	}

	fCmdCompSem = create_sem(0, "XHCI Command Complete");
	fFinishTransfersSem = create_sem(0, "XHCI Finish Transfers");
	fEventSem = create_sem(0, "XHCI Event");
	if (fFinishTransfersSem < B_OK || fCmdCompSem < B_OK || fEventSem < B_OK) {
		TRACE_ERROR("failed to create semaphores\n");
		return;
	}

	// create event handler thread
	fEventThread = spawn_kernel_thread(EventThread, "xhci event thread",
		B_URGENT_PRIORITY, (void *)this);
	resume_thread(fEventThread);

	// create finisher service thread
	fFinishThread = spawn_kernel_thread(FinishThread, "xhci finish thread",
		B_URGENT_PRIORITY - 1, (void *)this);
	resume_thread(fFinishThread);

	// Find the right interrupt vector, using MSIs if available.
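	// (If configuring MSIs fails, or the PCI x86 module is not available,
	// we keep the legacy interrupt line from the PCI configuration instead.)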
	fIRQ = fPCIInfo->u.h0.interrupt_line;
	if (sPCIx86Module != NULL && sPCIx86Module->get_msi_count(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function) >= 1) {
		uint8 msiVector = 0;
		if (sPCIx86Module->configure_msi(fPCIInfo->bus, fPCIInfo->device,
				fPCIInfo->function, 1, &msiVector) == B_OK
			&& sPCIx86Module->enable_msi(fPCIInfo->bus, fPCIInfo->device,
				fPCIInfo->function) == B_OK) {
			TRACE_ALWAYS("using message signaled interrupts\n");
			fIRQ = msiVector;
			fUseMSI = true;
		}
	}

	if (fIRQ == 0 || fIRQ == 0xFF) {
		TRACE_MODULE_ERROR("device PCI:%d:%d:%d was assigned an invalid IRQ\n",
			fPCIInfo->bus, fPCIInfo->device, fPCIInfo->function);
		return;
	}

	// Install the interrupt handler
	TRACE("installing interrupt handler\n");
	install_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this, 0);

	memset(fPortSpeeds, 0, sizeof(fPortSpeeds));
	memset(fDevices, 0, sizeof(fDevices));

	fInitOK = true;
	TRACE("driver construction successful\n");
}


XHCI::~XHCI()
{
	TRACE("tear down XHCI host controller driver\n");

	WriteOpReg(XHCI_CMD, 0);

	int32 result = 0;
	fStopThreads = true;
	delete_sem(fCmdCompSem);
	delete_sem(fFinishTransfersSem);
	delete_sem(fEventSem);
	wait_for_thread(fFinishThread, &result);
	wait_for_thread(fEventThread, &result);

	mutex_destroy(&fFinishedLock);
	mutex_destroy(&fEventLock);

	remove_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this);

	delete_area(fRegisterArea);
	delete_area(fErstArea);
	for (uint32 i = 0; i < fScratchpadCount; i++)
		delete_area(fScratchpadArea[i]);
	delete_area(fDcbaArea);

	if (fUseMSI && sPCIx86Module != NULL) {
		sPCIx86Module->disable_msi(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function);
		sPCIx86Module->unconfigure_msi(fPCIInfo->bus,
			fPCIInfo->device, fPCIInfo->function);
	}
	put_module(B_PCI_MODULE_NAME);
	if (sPCIx86Module != NULL)
		put_module(B_PCI_X86_MODULE_NAME);
}


void
XHCI::_SwitchIntelPorts()
{
	TRACE("Looking for EHCI owned ports\n");
	uint32 ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3PRM, 4);
	TRACE("Superspeed Ports: 0x%" B_PRIx32 "\n", ports);
	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4, ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4);
	TRACE("Superspeed ports now under XHCI : 0x%" B_PRIx32 "\n", ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB2PRM, 4);
	TRACE("USB 2.0 Ports : 0x%" B_PRIx32 "\n", ports);
	sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device,
		fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4, ports);
	ports = sPCIModule->read_pci_config(fPCIInfo->bus,
		fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4);
	TRACE("USB 2.0 ports now under XHCI: 0x%" B_PRIx32 "\n", ports);
}
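

// Start() reads the controller's structural parameters, allocates the device
// context base address array, the scratchpad buffers and the event and
// command rings, then enables the primary interrupter and sets the Run/Stop
// bit.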
status_t
XHCI::Start()
{
	TRACE_ALWAYS("starting XHCI host controller\n");
	TRACE("usbcmd: 0x%08" B_PRIx32 "; usbsts: 0x%08" B_PRIx32 "\n",
		ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS));

	if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) {
		TRACE("Start() failed STS_CNR\n");
	}

	if ((ReadOpReg(XHCI_CMD) & CMD_RUN) != 0) {
		TRACE_ERROR("Start() warning, starting running XHCI controller!\n");
	}

	if ((ReadOpReg(XHCI_PAGESIZE) & (1 << 0)) == 0) {
		TRACE_ERROR("controller does not support 4K page size\n");
		return B_ERROR;
	}

	// read port count from capability register
	uint32 capabilities = ReadCapReg32(XHCI_HCSPARAMS1);
	fPortCount = HCS_MAX_PORTS(capabilities);
	if (fPortCount == 0) {
		TRACE_ERROR("invalid number of ports: %u\n", fPortCount);
		return B_ERROR;
	}

	fSlotCount = HCS_MAX_SLOTS(capabilities);
	if (fSlotCount > XHCI_MAX_DEVICES)
		fSlotCount = XHCI_MAX_DEVICES;
	WriteOpReg(XHCI_CONFIG, fSlotCount);

	// find out which protocol is used for each port
	uint8 portFound = 0;
	uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS);
	uint32 eec = 0xffffffff;
	uint32 eecp = HCS0_XECP(cparams) << 2;
	for (; eecp != 0 && XECP_NEXT(eec) && portFound < fPortCount;
			eecp += XECP_NEXT(eec) << 2) {
		eec = ReadCapReg32(eecp);
		if (XECP_ID(eec) != XHCI_SUPPORTED_PROTOCOLS_CAPID)
			continue;
		if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) > 3)
			continue;
		uint32 temp = ReadCapReg32(eecp + 8);
		uint32 offset = XHCI_SUPPORTED_PROTOCOLS_1_OFFSET(temp);
		uint32 count = XHCI_SUPPORTED_PROTOCOLS_1_COUNT(temp);
		if (offset == 0 || count == 0)
			continue;
		offset--;
		for (uint32 i = offset; i < offset + count; i++) {
			if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) == 0x3)
				fPortSpeeds[i] = USB_SPEED_SUPERSPEED;
			else
				fPortSpeeds[i] = USB_SPEED_HIGHSPEED;

			TRACE("speed for port %" B_PRId32 " is %s\n", i,
				fPortSpeeds[i] == USB_SPEED_SUPERSPEED ? "super" : "high");
		}
		portFound += count;
	}

	uint32 params2 = ReadCapReg32(XHCI_HCSPARAMS2);
	fScratchpadCount = HCS_MAX_SC_BUFFERS(params2);
	if (fScratchpadCount > XHCI_MAX_SCRATCHPADS) {
		TRACE_ERROR("invalid number of scratchpads: %" B_PRIu32 "\n",
			fScratchpadCount);
		return B_ERROR;
	}

	uint32 params3 = ReadCapReg32(XHCI_HCSPARAMS3);
	fExitLatMax = HCS_U1_DEVICE_LATENCY(params3)
		+ HCS_U2_DEVICE_LATENCY(params3);

	// clear interrupts & disable device notifications
	WriteOpReg(XHCI_STS, ReadOpReg(XHCI_STS));
	WriteOpReg(XHCI_DNCTRL, 0);

	// allocate Device Context Base Address array
	phys_addr_t dmaAddress;
	fDcbaArea = fStack->AllocateArea((void **)&fDcba, &dmaAddress,
		sizeof(*fDcba), "DCBA Area");
	if (fDcbaArea < B_OK) {
		TRACE_ERROR("unable to create the DCBA area\n");
		return B_ERROR;
	}
	memset(fDcba, 0, sizeof(*fDcba));
	memset(fScratchpadArea, 0, sizeof(fScratchpadArea));
	memset(fScratchpad, 0, sizeof(fScratchpad));

	// setting the first address to the scratchpad array address
	fDcba->baseAddress[0] = dmaAddress
		+ offsetof(struct xhci_device_context_array, scratchpad);

	// fill up the scratchpad array with scratchpad pages
	for (uint32 i = 0; i < fScratchpadCount; i++) {
		phys_addr_t scratchDmaAddress;
		fScratchpadArea[i] = fStack->AllocateArea((void **)&fScratchpad[i],
			&scratchDmaAddress, B_PAGE_SIZE, "Scratchpad Area");
		if (fScratchpadArea[i] < B_OK) {
			TRACE_ERROR("unable to create the scratchpad area\n");
			return B_ERROR;
		}
		fDcba->scratchpad[i] = scratchDmaAddress;
	}

	TRACE("setting DCBAAP %" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteOpReg(XHCI_DCBAAP_LO, (uint32)dmaAddress);
	WriteOpReg(XHCI_DCBAAP_HI, (uint32)(dmaAddress >> 32));

	// allocate Event Ring Segment Table
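	// (One area holds the single ERST element, the event ring TRBs and the
	// command ring TRBs, laid out in that order.)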
	uint8 *addr;
	fErstArea = fStack->AllocateArea((void **)&addr, &dmaAddress,
		(XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb)
			+ sizeof(xhci_erst_element),
		"USB XHCI ERST CMD_RING and EVENT_RING Area");

	if (fErstArea < B_OK) {
		TRACE_ERROR("unable to create the ERST AND RING area\n");
		delete_area(fDcbaArea);
		return B_ERROR;
	}
	fErst = (xhci_erst_element *)addr;
	memset(fErst, 0, (XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb)
		+ sizeof(xhci_erst_element));

	// fill with Event Ring Segment Base Address and Event Ring Segment Size
	fErst->rs_addr = dmaAddress + sizeof(xhci_erst_element);
	fErst->rs_size = XHCI_MAX_EVENTS;
	fErst->rsvdz = 0;

	addr += sizeof(xhci_erst_element);
	fEventRing = (xhci_trb *)addr;
	addr += XHCI_MAX_EVENTS * sizeof(xhci_trb);
	fCmdRing = (xhci_trb *)addr;

	TRACE("setting ERST size\n");
	WriteRunReg32(XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	TRACE("setting ERDP addr = 0x%" B_PRIx64 "\n", fErst->rs_addr);
	WriteRunReg32(XHCI_ERDP_LO(0), (uint32)fErst->rs_addr);
	WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(fErst->rs_addr >> 32));

	TRACE("setting ERST base addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteRunReg32(XHCI_ERSTBA_LO(0), (uint32)dmaAddress);
	WriteRunReg32(XHCI_ERSTBA_HI(0), (uint32)(dmaAddress >> 32));

	dmaAddress += sizeof(xhci_erst_element) + XHCI_MAX_EVENTS
		* sizeof(xhci_trb);

	// Make sure the Command Ring is stopped
	if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) {
		TRACE_ALWAYS("Command Ring is running, send stop/cancel\n");
		WriteOpReg(XHCI_CRCR_LO, CRCR_CS);
		WriteOpReg(XHCI_CRCR_HI, 0);
		WriteOpReg(XHCI_CRCR_LO, CRCR_CA);
		WriteOpReg(XHCI_CRCR_HI, 0);
		snooze(1000);
		if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) {
			TRACE_ERROR("Command Ring still running after stop/cancel\n");
		}
	}
	TRACE("setting CRCR addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress);
	WriteOpReg(XHCI_CRCR_LO, (uint32)dmaAddress | CRCR_RCS);
	WriteOpReg(XHCI_CRCR_HI, (uint32)(dmaAddress >> 32));
	// link trb
	fCmdRing[XHCI_MAX_COMMANDS - 1].address = dmaAddress;

	TRACE("setting interrupt rate\n");

	// Setting IMOD below 0x3F8 on Intel Lynx Point can cause IRQ lockups
	if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL
		&& (fPCIInfo->device_id == PCI_DEVICE_INTEL_PANTHER_POINT_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_BAYTRAIL_XHCI
			|| fPCIInfo->device_id == PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI)) {
		WriteRunReg32(XHCI_IMOD(0), 0x000003f8); // 4000 irq/s
	} else {
		WriteRunReg32(XHCI_IMOD(0), 0x000001f4); // 8000 irq/s
	}

	TRACE("enabling interrupt\n");
	WriteRunReg32(XHCI_IMAN(0), ReadRunReg32(XHCI_IMAN(0)) | IMAN_INTR_ENA);

	WriteOpReg(XHCI_CMD, CMD_RUN | CMD_INTE | CMD_HSEE);

	// wait for start up state
	if (WaitOpBits(XHCI_STS, STS_HCH, 0) != B_OK) {
		TRACE_ERROR("HCH start up timeout\n");
	}

	fRootHub = new(std::nothrow) XHCIRootHub(RootObject(), 1);
	if (!fRootHub) {
		TRACE_ERROR("no memory to allocate root hub\n");
		return B_NO_MEMORY;
	}

	if (fRootHub->InitCheck() < B_OK) {
		TRACE_ERROR("root hub failed init check\n");
		return fRootHub->InitCheck();
	}

	SetRootHub(fRootHub);

	TRACE_ALWAYS("successfully started the controller\n");

#ifdef TRACE_USB
	TRACE("No-Op test...\n");
	Noop();
#endif

	return BusManager::Start();
}
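

// Transfers addressed to the root hub are emulated in software; all other
// transfers are dispatched to the control or normal (bulk/interrupt/isoch)
// request path below.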
status_t
XHCI::SubmitTransfer(Transfer *transfer)
{
	// short circuit the root hub
	if (transfer->TransferPipe()->DeviceAddress() == 1)
		return fRootHub->ProcessTransfer(this, transfer);

	TRACE("SubmitTransfer(%p)\n", transfer);
	Pipe *pipe = transfer->TransferPipe();
	if ((pipe->Type() & USB_OBJECT_CONTROL_PIPE) != 0)
		return SubmitControlRequest(transfer);
	return SubmitNormalRequest(transfer);
}


status_t
XHCI::SubmitControlRequest(Transfer *transfer)
{
	Pipe *pipe = transfer->TransferPipe();
	usb_request_data *requestData = transfer->RequestData();
	bool directionIn = (requestData->RequestType & USB_REQTYPE_DEVICE_IN) != 0;

	TRACE("SubmitControlRequest() length %d\n", requestData->Length);

	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL) {
		TRACE_ERROR("control pipe has no endpoint!\n");
		return B_BAD_VALUE;
	}
	if (endpoint->device == NULL) {
		panic("endpoint is not initialized!");
		return B_NO_INIT;
	}

	status_t status = transfer->InitKernelAccess();
	if (status != B_OK)
		return status;

	xhci_td *descriptor = CreateDescriptor(3, 1, requestData->Length);
	if (descriptor == NULL)
		return B_NO_MEMORY;
	descriptor->transfer = transfer;

	// Setup Stage
	uint8 index = 0;
	memcpy(&descriptor->trbs[index].address, requestData,
		sizeof(usb_request_data));
	descriptor->trbs[index].status = TRB_2_IRQ(0) | TRB_2_BYTES(8);
	descriptor->trbs[index].flags
		= TRB_3_TYPE(TRB_TYPE_SETUP_STAGE) | TRB_3_IDT_BIT | TRB_3_CYCLE_BIT;
	if (requestData->Length > 0) {
		descriptor->trbs[index].flags |=
			directionIn ? TRB_3_TRT_IN : TRB_3_TRT_OUT;
	}

	index++;

	// Data Stage (if any)
	if (requestData->Length > 0) {
		descriptor->trbs[index].address = descriptor->buffer_addrs[0];
		descriptor->trbs[index].status = TRB_2_IRQ(0)
			| TRB_2_BYTES(requestData->Length)
			| TRB_2_TD_SIZE(0);
		descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_DATA_STAGE)
			| (directionIn ? TRB_3_DIR_IN : 0)
			| TRB_3_CYCLE_BIT;

		if (!directionIn) {
			transfer->PrepareKernelAccess();
			memcpy(descriptor->buffers[0],
				(uint8 *)transfer->Vector()[0].iov_base, requestData->Length);
		}

		index++;
	}

	// Status Stage
	descriptor->trbs[index].address = 0;
	descriptor->trbs[index].status = TRB_2_IRQ(0);
	descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_STATUS_STAGE)
		| ((directionIn && requestData->Length > 0) ? 0 : TRB_3_DIR_IN)
		| TRB_3_CHAIN_BIT | TRB_3_ENT_BIT | TRB_3_CYCLE_BIT;
		// Status Stage is an OUT transfer when the device is sending data
		// (XHCI 1.2 § 4.11.2.2 Table 4-7 p213), and the CHAIN bit must be
		// set when using an Event Data TRB (as _LinkDescriptorForPipe does)
		// (XHCI 1.2 § 6.4.1.2.3 Table 6-31 p472)

	descriptor->trb_used = index + 1;

	status = _LinkDescriptorForPipe(descriptor, endpoint);
	if (status != B_OK) {
		FreeDescriptor(descriptor);
		return status;
	}

	return B_OK;
}
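

// SubmitNormalRequest() builds a TD for bulk, interrupt and isochronous
// pipes: the payload is split into TRBs no larger than the endpoint's max
// burst payload, and for isochronous transfers the first TRB is converted
// into an Isoch TRB.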
status_t
XHCI::SubmitNormalRequest(Transfer *transfer)
{
	TRACE("SubmitNormalRequest() length %" B_PRIuSIZE "\n", transfer->FragmentLength());

	Pipe *pipe = transfer->TransferPipe();
	usb_isochronous_data *isochronousData = transfer->IsochronousData();
	bool directionIn = (pipe->Direction() == Pipe::In);

	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL) {
		TRACE_ERROR("pipe has no endpoint!\n");
		return B_BAD_VALUE;
	}
	if (endpoint->device == NULL) {
		panic("endpoint is not initialized!");
		return B_NO_INIT;
	}

	status_t status = transfer->InitKernelAccess();
	if (status != B_OK)
		return status;

	// TRBs within a TD must be "grouped" into TD Fragments, which mostly means
	// that a max_burst_payload boundary cannot be crossed within a TRB, but
	// only between TRBs. More than one TRB can be in a TD Fragment, but we keep
	// things simple by setting trbSize to the MBP. (XHCI 1.2 § 4.11.7.1 p235.)
	size_t trbSize = endpoint->max_burst_payload;

	if (isochronousData != NULL) {
		if (isochronousData->packet_count == 0)
			return B_BAD_VALUE;

		// Isochronous transfers use more specifically sized packets.
		trbSize = transfer->DataLength() / isochronousData->packet_count;
		if (trbSize == 0 || trbSize > pipe->MaxPacketSize() || trbSize
				!= (size_t)isochronousData->packet_descriptors[0].request_length)
			return B_BAD_VALUE;
	}

	// Now that we know trbSize, compute the count.
	const int32 trbCount = (transfer->FragmentLength() + trbSize - 1) / trbSize;

	xhci_td *td = CreateDescriptor(trbCount, trbCount, trbSize);
	if (td == NULL)
		return B_NO_MEMORY;

	// Normal Stage
	const size_t maxPacketSize = pipe->MaxPacketSize();
	size_t remaining = transfer->FragmentLength();
	for (int32 i = 0; i < trbCount; i++) {
		int32 trbLength = (remaining < trbSize) ? remaining : trbSize;
		remaining -= trbLength;

		// The "TD Size" field of a transfer TRB indicates the number of
		// remaining maximum-size *packets* in this TD, *not* including the
		// packets in the current TRB, and capped at 31 if there are more
		// than 31 packets remaining in the TD. (XHCI 1.2 § 4.11.2.4 p218.)
		int32 tdSize = (remaining + maxPacketSize - 1) / maxPacketSize;
		if (tdSize > 31)
			tdSize = 31;

		td->trbs[i].address = td->buffer_addrs[i];
		td->trbs[i].status = TRB_2_IRQ(0)
			| TRB_2_BYTES(trbLength)
			| TRB_2_TD_SIZE(tdSize);
		td->trbs[i].flags = TRB_3_TYPE(TRB_TYPE_NORMAL)
			| TRB_3_CYCLE_BIT | TRB_3_CHAIN_BIT;

		td->trb_used++;
	}

	// Isochronous-specific
	if (isochronousData != NULL) {
		// This is an isochronous transfer; we need to make the first TRB
		// an isochronous TRB.
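		// (The remaining TRBs of the TD stay Normal TRBs chained onto it.)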
		td->trbs[0].flags &= ~(TRB_3_TYPE(TRB_TYPE_NORMAL));
		td->trbs[0].flags |= TRB_3_TYPE(TRB_TYPE_ISOCH);

		// Isochronous pipes are scheduled by microframes, one of which
		// is 125us for USB 2 and above. But for USB 1 it was 1ms, so
		// we need to use a different frame delta for that case.
		uint8 frameDelta = 1;
		if (transfer->TransferPipe()->Speed() == USB_SPEED_FULLSPEED)
			frameDelta = 8;

		// TODO: We do not currently take Mult into account at all!
		// How are we supposed to do that here?

		// Determine the (starting) frame number: if ISO_ASAP is set,
		// we are queueing this "right away", and so want to reset
		// the starting_frame_number. Otherwise we use the passed one.
		uint32 frame;
		if ((isochronousData->flags & USB_ISO_ASAP) != 0
				|| isochronousData->starting_frame_number == NULL) {
			// All reads from the microframe index register must be
			// incremented by 1. (XHCI 1.2 § 4.14.2.1.4 p265.)
			frame = ReadRunReg32(XHCI_MFINDEX) + 1;
			td->trbs[0].flags |= TRB_3_ISO_SIA_BIT;
		} else {
			frame = *isochronousData->starting_frame_number;
			td->trbs[0].flags |= TRB_3_FRID(frame);
		}
		frame = (frame + frameDelta) % 2048;
		if (isochronousData->starting_frame_number != NULL)
			*isochronousData->starting_frame_number = frame;

		// TODO: The OHCI bus driver seems to also do this for inbound
		// isochronous transfers. Perhaps it should be moved into the stack?
		if (directionIn) {
			for (uint32 i = 0; i < isochronousData->packet_count; i++) {
				isochronousData->packet_descriptors[i].actual_length = 0;
				isochronousData->packet_descriptors[i].status = B_NO_INIT;
			}
		}
	}

	// Set the ENT (Evaluate Next TRB) bit, so that the HC will not switch
	// contexts before evaluating the Link TRB that _LinkDescriptorForPipe
	// will insert, as otherwise there would be a race between us freeing
	// and unlinking the descriptor, and the controller evaluating the Link TRB
	// and thus getting back onto the main ring and executing the Event Data
	// TRB that generates the interrupt for this transfer.
	//
	// Note that we *do not* unset the CHAIN bit in this TRB, thus including
	// the Link TRB in this TD formally, which is required when using the
	// ENT bit. (XHCI 1.2 § 4.12.3 p250.)
	td->trbs[td->trb_used - 1].flags |= TRB_3_ENT_BIT;

	if (!directionIn) {
		TRACE("copying out iov count %ld\n", transfer->VectorCount());
		status_t status = transfer->PrepareKernelAccess();
		if (status != B_OK) {
			FreeDescriptor(td);
			return status;
		}
		WriteDescriptor(td, transfer->Vector(), transfer->VectorCount());
	}

	td->transfer = transfer;
	status = _LinkDescriptorForPipe(td, endpoint);
	if (status != B_OK) {
		FreeDescriptor(td);
		return status;
	}

	return B_OK;
}


status_t
XHCI::CancelQueuedTransfers(Pipe *pipe, bool force)
{
	xhci_endpoint* endpoint = (xhci_endpoint*)pipe->ControllerCookie();
	if (endpoint == NULL || endpoint->trbs == NULL) {
		// Someone's de-allocated this pipe or endpoint in the meantime.
		// (Possibly AllocateDevice failed, and we were the temporary pipe.)
		return B_NO_INIT;
	}

#ifndef TRACE_USB
	if (force)
#endif
	{
		TRACE_ALWAYS("cancel queued transfers (%" B_PRId8 ") for pipe %p (%d)\n",
			endpoint->used, pipe, pipe->EndpointAddress());
	}

	MutexLocker endpointLocker(endpoint->lock);

	if (endpoint->td_head == NULL) {
		// There aren't any currently pending transfers to cancel.
		return B_OK;
	}

	// Calling the callbacks while holding the endpoint lock could potentially
	// cause deadlocks, so we instead store them in a pointer array. We need
	// to do this separately from freeing the TDs, for in the case we fail
	// to stop the endpoint, we cancel the transfers but do not free the TDs.
	Transfer* transfers[XHCI_MAX_TRANSFERS];
	int32 transfersCount = 0;

	for (xhci_td* td = endpoint->td_head; td != NULL; td = td->next) {
		if (td->transfer == NULL)
			continue;

		// We can't cancel or delete transfers under "force", as they probably
		// are not safe to use anymore.
		if (!force) {
			transfers[transfersCount] = td->transfer;
			transfersCount++;
		}
		td->transfer = NULL;
	}

	// It is possible that while waiting for the stop-endpoint command to
	// complete, one of the queued transfers posts a completion event, so in
	// order to avoid a deadlock, we must unlock the endpoint.
	endpointLocker.Unlock();
	status_t status = StopEndpoint(false, endpoint);
	if (status == B_DEV_STALLED) {
		// Only exit from a Halted state is a reset. (XHCI 1.2 § 4.8.3 p163.)
		TRACE_ERROR("cancel queued transfers: halted endpoint, reset!\n");
		status = ResetEndpoint(false, endpoint);
	}
	endpointLocker.Lock();

	// Detach the head TD from the endpoint.
	xhci_td* td_head = endpoint->td_head;
	endpoint->td_head = NULL;

	if (status == B_OK) {
		// Clear the endpoint's TRBs.
		memset(endpoint->trbs, 0, sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE);
		endpoint->used = 0;
		endpoint->current = 0;

		// Set dequeue pointer location to the beginning of the ring.
		SetTRDequeue(endpoint->trb_addr, 0, endpoint->id + 1,
			endpoint->device->slot);

		// We don't need to do anything else to restart the ring, as it will resume
		// operation as normal upon the next doorbell. (XHCI 1.2 § 4.6.9 p136.)
	} else {
		// We couldn't stop the endpoint. Most likely the device has been
		// removed and the endpoint was stopped by the hardware, or is
		// for some reason busy and cannot be stopped.
		TRACE_ERROR("cancel queued transfers: could not stop endpoint: %s!\n",
			strerror(status));

		// Instead of freeing the TDs, we want to leave them in the endpoint
		// so that when/if the hardware returns, they can be properly unlinked,
		// as otherwise the endpoint could get "stuck" by having the "used"
		// slowly accumulate due to "dead" transfers.
		endpoint->td_head = td_head;
		td_head = NULL;
	}

	endpointLocker.Unlock();

	for (int32 i = 0; i < transfersCount; i++) {
		transfers[i]->Finished(B_CANCELED, 0);
		delete transfers[i];
	}

	// This loop looks a bit strange because we need to store the "next"
	// pointer before freeing the descriptor.
	xhci_td* td;
	while ((td = td_head) != NULL) {
		td_head = td_head->next;
		FreeDescriptor(td);
	}

	return B_OK;
}
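

// Debug transfers are polled synchronously (for instance from within the
// kernel debugger), so they must not block: StartDebugTransfer() only
// proceeds when every lock the transfer path needs can be taken without
// waiting.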
status_t
XHCI::StartDebugTransfer(Transfer *transfer)
{
	Pipe *pipe = transfer->TransferPipe();
	xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie();
	if (endpoint == NULL)
		return B_BAD_VALUE;

	// Check all locks that we are going to hit when running transfers.
	if (mutex_trylock(&endpoint->lock) != B_OK)
		return B_WOULD_BLOCK;
	if (mutex_trylock(&fFinishedLock) != B_OK) {
		mutex_unlock(&endpoint->lock);
		return B_WOULD_BLOCK;
	}
	if (mutex_trylock(&fEventLock) != B_OK) {
		mutex_unlock(&endpoint->lock);
		mutex_unlock(&fFinishedLock);
		return B_WOULD_BLOCK;
	}
	mutex_unlock(&endpoint->lock);
	mutex_unlock(&fFinishedLock);
	mutex_unlock(&fEventLock);

	status_t status = SubmitTransfer(transfer);
	if (status != B_OK)
		return status;

	// The endpoint's head TD is the TD of the just-submitted transfer.
	// Just like EHCI, abuse the callback cookie to hold the TD pointer.
	transfer->SetCallback(NULL, endpoint->td_head);

	return B_OK;
}


status_t
XHCI::CheckDebugTransfer(Transfer *transfer)
{
	xhci_td *transfer_td = (xhci_td *)transfer->CallbackCookie();
	if (transfer_td == NULL)
		return B_NO_INIT;

	// Process events once, and then look for it in the finished list.
	ProcessEvents();
	xhci_td *previous = NULL;
	for (xhci_td *td = fFinishedHead; td != NULL; td = td->next) {
		if (td != transfer_td) {
			previous = td;
			continue;
		}

		// We've found it!
		if (previous == NULL) {
			fFinishedHead = fFinishedHead->next;
		} else {
			previous->next = td->next;
		}

		bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out);
		status_t status = (td->trb_completion_code == COMP_SUCCESS
			|| td->trb_completion_code == COMP_SHORT_PACKET) ? B_OK : B_ERROR;

		if (status == B_OK && directionIn)
			ReadDescriptor(td, transfer->Vector(), transfer->VectorCount());

		FreeDescriptor(td);
		transfer->SetCallback(NULL, NULL);
		return status;
	}

	// We didn't find it.
	spin(75);
	return B_DEV_PENDING;
}


void
XHCI::CancelDebugTransfer(Transfer *transfer)
{
	while (CheckDebugTransfer(transfer) == B_DEV_PENDING)
		spin(100);
}


status_t
XHCI::NotifyPipeChange(Pipe *pipe, usb_change change)
{
	TRACE("pipe change %d for pipe %p (%d)\n", change, pipe,
		pipe->EndpointAddress());

	switch (change) {
		case USB_CHANGE_CREATED:
			return _InsertEndpointForPipe(pipe);
		case USB_CHANGE_DESTROYED:
			return _RemoveEndpointForPipe(pipe);

		case USB_CHANGE_PIPE_POLICY_CHANGED:
			// We don't care about these, at least for now.
			return B_OK;
	}

	TRACE_ERROR("unknown pipe change!\n");
	return B_UNSUPPORTED;
}


xhci_td *
XHCI::CreateDescriptor(uint32 trbCount, uint32 bufferCount, size_t bufferSize)
{
	const bool inKDL = debug_debugger_running();

	xhci_td *result;
	if (!inKDL) {
		result = (xhci_td*)calloc(1, sizeof(xhci_td));
	} else {
		// Just use the physical memory allocator while in KDL; it's less
		// secure than using the regular heap, but it's easier to deal with.
		phys_addr_t dummy;
		fStack->AllocateChunk((void **)&result, &dummy, sizeof(xhci_td));
	}

	if (result == NULL) {
		TRACE_ERROR("failed to allocate a transfer descriptor\n");
		return NULL;
	}

	// We always allocate 1 more TRB than requested, so that
	// _LinkDescriptorForPipe() has room to insert a link TRB.
	trbCount++;
	if (fStack->AllocateChunk((void **)&result->trbs, &result->trb_addr,
			(trbCount * sizeof(xhci_trb))) < B_OK) {
		TRACE_ERROR("failed to allocate TRBs\n");
		FreeDescriptor(result);
		return NULL;
	}
	result->trb_count = trbCount;
	result->trb_used = 0;

	if (bufferSize > 0) {
		// Due to how the USB stack allocates physical memory, we can't just
		// request one large chunk the size of the transfer, and so instead we
		// create a series of buffers as requested by our caller.

		// We store the buffer pointers and addresses in one memory block.
		if (!inKDL) {
			result->buffers = (void**)calloc(bufferCount,
				(sizeof(void*) + sizeof(phys_addr_t)));
		} else {
			phys_addr_t dummy;
			fStack->AllocateChunk((void **)&result->buffers, &dummy,
				bufferCount * (sizeof(void*) + sizeof(phys_addr_t)));
		}
		if (result->buffers == NULL) {
			TRACE_ERROR("unable to allocate space for buffer infos\n");
			FreeDescriptor(result);
			return NULL;
		}
		result->buffer_addrs = (phys_addr_t*)&result->buffers[bufferCount];
		result->buffer_size = bufferSize;
		result->buffer_count = bufferCount;

		// Optimization: If the requested total size of all buffers is less
		// than 32*B_PAGE_SIZE (the maximum size that the physical memory
		// allocator can handle), we allocate only one buffer and segment it.
		size_t totalSize = bufferSize * bufferCount;
		if (totalSize < (32 * B_PAGE_SIZE)) {
			if (fStack->AllocateChunk(&result->buffers[0],
					&result->buffer_addrs[0], totalSize) < B_OK) {
				TRACE_ERROR("unable to allocate space for large buffer (size %ld)\n",
					totalSize);
				FreeDescriptor(result);
				return NULL;
			}
			for (uint32 i = 1; i < bufferCount; i++) {
				result->buffers[i] = (void*)((addr_t)(result->buffers[i - 1])
					+ bufferSize);
				result->buffer_addrs[i] = result->buffer_addrs[i - 1]
					+ bufferSize;
			}
		} else {
			// Otherwise, we allocate each buffer individually.
			for (uint32 i = 0; i < bufferCount; i++) {
				if (fStack->AllocateChunk(&result->buffers[i],
						&result->buffer_addrs[i], bufferSize) < B_OK) {
					TRACE_ERROR("unable to allocate space for a buffer (size "
						"%" B_PRIuSIZE ", count %" B_PRIu32 ")\n",
						bufferSize, bufferCount);
					FreeDescriptor(result);
					return NULL;
				}
			}
		}
	} else {
		result->buffers = NULL;
		result->buffer_addrs = NULL;
	}

	// Initialize all other fields.
	result->transfer = NULL;
	result->trb_completion_code = 0;
	result->trb_left = 0;
	result->next = NULL;

	TRACE("CreateDescriptor allocated %p, buffer_size %ld, buffer_count %" B_PRIu32 "\n",
		result, result->buffer_size, result->buffer_count);

	return result;
}


void
XHCI::FreeDescriptor(xhci_td *descriptor)
{
	if (descriptor == NULL)
		return;

	const bool inKDL = debug_debugger_running();

	if (descriptor->trbs != NULL) {
		fStack->FreeChunk(descriptor->trbs, descriptor->trb_addr,
			(descriptor->trb_count * sizeof(xhci_trb)));
	}
	if (descriptor->buffers != NULL) {
		size_t totalSize = descriptor->buffer_size * descriptor->buffer_count;
		if (totalSize < (32 * B_PAGE_SIZE)) {
			// This was allocated as one contiguous buffer.
			fStack->FreeChunk(descriptor->buffers[0], descriptor->buffer_addrs[0],
				totalSize);
		} else {
			for (uint32 i = 0; i < descriptor->buffer_count; i++) {
				if (descriptor->buffers[i] == NULL)
					continue;
				fStack->FreeChunk(descriptor->buffers[i], descriptor->buffer_addrs[i],
					descriptor->buffer_size);
			}
		}

		if (!inKDL) {
			free(descriptor->buffers);
		} else {
			fStack->FreeChunk(descriptor->buffers, 0,
				descriptor->buffer_count * (sizeof(void*) + sizeof(phys_addr_t)));
		}
	}

	if (!inKDL)
		free(descriptor);
	else
		fStack->FreeChunk(descriptor, 0, sizeof(xhci_td));
}


size_t
XHCI::WriteDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount)
{
	size_t written = 0;

	size_t bufIdx = 0, bufUsed = 0;
	for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) {
		size_t length = vector[vecIdx].iov_len;

		while (length > 0 && bufIdx < descriptor->buffer_count) {
			size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed);
			memcpy((uint8 *)descriptor->buffers[bufIdx] + bufUsed,
				(uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length),
				toCopy);

			written += toCopy;
			bufUsed += toCopy;
			length -= toCopy;
			if (bufUsed == descriptor->buffer_size) {
				bufIdx++;
				bufUsed = 0;
			}
		}
	}

	TRACE("wrote descriptor (%" B_PRIuSIZE " bytes)\n", written);
	return written;
}


size_t
XHCI::ReadDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount)
{
	size_t read = 0;

	size_t bufIdx = 0, bufUsed = 0;
	for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) {
		size_t length = vector[vecIdx].iov_len;

		while (length > 0 && bufIdx < descriptor->buffer_count) {
			size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed);
			memcpy((uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length),
				(uint8 *)descriptor->buffers[bufIdx] + bufUsed, toCopy);

			read += toCopy;
			bufUsed += toCopy;
			length -= toCopy;
			if (bufUsed == descriptor->buffer_size) {
				bufIdx++;
				bufUsed = 0;
			}
		}
	}

	TRACE("read descriptor (%" B_PRIuSIZE " bytes)\n", read);
	return read;
}
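

// AllocateDevice() enables a device slot, sets up its input and device
// contexts and the default control endpoint, addresses the device, reads
// the start of the device descriptor, and then creates the stack's Device
// (or Hub) object for it.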
Device *
XHCI::AllocateDevice(Hub *parent, int8 hubAddress, uint8 hubPort,
	usb_speed speed)
{
	TRACE("AllocateDevice hubAddress %d hubPort %d speed %d\n", hubAddress,
		hubPort, speed);

	uint8 slot = XHCI_MAX_SLOTS;
	status_t status = EnableSlot(&slot);
	if (status != B_OK) {
		TRACE_ERROR("failed to enable slot: %s\n", strerror(status));
		return NULL;
	}

	if (slot == 0 || slot > fSlotCount) {
		TRACE_ERROR("AllocateDevice: bad slot\n");
		return NULL;
	}

	if (fDevices[slot].slot != 0) {
		TRACE_ERROR("AllocateDevice: slot already used\n");
		return NULL;
	}

	struct xhci_device *device = &fDevices[slot];
	device->slot = slot;

	device->input_ctx_area = fStack->AllocateArea((void **)&device->input_ctx,
		&device->input_ctx_addr, sizeof(*device->input_ctx) << fContextSizeShift,
		"XHCI input context");
	if (device->input_ctx_area < B_OK) {
		TRACE_ERROR("unable to create a input context area\n");
		CleanupDevice(device);
		return NULL;
	}
	if (fContextSizeShift == 1) {
		// 64-byte contexts have to be page-aligned in order for
		// _OffsetContextAddr to function properly.
		ASSERT((((addr_t)device->input_ctx) % B_PAGE_SIZE) == 0);
	}

	memset(device->input_ctx, 0, sizeof(*device->input_ctx) << fContextSizeShift);
	_WriteContext(&device->input_ctx->input.dropFlags, 0);
	_WriteContext(&device->input_ctx->input.addFlags, 3);

	uint8 rhPort = hubPort;
	uint32 route = 0;
	for (Device *hubDevice = parent; hubDevice != RootObject();
			hubDevice = (Device *)hubDevice->Parent()) {
		if (hubDevice->Parent() == RootObject())
			break;

		if (rhPort > 15)
			rhPort = 15;
		route = route << 4;
		route |= rhPort;

		rhPort = hubDevice->HubPort();
	}

	uint32 dwslot0 = SLOT_0_NUM_ENTRIES(1) | SLOT_0_ROUTE(route);

	// Get speed of port, only if device connected to root hub port
	// else we have to rely on value reported by the Hub Explore thread
	if (route == 0) {
		GetPortSpeed(hubPort - 1, &speed);
		TRACE("speed updated %d\n", speed);
	}

	// add the speed
	switch (speed) {
		case USB_SPEED_LOWSPEED:
			dwslot0 |= SLOT_0_SPEED(2);
			break;
		case USB_SPEED_FULLSPEED:
			dwslot0 |= SLOT_0_SPEED(1);
			break;
		case USB_SPEED_HIGHSPEED:
			dwslot0 |= SLOT_0_SPEED(3);
			break;
		case USB_SPEED_SUPERSPEED:
			dwslot0 |= SLOT_0_SPEED(4);
			break;
		default:
			TRACE_ERROR("unknown usb speed\n");
			break;
	}

	_WriteContext(&device->input_ctx->slot.dwslot0, dwslot0);
	// TODO enable power save
	_WriteContext(&device->input_ctx->slot.dwslot1, SLOT_1_RH_PORT(rhPort));
	uint32 dwslot2 = SLOT_2_IRQ_TARGET(0);

	// If LS/FS device connected to non-root HS device
	if (route != 0 && parent->Speed() == USB_SPEED_HIGHSPEED
		&& (speed == USB_SPEED_LOWSPEED || speed == USB_SPEED_FULLSPEED)) {
		struct xhci_device *parenthub = (struct xhci_device *)
			parent->ControllerCookie();
		dwslot2 |= SLOT_2_PORT_NUM(hubPort);
		dwslot2 |= SLOT_2_TT_HUB_SLOT(parenthub->slot);
	}

	_WriteContext(&device->input_ctx->slot.dwslot2, dwslot2);

	_WriteContext(&device->input_ctx->slot.dwslot3, SLOT_3_SLOT_STATE(0)
		| SLOT_3_DEVICE_ADDRESS(0));

	TRACE("slot 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32
		"\n", _ReadContext(&device->input_ctx->slot.dwslot0),
		_ReadContext(&device->input_ctx->slot.dwslot1),
		_ReadContext(&device->input_ctx->slot.dwslot2),
		_ReadContext(&device->input_ctx->slot.dwslot3));

	device->device_ctx_area = fStack->AllocateArea((void **)&device->device_ctx,
		&device->device_ctx_addr, sizeof(*device->device_ctx) << fContextSizeShift,
		"XHCI device context");
	if (device->device_ctx_area < B_OK) {
		TRACE_ERROR("unable to create a device context area\n");
		CleanupDevice(device);
		return NULL;
	}
	memset(device->device_ctx, 0, sizeof(*device->device_ctx) << fContextSizeShift);

	device->trb_area = fStack->AllocateArea((void **)&device->trbs,
		&device->trb_addr, sizeof(xhci_trb) * (XHCI_MAX_ENDPOINTS - 1)
			* XHCI_ENDPOINT_RING_SIZE, "XHCI endpoint trbs");
	if (device->trb_area < B_OK) {
		TRACE_ERROR("unable to create a device trbs area\n");
		CleanupDevice(device);
		return NULL;
	}

	// set up slot pointer to device context
	fDcba->baseAddress[slot] = device->device_ctx_addr;

	size_t maxPacketSize;
	switch (speed) {
		case USB_SPEED_LOWSPEED:
		case USB_SPEED_FULLSPEED:
			maxPacketSize = 8;
			break;
		case USB_SPEED_HIGHSPEED:
			maxPacketSize = 64;
			break;
		default:
			maxPacketSize = 512;
			break;
	}

	xhci_endpoint* endpoint0 = &device->endpoints[0];
	mutex_init(&endpoint0->lock, "xhci endpoint lock");
	endpoint0->device = device;
	endpoint0->id = 0;
	endpoint0->td_head = NULL;
	endpoint0->used = 0;
	endpoint0->current = 0;
	endpoint0->trbs = device->trbs;
	endpoint0->trb_addr = device->trb_addr;

	// configure the Control endpoint 0
	if (ConfigureEndpoint(endpoint0, slot, 0, USB_OBJECT_CONTROL_PIPE, false,
			0, maxPacketSize, speed, 0, 0) != B_OK) {
		TRACE_ERROR("unable to configure default control endpoint\n");
		CleanupDevice(device);
		return NULL;
	}

	// device should get to addressed state (bsr = 0)
	status = SetAddress(device->input_ctx_addr, false, slot);
	if (status != B_OK) {
		TRACE_ERROR("unable to set address: %s\n", strerror(status));
		CleanupDevice(device);
		return NULL;
	}

	device->address = SLOT_3_DEVICE_ADDRESS_GET(_ReadContext(
		&device->device_ctx->slot.dwslot3));

	TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", device->address,
		SLOT_3_SLOT_STATE_GET(_ReadContext(
			&device->device_ctx->slot.dwslot3)));
	TRACE("endpoint0 state 0x%08" B_PRIx32 "\n",
		ENDPOINT_0_STATE_GET(_ReadContext(
			&device->device_ctx->endpoints[0].dwendpoint0)));

	// Wait a bit for the device to complete addressing
	snooze(USB_DELAY_SET_ADDRESS);

	// Create a temporary pipe with the new address
	ControlPipe pipe(parent);
	pipe.SetControllerCookie(endpoint0);
	pipe.InitCommon(device->address + 1, 0, speed, Pipe::Default, maxPacketSize, 0,
		hubAddress, hubPort);

	// Get the device descriptor
	// Just retrieve the first 8 bytes of the descriptor -> minimum supported
	// size of any device. It is enough because it includes the device type.

	size_t actualLength = 0;
	usb_device_descriptor deviceDescriptor;

	TRACE("getting the device descriptor\n");
	status = pipe.SendRequest(
		USB_REQTYPE_DEVICE_IN | USB_REQTYPE_STANDARD,	// type
		USB_REQUEST_GET_DESCRIPTOR,						// request
		USB_DESCRIPTOR_DEVICE << 8,						// value
		0,												// index
		8,												// length
		(void *)&deviceDescriptor,						// buffer
		8,												// buffer length
		&actualLength);									// actual length

	if (actualLength != 8) {
		TRACE_ERROR("failed to get the device descriptor: %s\n",
			strerror(status));
		CleanupDevice(device);
		return NULL;
	}

	TRACE("device_class: %d device_subclass %d device_protocol %d\n",
		deviceDescriptor.device_class, deviceDescriptor.device_subclass,
		deviceDescriptor.device_protocol);

	if (speed == USB_SPEED_FULLSPEED && deviceDescriptor.max_packet_size_0 != 8) {
		TRACE("Full speed device with different max packet size for Endpoint 0\n");
		uint32 dwendpoint1 = _ReadContext(
			&device->input_ctx->endpoints[0].dwendpoint1);
		dwendpoint1 &= ~ENDPOINT_1_MAXPACKETSIZE(0xffff);
		dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE(
			deviceDescriptor.max_packet_size_0);
		_WriteContext(&device->input_ctx->endpoints[0].dwendpoint1,
			dwendpoint1);
		_WriteContext(&device->input_ctx->input.dropFlags, 0);
		_WriteContext(&device->input_ctx->input.addFlags, (1 << 1));
		EvaluateContext(device->input_ctx_addr, device->slot);
	}

	Device *deviceObject = NULL;
	if (deviceDescriptor.device_class == 0x09) {
		TRACE("creating new Hub\n");
		TRACE("getting the hub descriptor\n");
		size_t actualLength = 0;
		usb_hub_descriptor hubDescriptor;
		status = pipe.SendRequest(
			USB_REQTYPE_DEVICE_IN | USB_REQTYPE_CLASS,	// type
			USB_REQUEST_GET_DESCRIPTOR,					// request
			USB_DESCRIPTOR_HUB << 8,					// value
			0,											// index
			sizeof(usb_hub_descriptor),					// length
			(void *)&hubDescriptor,						// buffer
			sizeof(usb_hub_descriptor),					// buffer length
			&actualLength);

		if (actualLength != sizeof(usb_hub_descriptor)) {
			TRACE_ERROR("error while getting the hub descriptor: %s\n",
				strerror(status));
			CleanupDevice(device);
			return NULL;
		}

		uint32 dwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0);
		dwslot0 |= SLOT_0_HUB_BIT;
		_WriteContext(&device->input_ctx->slot.dwslot0, dwslot0);
		uint32 dwslot1 = _ReadContext(&device->input_ctx->slot.dwslot1);
		dwslot1 |= SLOT_1_NUM_PORTS(hubDescriptor.num_ports);
		_WriteContext(&device->input_ctx->slot.dwslot1, dwslot1);
		if (speed == USB_SPEED_HIGHSPEED) {
			uint32 dwslot2 = _ReadContext(&device->input_ctx->slot.dwslot2);
			dwslot2 |= SLOT_2_TT_TIME(HUB_TTT_GET(hubDescriptor.characteristics));
			_WriteContext(&device->input_ctx->slot.dwslot2, dwslot2);
		}

		deviceObject = new(std::nothrow) Hub(parent, hubAddress, hubPort,
			deviceDescriptor, device->address + 1, speed, false, device);
	} else {
		TRACE("creating new device\n");
		deviceObject = new(std::nothrow) Device(parent, hubAddress, hubPort,
			deviceDescriptor, device->address + 1, speed, false, device);
	}
	if (deviceObject == NULL || deviceObject->InitCheck() != B_OK) {
		if (deviceObject == NULL) {
			TRACE_ERROR("no memory to allocate device\n");
		} else {
			TRACE_ERROR("device object failed to initialize\n");
		}
		CleanupDevice(device);
		return NULL;
	}

	// We don't want to disable the default endpoint, naturally, which would
	// otherwise happen when this Pipe object is destroyed.
	pipe.SetControllerCookie(NULL);

	TRACE("AllocateDevice() port %d slot %d\n", hubPort, slot);
	return deviceObject;
}


void
XHCI::FreeDevice(Device *usbDevice)
{
	xhci_device* device = (xhci_device*)usbDevice->ControllerCookie();
	TRACE("FreeDevice() slot %d\n", device->slot);

	// Delete the device first, so it cleans up its pipes and tells us
	// what we need to destroy before we tear down our internal state.
	delete usbDevice;

	CleanupDevice(device);
}


void
XHCI::CleanupDevice(xhci_device *device)
{
	if (device->slot != 0) {
		DisableSlot(device->slot);
		fDcba->baseAddress[device->slot] = 0;
	}

	if (device->trb_addr != 0)
		delete_area(device->trb_area);
	if (device->input_ctx_addr != 0)
		delete_area(device->input_ctx_area);
	if (device->device_ctx_addr != 0)
		delete_area(device->device_ctx_area);

	memset(device, 0, sizeof(xhci_device));
}


uint8
XHCI::_GetEndpointState(xhci_endpoint* endpoint)
{
	struct xhci_device_ctx* device_ctx = endpoint->device->device_ctx;
	return ENDPOINT_0_STATE_GET(
		_ReadContext(&device_ctx->endpoints[endpoint->id].dwendpoint0));
}


status_t
XHCI::_InsertEndpointForPipe(Pipe *pipe)
{
	TRACE("insert endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress());

	if (pipe->ControllerCookie() != NULL
			|| pipe->Parent()->Type() != USB_OBJECT_DEVICE) {
		// default pipe is already referenced
		return B_OK;
	}

	Device* usbDevice = (Device *)pipe->Parent();
	if (usbDevice->Parent() == RootObject()) {
		// root hub needs no initialization
		return B_OK;
	}

	struct xhci_device *device = (struct xhci_device *)
		usbDevice->ControllerCookie();
	if (device == NULL) {
		panic("device is NULL\n");
		return B_NO_INIT;
	}

	const uint8 id = (2 * pipe->EndpointAddress()
		+ (pipe->Direction() != Pipe::Out ? 1 : 0)) - 1;
1 : 0)) - 1; 1723 if (id >= XHCI_MAX_ENDPOINTS - 1) 1724 return B_BAD_VALUE; 1725 1726 if (id > 0) { 1727 uint32 devicedwslot0 = _ReadContext(&device->device_ctx->slot.dwslot0); 1728 if (SLOT_0_NUM_ENTRIES_GET(devicedwslot0) == 1) { 1729 uint32 inputdwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0); 1730 inputdwslot0 &= ~(SLOT_0_NUM_ENTRIES(0x1f)); 1731 inputdwslot0 |= SLOT_0_NUM_ENTRIES(XHCI_MAX_ENDPOINTS - 1); 1732 _WriteContext(&device->input_ctx->slot.dwslot0, inputdwslot0); 1733 EvaluateContext(device->input_ctx_addr, device->slot); 1734 } 1735 1736 xhci_endpoint* endpoint = &device->endpoints[id]; 1737 mutex_init(&endpoint->lock, "xhci endpoint lock"); 1738 MutexLocker endpointLocker(endpoint->lock); 1739 1740 endpoint->device = device; 1741 endpoint->id = id; 1742 endpoint->td_head = NULL; 1743 endpoint->used = 0; 1744 endpoint->current = 0; 1745 1746 endpoint->trbs = device->trbs + id * XHCI_ENDPOINT_RING_SIZE; 1747 endpoint->trb_addr = device->trb_addr 1748 + id * XHCI_ENDPOINT_RING_SIZE * sizeof(xhci_trb); 1749 memset(endpoint->trbs, 0, 1750 sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE); 1751 1752 TRACE("insert endpoint for pipe: trbs, device %p endpoint %p\n", 1753 device->trbs, endpoint->trbs); 1754 TRACE("insert endpoint for pipe: trb_addr, device 0x%" B_PRIxPHYSADDR 1755 " endpoint 0x%" B_PRIxPHYSADDR "\n", device->trb_addr, 1756 endpoint->trb_addr); 1757 1758 const uint8 endpointNum = id + 1; 1759 1760 status_t status = ConfigureEndpoint(endpoint, device->slot, id, pipe->Type(), 1761 pipe->Direction() == Pipe::In, pipe->Interval(), pipe->MaxPacketSize(), 1762 usbDevice->Speed(), pipe->MaxBurst(), pipe->BytesPerInterval()); 1763 if (status != B_OK) { 1764 TRACE_ERROR("unable to configure endpoint: %s\n", strerror(status)); 1765 return status; 1766 } 1767 1768 _WriteContext(&device->input_ctx->input.dropFlags, 0); 1769 _WriteContext(&device->input_ctx->input.addFlags, 1770 (1 << endpointNum) | (1 << 0)); 1771 1772 ConfigureEndpoint(device->input_ctx_addr, false, device->slot); 1773 1774 TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", 1775 device->address, SLOT_3_SLOT_STATE_GET(_ReadContext( 1776 &device->device_ctx->slot.dwslot3))); 1777 TRACE("endpoint[0] state 0x%08" B_PRIx32 "\n", 1778 ENDPOINT_0_STATE_GET(_ReadContext( 1779 &device->device_ctx->endpoints[0].dwendpoint0))); 1780 TRACE("endpoint[%d] state 0x%08" B_PRIx32 "\n", id, 1781 ENDPOINT_0_STATE_GET(_ReadContext( 1782 &device->device_ctx->endpoints[id].dwendpoint0))); 1783 } 1784 pipe->SetControllerCookie(&device->endpoints[id]); 1785 1786 return B_OK; 1787 } 1788 1789 1790 status_t 1791 XHCI::_RemoveEndpointForPipe(Pipe *pipe) 1792 { 1793 TRACE("remove endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress()); 1794 1795 if (pipe->Parent()->Type() != USB_OBJECT_DEVICE) 1796 return B_OK; 1797 Device* usbDevice = (Device *)pipe->Parent(); 1798 if (usbDevice->Parent() == RootObject()) 1799 return B_BAD_VALUE; 1800 1801 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 1802 if (endpoint == NULL || endpoint->trbs == NULL) 1803 return B_NO_INIT; 1804 1805 pipe->SetControllerCookie(NULL); 1806 1807 if (endpoint->id > 0) { 1808 xhci_device *device = endpoint->device; 1809 uint8 epNumber = endpoint->id + 1; 1810 StopEndpoint(true, endpoint); 1811 1812 mutex_lock(&endpoint->lock); 1813 1814 // See comment in CancelQueuedTransfers. 
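		// The Stop Endpoint command issued above should have taken the
		// endpoint out of the Running state, so the controller should no
		// longer be fetching TRBs from this ring; any TDs still queued can
		// simply be popped off the singly-linked td_head list and freed.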
1815 xhci_td* td; 1816 while ((td = endpoint->td_head) != NULL) { 1817 endpoint->td_head = endpoint->td_head->next; 1818 FreeDescriptor(td); 1819 } 1820 1821 mutex_destroy(&endpoint->lock); 1822 memset(endpoint, 0, sizeof(xhci_endpoint)); 1823 1824 _WriteContext(&device->input_ctx->input.dropFlags, (1 << epNumber)); 1825 _WriteContext(&device->input_ctx->input.addFlags, (1 << 0)); 1826 1827 // The Deconfigure bit in the Configure Endpoint command indicates 1828 // that *all* endpoints are to be deconfigured, and not just the ones 1829 // specified in the context flags. (XHCI 1.2 § 4.6.6 p115.) 1830 ConfigureEndpoint(device->input_ctx_addr, false, device->slot); 1831 } 1832 1833 return B_OK; 1834 } 1835 1836 1837 status_t 1838 XHCI::_LinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1839 { 1840 TRACE("link descriptor for pipe\n"); 1841 1842 // Use mutex_trylock first, in case we are in KDL. 1843 MutexLocker endpointLocker(&endpoint->lock, mutex_trylock(&endpoint->lock) == B_OK); 1844 1845 // "used" refers to the number of currently linked TDs, not the number of 1846 // used TRBs on the ring (we use 2 TRBs on the ring per transfer.) 1847 if (endpoint->used >= (XHCI_MAX_TRANSFERS - 1)) { 1848 TRACE_ERROR("link descriptor for pipe: max transfers count exceeded\n"); 1849 return B_BAD_VALUE; 1850 } 1851 1852 // We do not support queuing other transfers in tandem with a fragmented one. 1853 if (endpoint->td_head != NULL && endpoint->td_head->transfer != NULL 1854 && endpoint->td_head->transfer->IsFragmented()) { 1855 TRACE_ERROR("cannot submit transfer: a fragmented transfer is queued\n"); 1856 return B_DEV_RESOURCE_CONFLICT; 1857 } 1858 1859 endpoint->used++; 1860 descriptor->next = endpoint->td_head; 1861 endpoint->td_head = descriptor; 1862 1863 const uint32 current = endpoint->current, 1864 eventdata = current + 1, 1865 last = XHCI_ENDPOINT_RING_SIZE - 1; 1866 uint32 next = eventdata + 1; 1867 1868 TRACE("link descriptor for pipe: current %d, next %d\n", current, next); 1869 1870 // Add a Link TRB to the end of the descriptor. 1871 phys_addr_t addr = endpoint->trb_addr + eventdata * sizeof(xhci_trb); 1872 descriptor->trbs[descriptor->trb_used].address = addr; 1873 descriptor->trbs[descriptor->trb_used].status = TRB_2_IRQ(0); 1874 descriptor->trbs[descriptor->trb_used].flags = TRB_3_TYPE(TRB_TYPE_LINK) 1875 | TRB_3_CHAIN_BIT | TRB_3_CYCLE_BIT; 1876 // It is specified that (XHCI 1.2 § 4.12.3 Note 2 p251) if the TRB 1877 // following one with the ENT bit set is a Link TRB, the Link TRB 1878 // shall be evaluated *and* the subsequent TRB shall be. Thus a 1879 // TRB_3_ENT_BIT is unnecessary here; and from testing seems to 1880 // break all transfers on a (very) small number of controllers. 1881 1882 #if !B_HOST_IS_LENDIAN 1883 // Convert endianness. 1884 for (uint32 i = 0; i <= descriptor->trb_used; i++) { 1885 descriptor->trbs[i].address = 1886 B_HOST_TO_LENDIAN_INT64(descriptor->trbs[i].address); 1887 descriptor->trbs[i].status = 1888 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].status); 1889 descriptor->trbs[i].flags = 1890 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].flags); 1891 } 1892 #endif 1893 1894 // Link the descriptor. 1895 endpoint->trbs[current].address = 1896 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr); 1897 endpoint->trbs[current].status = 1898 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1899 endpoint->trbs[current].flags = 1900 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK)); 1901 1902 // Set up the Event Data TRB (XHCI 1.2 § 4.11.5.2 p230.) 
1903 // 1904 // We do this on the main ring for two reasons: first, to avoid a small 1905 // potential race between the interrupt and the controller evaluating 1906 // the link TRB to get back onto the ring; and second, because many 1907 // controllers throw errors if the target of a Link TRB is not valid 1908 // (i.e. does not have its Cycle Bit set.) 1909 // 1910 // We also set the "address" field, which the controller will copy 1911 // verbatim into the TRB it posts to the event ring, to be the last 1912 // "real" TRB in the TD; this will allow us to determine what transfer 1913 // the resulting Transfer Event TRB refers to. 1914 endpoint->trbs[eventdata].address = 1915 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr 1916 + (descriptor->trb_used - 1) * sizeof(xhci_trb)); 1917 endpoint->trbs[eventdata].status = 1918 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1919 endpoint->trbs[eventdata].flags = 1920 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_EVENT_DATA) 1921 | TRB_3_IOC_BIT | TRB_3_CYCLE_BIT); 1922 1923 if (next == last) { 1924 // We always use 2 TRBs per _Link..() call, so if "next" is the last 1925 // TRB in the ring, we need to generate a link TRB at "next", and 1926 // then wrap it to 0. (We write the cycle bit later, after wrapping, 1927 // for the reason noted in the previous comment.) 1928 endpoint->trbs[next].address = 1929 B_HOST_TO_LENDIAN_INT64(endpoint->trb_addr); 1930 endpoint->trbs[next].status = 1931 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1932 endpoint->trbs[next].flags = 1933 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK)); 1934 1935 next = 0; 1936 } 1937 1938 endpoint->trbs[next].address = 0; 1939 endpoint->trbs[next].status = 0; 1940 endpoint->trbs[next].flags = 0; 1941 1942 memory_write_barrier(); 1943 1944 // Everything is ready, so write the cycle bit(s). 1945 endpoint->trbs[current].flags |= B_HOST_TO_LENDIAN_INT32(TRB_3_CYCLE_BIT); 1946 if (current == 0 && endpoint->trbs[last].address != 0) 1947 endpoint->trbs[last].flags |= B_HOST_TO_LENDIAN_INT32(TRB_3_CYCLE_BIT); 1948 1949 TRACE("_LinkDescriptorForPipe pCurrent %p phys 0x%" B_PRIxPHYSADDR 1950 " 0x%" B_PRIxPHYSADDR " 0x%08" B_PRIx32 "\n", &endpoint->trbs[current], 1951 endpoint->trb_addr + current * sizeof(struct xhci_trb), 1952 endpoint->trbs[current].address, 1953 B_LENDIAN_TO_HOST_INT32(endpoint->trbs[current].flags)); 1954 1955 endpoint->current = next; 1956 endpointLocker.Unlock(); 1957 1958 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1959 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1960 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1961 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1962 1963 Ring(endpoint->device->slot, endpoint->id + 1); 1964 1965 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1966 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1967 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1968 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1969 1970 return B_OK; 1971 } 1972 1973 1974 status_t 1975 XHCI::_UnlinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1976 { 1977 TRACE("unlink descriptor for pipe\n"); 1978 // We presume that the caller has already locked or owns the endpoint. 
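	// Decrement the queued-TD count and remove the descriptor from the
	// singly-linked td_head list; if it turns out not to be linked there,
	// the count is restored and B_ERROR returned below.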
1979 1980 endpoint->used--; 1981 if (descriptor == endpoint->td_head) { 1982 endpoint->td_head = descriptor->next; 1983 descriptor->next = NULL; 1984 return B_OK; 1985 } else { 1986 for (xhci_td *td = endpoint->td_head; td->next != NULL; td = td->next) { 1987 if (td->next == descriptor) { 1988 td->next = descriptor->next; 1989 descriptor->next = NULL; 1990 return B_OK; 1991 } 1992 } 1993 } 1994 1995 endpoint->used++; 1996 return B_ERROR; 1997 } 1998 1999 2000 status_t 2001 XHCI::ConfigureEndpoint(xhci_endpoint* ep, uint8 slot, uint8 number, uint8 type, 2002 bool directionIn, uint16 interval, uint16 maxPacketSize, usb_speed speed, 2003 uint8 maxBurst, uint16 bytesPerInterval) 2004 { 2005 struct xhci_device* device = &fDevices[slot]; 2006 2007 uint32 dwendpoint0 = 0; 2008 uint32 dwendpoint1 = 0; 2009 uint64 qwendpoint2 = 0; 2010 uint32 dwendpoint4 = 0; 2011 2012 // Compute and assign the endpoint type. (XHCI 1.2 § 6.2.3 Table 6-9 p452.) 2013 uint8 xhciType = 4; 2014 if ((type & USB_OBJECT_INTERRUPT_PIPE) != 0) 2015 xhciType = 3; 2016 if ((type & USB_OBJECT_BULK_PIPE) != 0) 2017 xhciType = 2; 2018 if ((type & USB_OBJECT_ISO_PIPE) != 0) 2019 xhciType = 1; 2020 xhciType |= directionIn ? (1 << 2) : 0; 2021 dwendpoint1 |= ENDPOINT_1_EPTYPE(xhciType); 2022 2023 // Compute and assign interval. (XHCI 1.2 § 6.2.3.6 p456.) 2024 uint16 calcInterval; 2025 if ((type & USB_OBJECT_BULK_PIPE) != 0 2026 || (type & USB_OBJECT_CONTROL_PIPE) != 0) { 2027 // Bulk and Control endpoints never issue NAKs. 2028 calcInterval = 0; 2029 } else { 2030 switch (speed) { 2031 case USB_SPEED_FULLSPEED: 2032 if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2033 // Convert 1-16 into 3-18. 2034 calcInterval = min_c(max_c(interval, 1), 16) + 2; 2035 break; 2036 } 2037 2038 // fall through 2039 case USB_SPEED_LOWSPEED: { 2040 // Convert 1ms-255ms into 3-10. 2041 2042 // Find the index of the highest set bit in "interval". 2043 uint32 temp = min_c(max_c(interval, 1), 255); 2044 for (calcInterval = 0; temp != 1; calcInterval++) 2045 temp = temp >> 1; 2046 calcInterval += 3; 2047 break; 2048 } 2049 2050 case USB_SPEED_HIGHSPEED: 2051 case USB_SPEED_SUPERSPEED: 2052 default: 2053 // Convert 1-16 into 0-15. 2054 calcInterval = min_c(max_c(interval, 1), 16) - 1; 2055 break; 2056 } 2057 } 2058 dwendpoint0 |= ENDPOINT_0_INTERVAL(calcInterval); 2059 2060 // For non-isochronous endpoints, we want the controller to retry failed 2061 // transfers, if possible. (XHCI 1.2 § 4.10.2.3 p197.) 2062 if ((type & USB_OBJECT_ISO_PIPE) == 0) 2063 dwendpoint1 |= ENDPOINT_1_CERR(3); 2064 2065 // Assign maximum burst size. For USB3 devices this is passed in; for 2066 // all other devices we compute it. (XHCI 1.2 § 4.8.2 p161.) 2067 if (speed == USB_SPEED_HIGHSPEED && (type & (USB_OBJECT_INTERRUPT_PIPE 2068 | USB_OBJECT_ISO_PIPE)) != 0) { 2069 maxBurst = (maxPacketSize & 0x1800) >> 11; 2070 } else if (speed != USB_SPEED_SUPERSPEED) { 2071 maxBurst = 0; 2072 } 2073 dwendpoint1 |= ENDPOINT_1_MAXBURST(maxBurst); 2074 2075 // Assign maximum packet size, set the ring address, and set the 2076 // "Dequeue Cycle State" bit. (XHCI 1.2 § 6.2.3 Table 6-10 p453.) 2077 dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE(maxPacketSize); 2078 qwendpoint2 |= ENDPOINT_2_DCS_BIT | ep->trb_addr; 2079 2080 // The Max Burst Payload is the number of bytes moved by a 2081 // maximum sized burst. (XHCI 1.2 § 4.11.7.1 p236.) 
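	// For example, a SuperSpeed bulk endpoint with a max packet size of 1024
	// and a maxBurst of 15 gets a max_burst_payload of 16 * 1024 = 16384
	// bytes, while an endpoint with maxBurst 0 simply ends up with its max
	// packet size.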
2082 ep->max_burst_payload = (maxBurst + 1) * maxPacketSize; 2083 if (ep->max_burst_payload == 0) { 2084 TRACE_ERROR("ConfigureEndpoint() failed invalid max_burst_payload\n"); 2085 return B_BAD_VALUE; 2086 } 2087 2088 // Assign average TRB length. 2089 if ((type & USB_OBJECT_CONTROL_PIPE) != 0) { 2090 // Control pipes are a special case, as they rarely have 2091 // outbound transfers of any substantial size. 2092 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(8); 2093 } else if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2094 // Isochronous pipes are another special case: the TRB size will be 2095 // one packet (which is normally smaller than the max packet size, 2096 // but we don't know what it is here.) 2097 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(maxPacketSize); 2098 } else { 2099 // Under all other circumstances, we put max_burst_payload in a TRB. 2100 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(ep->max_burst_payload); 2101 } 2102 2103 // Assign maximum ESIT payload. (XHCI 1.2 § 4.14.2 p259.) 2104 if ((type & (USB_OBJECT_INTERRUPT_PIPE | USB_OBJECT_ISO_PIPE)) != 0) { 2105 // TODO: For SuperSpeedPlus endpoints, there is yet another descriptor 2106 // for isochronous endpoints that specifies the maximum ESIT payload. 2107 // We don't fetch this yet, so just fall back to the USB2 computation 2108 // method if bytesPerInterval is 0. 2109 if (speed == USB_SPEED_SUPERSPEED && bytesPerInterval != 0) 2110 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD(bytesPerInterval); 2111 else if (speed >= USB_SPEED_HIGHSPEED) 2112 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD((maxBurst + 1) * maxPacketSize); 2113 } 2114 2115 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint0, 2116 dwendpoint0); 2117 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint1, 2118 dwendpoint1); 2119 _WriteContext(&device->input_ctx->endpoints[number].qwendpoint2, 2120 qwendpoint2); 2121 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint4, 2122 dwendpoint4); 2123 2124 TRACE("endpoint 0x%" B_PRIx32 " 0x%" B_PRIx32 " 0x%" B_PRIx64 " 0x%" 2125 B_PRIx32 "\n", 2126 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint0), 2127 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint1), 2128 _ReadContext(&device->input_ctx->endpoints[number].qwendpoint2), 2129 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint4)); 2130 2131 return B_OK; 2132 } 2133 2134 2135 status_t 2136 XHCI::GetPortSpeed(uint8 index, usb_speed* speed) 2137 { 2138 if (index >= fPortCount) 2139 return B_BAD_INDEX; 2140 2141 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2142 2143 switch (PS_SPEED_GET(portStatus)) { 2144 case 2: 2145 *speed = USB_SPEED_LOWSPEED; 2146 break; 2147 case 1: 2148 *speed = USB_SPEED_FULLSPEED; 2149 break; 2150 case 3: 2151 *speed = USB_SPEED_HIGHSPEED; 2152 break; 2153 case 4: 2154 *speed = USB_SPEED_SUPERSPEED; 2155 break; 2156 default: 2157 TRACE_ALWAYS("nonstandard port speed %" B_PRId32 ", assuming SuperSpeed\n", 2158 PS_SPEED_GET(portStatus)); 2159 *speed = USB_SPEED_SUPERSPEED; 2160 break; 2161 } 2162 2163 return B_OK; 2164 } 2165 2166 2167 status_t 2168 XHCI::GetPortStatus(uint8 index, usb_port_status* status) 2169 { 2170 if (index >= fPortCount) 2171 return B_BAD_INDEX; 2172 2173 status->status = status->change = 0; 2174 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2175 TRACE("port %" B_PRId8 " status=0x%08" B_PRIx32 "\n", index, portStatus); 2176 2177 // build the status 2178 switch (PS_SPEED_GET(portStatus)) { 2179 case 3: 2180 status->status |= PORT_STATUS_HIGH_SPEED; 2181 break; 2182 case 2: 
2183 status->status |= PORT_STATUS_LOW_SPEED; 2184 break; 2185 default: 2186 break; 2187 } 2188 2189 if (portStatus & PS_CCS) 2190 status->status |= PORT_STATUS_CONNECTION; 2191 if (portStatus & PS_PED) 2192 status->status |= PORT_STATUS_ENABLE; 2193 if (portStatus & PS_OCA) 2194 status->status |= PORT_STATUS_OVER_CURRENT; 2195 if (portStatus & PS_PR) 2196 status->status |= PORT_STATUS_RESET; 2197 if (portStatus & PS_PP) { 2198 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) 2199 status->status |= PORT_STATUS_SS_POWER; 2200 else 2201 status->status |= PORT_STATUS_POWER; 2202 } 2203 2204 // build the change 2205 if (portStatus & PS_CSC) 2206 status->change |= PORT_STATUS_CONNECTION; 2207 if (portStatus & PS_PEC) 2208 status->change |= PORT_STATUS_ENABLE; 2209 if (portStatus & PS_OCC) 2210 status->change |= PORT_STATUS_OVER_CURRENT; 2211 if (portStatus & PS_PRC) 2212 status->change |= PORT_STATUS_RESET; 2213 2214 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) { 2215 if (portStatus & PS_PLC) 2216 status->change |= PORT_CHANGE_LINK_STATE; 2217 if (portStatus & PS_WRC) 2218 status->change |= PORT_CHANGE_BH_PORT_RESET; 2219 } 2220 2221 return B_OK; 2222 } 2223 2224 2225 status_t 2226 XHCI::SetPortFeature(uint8 index, uint16 feature) 2227 { 2228 TRACE("set port feature index %u feature %u\n", index, feature); 2229 if (index >= fPortCount) 2230 return B_BAD_INDEX; 2231 2232 uint32 portRegister = XHCI_PORTSC(index); 2233 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2234 2235 switch (feature) { 2236 case PORT_SUSPEND: 2237 if ((portStatus & PS_PED) == 0 || (portStatus & PS_PR) 2238 || (portStatus & PS_PLS_MASK) >= PS_XDEV_U3) { 2239 TRACE_ERROR("USB core suspending device not in U0/U1/U2.\n"); 2240 return B_BAD_VALUE; 2241 } 2242 portStatus &= ~PS_PLS_MASK; 2243 WriteOpReg(portRegister, portStatus | PS_LWS | PS_XDEV_U3); 2244 break; 2245 2246 case PORT_RESET: 2247 WriteOpReg(portRegister, portStatus | PS_PR); 2248 break; 2249 2250 case PORT_POWER: 2251 WriteOpReg(portRegister, portStatus | PS_PP); 2252 break; 2253 default: 2254 return B_BAD_VALUE; 2255 } 2256 ReadOpReg(portRegister); 2257 return B_OK; 2258 } 2259 2260 2261 status_t 2262 XHCI::ClearPortFeature(uint8 index, uint16 feature) 2263 { 2264 TRACE("clear port feature index %u feature %u\n", index, feature); 2265 if (index >= fPortCount) 2266 return B_BAD_INDEX; 2267 2268 uint32 portRegister = XHCI_PORTSC(index); 2269 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2270 2271 switch (feature) { 2272 case PORT_SUSPEND: 2273 portStatus = ReadOpReg(portRegister); 2274 if (portStatus & PS_PR) 2275 return B_BAD_VALUE; 2276 if (portStatus & PS_XDEV_U3) { 2277 if ((portStatus & PS_PED) == 0) 2278 return B_BAD_VALUE; 2279 portStatus &= ~PS_PLS_MASK; 2280 WriteOpReg(portRegister, portStatus | PS_XDEV_U0 | PS_LWS); 2281 } 2282 break; 2283 case PORT_ENABLE: 2284 WriteOpReg(portRegister, portStatus | PS_PED); 2285 break; 2286 case PORT_POWER: 2287 WriteOpReg(portRegister, portStatus & ~PS_PP); 2288 break; 2289 case C_PORT_CONNECTION: 2290 WriteOpReg(portRegister, portStatus | PS_CSC); 2291 break; 2292 case C_PORT_ENABLE: 2293 WriteOpReg(portRegister, portStatus | PS_PEC); 2294 break; 2295 case C_PORT_OVER_CURRENT: 2296 WriteOpReg(portRegister, portStatus | PS_OCC); 2297 break; 2298 case C_PORT_RESET: 2299 WriteOpReg(portRegister, portStatus | PS_PRC); 2300 break; 2301 case C_PORT_BH_PORT_RESET: 2302 WriteOpReg(portRegister, portStatus | PS_WRC); 2303 break; 2304 case C_PORT_LINK_STATE: 2305 WriteOpReg(portRegister, portStatus | 
PS_PLC); 2306 break; 2307 default: 2308 return B_BAD_VALUE; 2309 } 2310 2311 ReadOpReg(portRegister); 2312 return B_OK; 2313 } 2314 2315 2316 status_t 2317 XHCI::ControllerHalt() 2318 { 2319 // Mask off run state 2320 WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) & ~CMD_RUN); 2321 2322 // wait for shutdown state 2323 if (WaitOpBits(XHCI_STS, STS_HCH, STS_HCH) != B_OK) { 2324 TRACE_ERROR("HCH shutdown timeout\n"); 2325 return B_ERROR; 2326 } 2327 return B_OK; 2328 } 2329 2330 2331 status_t 2332 XHCI::ControllerReset() 2333 { 2334 TRACE("ControllerReset() cmd: 0x%" B_PRIx32 " sts: 0x%" B_PRIx32 "\n", 2335 ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS)); 2336 WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) | CMD_HCRST); 2337 2338 if (WaitOpBits(XHCI_CMD, CMD_HCRST, 0) != B_OK) { 2339 TRACE_ERROR("ControllerReset() failed CMD_HCRST\n"); 2340 return B_ERROR; 2341 } 2342 2343 if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) { 2344 TRACE_ERROR("ControllerReset() failed STS_CNR\n"); 2345 return B_ERROR; 2346 } 2347 2348 return B_OK; 2349 } 2350 2351 2352 int32 2353 XHCI::InterruptHandler(void* data) 2354 { 2355 return ((XHCI*)data)->Interrupt(); 2356 } 2357 2358 2359 int32 2360 XHCI::Interrupt() 2361 { 2362 SpinLocker _(&fSpinlock); 2363 2364 uint32 status = ReadOpReg(XHCI_STS); 2365 uint32 temp = ReadRunReg32(XHCI_IMAN(0)); 2366 WriteOpReg(XHCI_STS, status); 2367 WriteRunReg32(XHCI_IMAN(0), temp); 2368 2369 int32 result = B_HANDLED_INTERRUPT; 2370 2371 if ((status & STS_HCH) != 0) { 2372 TRACE_ERROR("Host Controller halted\n"); 2373 return result; 2374 } 2375 if ((status & STS_HSE) != 0) { 2376 TRACE_ERROR("Host System Error\n"); 2377 return result; 2378 } 2379 if ((status & STS_HCE) != 0) { 2380 TRACE_ERROR("Host Controller Error\n"); 2381 return result; 2382 } 2383 2384 if ((status & STS_EINT) == 0) { 2385 TRACE("STS: 0x%" B_PRIx32 " IRQ_PENDING: 0x%" B_PRIx32 "\n", 2386 status, temp); 2387 return B_UNHANDLED_INTERRUPT; 2388 } 2389 2390 TRACE("Event Interrupt\n"); 2391 release_sem_etc(fEventSem, 1, B_DO_NOT_RESCHEDULE); 2392 return B_INVOKE_SCHEDULER; 2393 } 2394 2395 2396 void 2397 XHCI::Ring(uint8 slot, uint8 endpoint) 2398 { 2399 TRACE("Ding Dong! 
slot:%d endpoint %d\n", slot, endpoint) 2400 if ((slot == 0 && endpoint > 0) || (slot > 0 && endpoint == 0)) 2401 panic("Ring() invalid slot/endpoint combination\n"); 2402 if (slot > fSlotCount || endpoint >= XHCI_MAX_ENDPOINTS) 2403 panic("Ring() invalid slot or endpoint\n"); 2404 2405 WriteDoorReg32(XHCI_DOORBELL(slot), XHCI_DOORBELL_TARGET(endpoint) 2406 | XHCI_DOORBELL_STREAMID(0)); 2407 ReadDoorReg32(XHCI_DOORBELL(slot)); 2408 // Flush PCI writes 2409 } 2410 2411 2412 void 2413 XHCI::QueueCommand(xhci_trb* trb) 2414 { 2415 uint8 i, j; 2416 uint32 temp; 2417 2418 i = fCmdIdx; 2419 j = fCmdCcs; 2420 2421 TRACE("command[%u] = %" B_PRId32 " (0x%016" B_PRIx64 ", 0x%08" B_PRIx32 2422 ", 0x%08" B_PRIx32 ")\n", i, TRB_3_TYPE_GET(trb->flags), trb->address, 2423 trb->status, trb->flags); 2424 2425 fCmdRing[i].address = trb->address; 2426 fCmdRing[i].status = trb->status; 2427 temp = trb->flags; 2428 2429 if (j) 2430 temp |= TRB_3_CYCLE_BIT; 2431 else 2432 temp &= ~TRB_3_CYCLE_BIT; 2433 temp &= ~TRB_3_TC_BIT; 2434 fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp); 2435 2436 fCmdAddr = fErst->rs_addr + (XHCI_MAX_EVENTS + i) * sizeof(xhci_trb); 2437 2438 i++; 2439 2440 if (i == (XHCI_MAX_COMMANDS - 1)) { 2441 temp = TRB_3_TYPE(TRB_TYPE_LINK) | TRB_3_TC_BIT; 2442 if (j) 2443 temp |= TRB_3_CYCLE_BIT; 2444 fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp); 2445 2446 i = 0; 2447 j ^= 1; 2448 } 2449 2450 fCmdIdx = i; 2451 fCmdCcs = j; 2452 } 2453 2454 2455 void 2456 XHCI::HandleCmdComplete(xhci_trb* trb) 2457 { 2458 if (fCmdAddr == trb->address) { 2459 TRACE("Received command event\n"); 2460 fCmdResult[0] = trb->status; 2461 fCmdResult[1] = B_LENDIAN_TO_HOST_INT32(trb->flags); 2462 release_sem_etc(fCmdCompSem, 1, B_DO_NOT_RESCHEDULE); 2463 } else 2464 TRACE_ERROR("received command event for unknown command!\n") 2465 } 2466 2467 2468 void 2469 XHCI::HandleTransferComplete(xhci_trb* trb) 2470 { 2471 const uint32 flags = B_LENDIAN_TO_HOST_INT32(trb->flags); 2472 const uint8 endpointNumber = TRB_3_ENDPOINT_GET(flags), 2473 slot = TRB_3_SLOT_GET(flags); 2474 2475 if (slot > fSlotCount) 2476 TRACE_ERROR("invalid slot\n"); 2477 if (endpointNumber == 0 || endpointNumber >= XHCI_MAX_ENDPOINTS) { 2478 TRACE_ERROR("invalid endpoint\n"); 2479 return; 2480 } 2481 2482 xhci_device *device = &fDevices[slot]; 2483 xhci_endpoint *endpoint = &device->endpoints[endpointNumber - 1]; 2484 2485 if (endpoint->trbs == NULL) { 2486 TRACE_ERROR("got TRB but endpoint is not allocated!\n"); 2487 return; 2488 } 2489 2490 // Use mutex_trylock first, in case we are in KDL. 2491 MutexLocker endpointLocker(endpoint->lock, mutex_trylock(&endpoint->lock) == B_OK); 2492 if (!endpointLocker.IsLocked()) { 2493 // We failed to get the lock. Most likely it was destroyed 2494 // while we were waiting for it. 2495 return; 2496 } 2497 2498 // In the case of an Event Data TRB, the "transferred" field refers 2499 // to the actual number of bytes transferred across the whole TD. 2500 // (XHCI 1.2 § 6.4.2.1 Table 6-38 p478.) 2501 const uint8 completionCode = TRB_2_COMP_CODE_GET(trb->status); 2502 int32 transferred = TRB_2_REM_GET(trb->status), remainder = -1; 2503 2504 TRACE("HandleTransferComplete: ed %" B_PRIu32 ", code %" B_PRIu8 ", transferred %" B_PRId32 "\n", 2505 (flags & TRB_3_EVENT_DATA_BIT), completionCode, transferred); 2506 2507 if ((flags & TRB_3_EVENT_DATA_BIT) == 0) { 2508 // This should only occur under error conditions. 
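		// For a Transfer Event that points at a plain transfer TRB (rather
		// than an Event Data TRB), the status field holds the bytes
		// *remaining*, not the bytes transferred; so record it as the
		// remainder and mark the transferred count as unknown.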
2509 TRACE("got an interrupt for a non-Event Data TRB!\n"); 2510 remainder = transferred; 2511 transferred = -1; 2512 } 2513 2514 if (completionCode != COMP_SUCCESS && completionCode != COMP_SHORT_PACKET 2515 && completionCode != COMP_STOPPED) { 2516 TRACE_ALWAYS("transfer error on slot %" B_PRId8 " endpoint %" B_PRId8 2517 ": %s\n", slot, endpointNumber, xhci_error_string(completionCode)); 2518 } 2519 2520 const phys_addr_t source = B_LENDIAN_TO_HOST_INT64(trb->address); 2521 for (xhci_td *td = endpoint->td_head; td != NULL; td = td->next) { 2522 int64 offset = (source - td->trb_addr) / sizeof(xhci_trb); 2523 if (offset < 0 || offset >= td->trb_count) 2524 continue; 2525 2526 TRACE("HandleTransferComplete td %p trb %" B_PRId64 " found\n", 2527 td, offset); 2528 2529 // The TRB at offset trb_used will be the link TRB, which we do not 2530 // care about (and should not generate an interrupt at all.) We really 2531 // care about the properly last TRB, at index "count - 1", which the 2532 // Event Data TRB that _LinkDescriptorForPipe creates points to. 2533 // 2534 // But if we have an unsuccessful completion code, the transfer 2535 // likely failed midway; so just accept it anyway. 2536 if (offset == (td->trb_used - 1) || completionCode != COMP_SUCCESS) { 2537 _UnlinkDescriptorForPipe(td, endpoint); 2538 endpointLocker.Unlock(); 2539 2540 td->trb_completion_code = completionCode; 2541 td->td_transferred = transferred; 2542 td->trb_left = remainder; 2543 2544 // add descriptor to finished list 2545 if (mutex_trylock(&fFinishedLock) != B_OK) 2546 mutex_lock(&fFinishedLock); 2547 td->next = fFinishedHead; 2548 fFinishedHead = td; 2549 mutex_unlock(&fFinishedLock); 2550 2551 release_sem_etc(fFinishTransfersSem, 1, B_DO_NOT_RESCHEDULE); 2552 TRACE("HandleTransferComplete td %p done\n", td); 2553 } else { 2554 TRACE_ERROR("successful TRB 0x%" B_PRIxPHYSADDR " was found, but it wasn't " 2555 "the last in the TD!\n", source); 2556 } 2557 return; 2558 } 2559 TRACE_ERROR("TRB 0x%" B_PRIxPHYSADDR " was not found in the endpoint!\n", source); 2560 } 2561 2562 2563 void 2564 XHCI::DumpRing(xhci_trb *trbs, uint32 size) 2565 { 2566 if (!Lock()) { 2567 TRACE("Unable to get lock!\n"); 2568 return; 2569 } 2570 2571 for (uint32 i = 0; i < size; i++) { 2572 TRACE("command[%" B_PRId32 "] = %" B_PRId32 " (0x%016" B_PRIx64 "," 2573 " 0x%08" B_PRIx32 ", 0x%08" B_PRIx32 ")\n", i, 2574 TRB_3_TYPE_GET(B_LENDIAN_TO_HOST_INT32(trbs[i].flags)), 2575 trbs[i].address, trbs[i].status, trbs[i].flags); 2576 } 2577 2578 Unlock(); 2579 } 2580 2581 2582 status_t 2583 XHCI::DoCommand(xhci_trb* trb) 2584 { 2585 if (!Lock()) { 2586 TRACE("Unable to get lock!\n"); 2587 return B_ERROR; 2588 } 2589 2590 QueueCommand(trb); 2591 Ring(0, 0); 2592 2593 // Begin with a 50ms timeout. 2594 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 50 * 1000) != B_OK) { 2595 // We've hit the timeout. In some error cases, interrupts are not 2596 // generated; so here we force the event ring to be polled once. 2597 release_sem(fEventSem); 2598 2599 // Now try again, this time with a 750ms timeout. 
2600 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 2601 750 * 1000) != B_OK) { 2602 TRACE("Unable to obtain fCmdCompSem!\n"); 2603 fCmdAddr = 0; 2604 Unlock(); 2605 return B_TIMED_OUT; 2606 } 2607 } 2608 2609 // eat up sems that have been released by multiple interrupts 2610 int32 semCount = 0; 2611 get_sem_count(fCmdCompSem, &semCount); 2612 if (semCount > 0) 2613 acquire_sem_etc(fCmdCompSem, semCount, B_RELATIVE_TIMEOUT, 0); 2614 2615 status_t status = B_OK; 2616 uint32 completionCode = TRB_2_COMP_CODE_GET(fCmdResult[0]); 2617 TRACE("command complete\n"); 2618 if (completionCode != COMP_SUCCESS) { 2619 TRACE_ERROR("unsuccessful command %" B_PRId32 ", error %s (%" B_PRId32 ")\n", 2620 TRB_3_TYPE_GET(trb->flags), xhci_error_string(completionCode), 2621 completionCode); 2622 status = B_IO_ERROR; 2623 } 2624 2625 trb->status = fCmdResult[0]; 2626 trb->flags = fCmdResult[1]; 2627 2628 fCmdAddr = 0; 2629 Unlock(); 2630 return status; 2631 } 2632 2633 2634 status_t 2635 XHCI::Noop() 2636 { 2637 TRACE("Issue No-Op\n"); 2638 xhci_trb trb; 2639 trb.address = 0; 2640 trb.status = 0; 2641 trb.flags = TRB_3_TYPE(TRB_TYPE_CMD_NOOP); 2642 2643 return DoCommand(&trb); 2644 } 2645 2646 2647 status_t 2648 XHCI::EnableSlot(uint8* slot) 2649 { 2650 TRACE("Enable Slot\n"); 2651 xhci_trb trb; 2652 trb.address = 0; 2653 trb.status = 0; 2654 trb.flags = TRB_3_TYPE(TRB_TYPE_ENABLE_SLOT); 2655 2656 status_t status = DoCommand(&trb); 2657 if (status != B_OK) 2658 return status; 2659 2660 *slot = TRB_3_SLOT_GET(trb.flags); 2661 return *slot != 0 ? B_OK : B_BAD_VALUE; 2662 } 2663 2664 2665 status_t 2666 XHCI::DisableSlot(uint8 slot) 2667 { 2668 TRACE("Disable Slot\n"); 2669 xhci_trb trb; 2670 trb.address = 0; 2671 trb.status = 0; 2672 trb.flags = TRB_3_TYPE(TRB_TYPE_DISABLE_SLOT) | TRB_3_SLOT(slot); 2673 2674 return DoCommand(&trb); 2675 } 2676 2677 2678 status_t 2679 XHCI::SetAddress(uint64 inputContext, bool bsr, uint8 slot) 2680 { 2681 TRACE("Set Address\n"); 2682 xhci_trb trb; 2683 trb.address = inputContext; 2684 trb.status = 0; 2685 trb.flags = TRB_3_TYPE(TRB_TYPE_ADDRESS_DEVICE) | TRB_3_SLOT(slot); 2686 2687 if (bsr) 2688 trb.flags |= TRB_3_BSR_BIT; 2689 2690 return DoCommand(&trb); 2691 } 2692 2693 2694 status_t 2695 XHCI::ConfigureEndpoint(uint64 inputContext, bool deconfigure, uint8 slot) 2696 { 2697 TRACE("Configure Endpoint\n"); 2698 xhci_trb trb; 2699 trb.address = inputContext; 2700 trb.status = 0; 2701 trb.flags = TRB_3_TYPE(TRB_TYPE_CONFIGURE_ENDPOINT) | TRB_3_SLOT(slot); 2702 2703 if (deconfigure) 2704 trb.flags |= TRB_3_DCEP_BIT; 2705 2706 return DoCommand(&trb); 2707 } 2708 2709 2710 status_t 2711 XHCI::EvaluateContext(uint64 inputContext, uint8 slot) 2712 { 2713 TRACE("Evaluate Context\n"); 2714 xhci_trb trb; 2715 trb.address = inputContext; 2716 trb.status = 0; 2717 trb.flags = TRB_3_TYPE(TRB_TYPE_EVALUATE_CONTEXT) | TRB_3_SLOT(slot); 2718 2719 return DoCommand(&trb); 2720 } 2721 2722 2723 status_t 2724 XHCI::ResetEndpoint(bool preserve, xhci_endpoint* endpoint) 2725 { 2726 TRACE("Reset Endpoint\n"); 2727 2728 switch (_GetEndpointState(endpoint)) { 2729 case ENDPOINT_STATE_STOPPED: 2730 TRACE("Reset Endpoint: already stopped"); 2731 return B_OK; 2732 case ENDPOINT_STATE_HALTED: 2733 TRACE("Reset Endpoint: warning, weird state!"); 2734 default: 2735 break; 2736 } 2737 2738 xhci_trb trb; 2739 trb.address = 0; 2740 trb.status = 0; 2741 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_ENDPOINT) 2742 | TRB_3_SLOT(endpoint->device->slot) | TRB_3_ENDPOINT(endpoint->id + 1); 2743 if (preserve) 2744 
trb.flags |= TRB_3_PRSV_BIT; 2745 2746 return DoCommand(&trb); 2747 } 2748 2749 2750 status_t 2751 XHCI::StopEndpoint(bool suspend, xhci_endpoint* endpoint) 2752 { 2753 TRACE("Stop Endpoint\n"); 2754 2755 switch (_GetEndpointState(endpoint)) { 2756 case ENDPOINT_STATE_HALTED: 2757 TRACE("Stop Endpoint: error, halted"); 2758 return B_DEV_STALLED; 2759 case ENDPOINT_STATE_STOPPED: 2760 TRACE("Stop Endpoint: already stopped"); 2761 return B_OK; 2762 default: 2763 break; 2764 } 2765 2766 xhci_trb trb; 2767 trb.address = 0; 2768 trb.status = 0; 2769 trb.flags = TRB_3_TYPE(TRB_TYPE_STOP_ENDPOINT) 2770 | TRB_3_SLOT(endpoint->device->slot) | TRB_3_ENDPOINT(endpoint->id + 1); 2771 if (suspend) 2772 trb.flags |= TRB_3_SUSPEND_ENDPOINT_BIT; 2773 2774 return DoCommand(&trb); 2775 } 2776 2777 2778 status_t 2779 XHCI::SetTRDequeue(uint64 dequeue, uint16 stream, uint8 endpoint, uint8 slot) 2780 { 2781 TRACE("Set TR Dequeue\n"); 2782 xhci_trb trb; 2783 trb.address = dequeue | ENDPOINT_2_DCS_BIT; 2784 // The DCS bit is copied from the address field as in ConfigureEndpoint. 2785 // (XHCI 1.2 § 4.6.10 p142.) 2786 trb.status = TRB_2_STREAM(stream); 2787 trb.flags = TRB_3_TYPE(TRB_TYPE_SET_TR_DEQUEUE) 2788 | TRB_3_SLOT(slot) | TRB_3_ENDPOINT(endpoint); 2789 2790 return DoCommand(&trb); 2791 } 2792 2793 2794 status_t 2795 XHCI::ResetDevice(uint8 slot) 2796 { 2797 TRACE("Reset Device\n"); 2798 xhci_trb trb; 2799 trb.address = 0; 2800 trb.status = 0; 2801 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_DEVICE) | TRB_3_SLOT(slot); 2802 2803 return DoCommand(&trb); 2804 } 2805 2806 2807 int32 2808 XHCI::EventThread(void* data) 2809 { 2810 ((XHCI *)data)->CompleteEvents(); 2811 return B_OK; 2812 } 2813 2814 2815 void 2816 XHCI::CompleteEvents() 2817 { 2818 while (!fStopThreads) { 2819 if (acquire_sem(fEventSem) < B_OK) 2820 continue; 2821 2822 // eat up sems that have been released by multiple interrupts 2823 int32 semCount = 0; 2824 get_sem_count(fEventSem, &semCount); 2825 if (semCount > 0) 2826 acquire_sem_etc(fEventSem, semCount, B_RELATIVE_TIMEOUT, 0); 2827 2828 ProcessEvents(); 2829 } 2830 } 2831 2832 2833 void 2834 XHCI::ProcessEvents() 2835 { 2836 // Use mutex_trylock first, in case we are in KDL. 2837 MutexLocker locker(fEventLock, mutex_trylock(&fEventLock) == B_OK); 2838 if (!locker.IsLocked()) { 2839 // We failed to get the lock. This really should not happen. 2840 TRACE_ERROR("failed to acquire event lock!\n"); 2841 return; 2842 } 2843 2844 uint16 i = fEventIdx; 2845 uint8 j = fEventCcs; 2846 uint8 t = 2; 2847 2848 while (1) { 2849 uint32 temp = B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags); 2850 uint8 event = TRB_3_TYPE_GET(temp); 2851 TRACE("event[%u] = %u (0x%016" B_PRIx64 " 0x%08" B_PRIx32 " 0x%08" 2852 B_PRIx32 ")\n", i, event, fEventRing[i].address, 2853 fEventRing[i].status, B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags)); 2854 uint8 k = (temp & TRB_3_CYCLE_BIT) ? 
1 : 0; 2855 if (j != k) 2856 break; 2857 2858 switch (event) { 2859 case TRB_TYPE_COMMAND_COMPLETION: 2860 HandleCmdComplete(&fEventRing[i]); 2861 break; 2862 case TRB_TYPE_TRANSFER: 2863 HandleTransferComplete(&fEventRing[i]); 2864 break; 2865 case TRB_TYPE_PORT_STATUS_CHANGE: 2866 TRACE("port change detected\n"); 2867 break; 2868 default: 2869 TRACE_ERROR("Unhandled event = %u\n", event); 2870 break; 2871 } 2872 2873 i++; 2874 if (i == XHCI_MAX_EVENTS) { 2875 i = 0; 2876 j ^= 1; 2877 if (!--t) 2878 break; 2879 } 2880 } 2881 2882 fEventIdx = i; 2883 fEventCcs = j; 2884 2885 uint64 addr = fErst->rs_addr + i * sizeof(xhci_trb); 2886 WriteRunReg32(XHCI_ERDP_LO(0), (uint32)addr | ERDP_BUSY); 2887 WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(addr >> 32)); 2888 } 2889 2890 2891 int32 2892 XHCI::FinishThread(void* data) 2893 { 2894 ((XHCI *)data)->FinishTransfers(); 2895 return B_OK; 2896 } 2897 2898 2899 void 2900 XHCI::FinishTransfers() 2901 { 2902 while (!fStopThreads) { 2903 if (acquire_sem(fFinishTransfersSem) < B_OK) 2904 continue; 2905 2906 // eat up sems that have been released by multiple interrupts 2907 int32 semCount = 0; 2908 get_sem_count(fFinishTransfersSem, &semCount); 2909 if (semCount > 0) 2910 acquire_sem_etc(fFinishTransfersSem, semCount, B_RELATIVE_TIMEOUT, 0); 2911 2912 mutex_lock(&fFinishedLock); 2913 TRACE("finishing transfers\n"); 2914 while (fFinishedHead != NULL) { 2915 xhci_td* td = fFinishedHead; 2916 fFinishedHead = td->next; 2917 td->next = NULL; 2918 mutex_unlock(&fFinishedLock); 2919 2920 TRACE("finishing transfer td %p\n", td); 2921 2922 Transfer* transfer = td->transfer; 2923 if (transfer == NULL) { 2924 // No transfer? Quick way out. 2925 FreeDescriptor(td); 2926 mutex_lock(&fFinishedLock); 2927 continue; 2928 } 2929 2930 bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out); 2931 2932 status_t callbackStatus = B_OK; 2933 const uint8 completionCode = td->trb_completion_code; 2934 switch (completionCode) { 2935 case COMP_SHORT_PACKET: 2936 case COMP_SUCCESS: 2937 callbackStatus = B_OK; 2938 break; 2939 case COMP_DATA_BUFFER: 2940 callbackStatus = directionIn ? B_DEV_DATA_OVERRUN 2941 : B_DEV_DATA_UNDERRUN; 2942 break; 2943 case COMP_BABBLE: 2944 callbackStatus = directionIn ? B_DEV_FIFO_OVERRUN 2945 : B_DEV_FIFO_UNDERRUN; 2946 break; 2947 case COMP_USB_TRANSACTION: 2948 callbackStatus = B_DEV_CRC_ERROR; 2949 break; 2950 case COMP_STALL: 2951 callbackStatus = B_DEV_STALLED; 2952 break; 2953 default: 2954 callbackStatus = B_DEV_STALLED; 2955 break; 2956 } 2957 2958 size_t actualLength = transfer->FragmentLength(); 2959 if (completionCode != COMP_SUCCESS) { 2960 actualLength = td->td_transferred; 2961 if (td->td_transferred == -1) 2962 actualLength = transfer->FragmentLength() - td->trb_left; 2963 TRACE("transfer not successful, actualLength=%" B_PRIuSIZE "\n", 2964 actualLength); 2965 } 2966 2967 usb_isochronous_data* isochronousData = transfer->IsochronousData(); 2968 if (isochronousData != NULL) { 2969 size_t packetSize = transfer->DataLength() 2970 / isochronousData->packet_count, 2971 left = actualLength; 2972 for (uint32 i = 0; i < isochronousData->packet_count; i++) { 2973 size_t size = min_c(packetSize, left); 2974 isochronousData->packet_descriptors[i].actual_length = size; 2975 isochronousData->packet_descriptors[i].status = (size > 0) 2976 ? 
B_OK : B_DEV_FIFO_UNDERRUN; 2977 left -= size; 2978 } 2979 } 2980 2981 if (callbackStatus == B_OK && directionIn && actualLength > 0) { 2982 TRACE("copying in iov count %ld\n", transfer->VectorCount()); 2983 status_t status = transfer->PrepareKernelAccess(); 2984 if (status == B_OK) { 2985 ReadDescriptor(td, transfer->Vector(), 2986 transfer->VectorCount()); 2987 } else { 2988 callbackStatus = status; 2989 } 2990 } 2991 2992 FreeDescriptor(td); 2993 2994 // this transfer may still have data left 2995 bool finished = true; 2996 transfer->AdvanceByFragment(actualLength); 2997 if (completionCode == COMP_SUCCESS 2998 && transfer->FragmentLength() > 0) { 2999 TRACE("still %" B_PRIuSIZE " bytes left on transfer\n", 3000 transfer->FragmentLength()); 3001 callbackStatus = SubmitTransfer(transfer); 3002 finished = (callbackStatus != B_OK); 3003 } 3004 if (finished) { 3005 // The actualLength was already handled in AdvanceByFragment. 3006 transfer->Finished(callbackStatus, 0); 3007 delete transfer; 3008 } 3009 3010 mutex_lock(&fFinishedLock); 3011 } 3012 mutex_unlock(&fFinishedLock); 3013 } 3014 } 3015 3016 3017 inline void 3018 XHCI::WriteOpReg(uint32 reg, uint32 value) 3019 { 3020 *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg) = value; 3021 } 3022 3023 3024 inline uint32 3025 XHCI::ReadOpReg(uint32 reg) 3026 { 3027 return *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg); 3028 } 3029 3030 3031 inline status_t 3032 XHCI::WaitOpBits(uint32 reg, uint32 mask, uint32 expected) 3033 { 3034 int loops = 0; 3035 uint32 value = ReadOpReg(reg); 3036 while ((value & mask) != expected) { 3037 snooze(1000); 3038 value = ReadOpReg(reg); 3039 if (loops == 100) { 3040 TRACE("delay waiting on reg 0x%" B_PRIX32 " match 0x%" B_PRIX32 3041 " (0x%" B_PRIX32 ")\n", reg, expected, mask); 3042 } else if (loops > 250) { 3043 TRACE_ERROR("timeout waiting on reg 0x%" B_PRIX32 3044 " match 0x%" B_PRIX32 " (0x%" B_PRIX32 ")\n", reg, expected, 3045 mask); 3046 return B_ERROR; 3047 } 3048 loops++; 3049 } 3050 return B_OK; 3051 } 3052 3053 3054 inline uint32 3055 XHCI::ReadCapReg32(uint32 reg) 3056 { 3057 return *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg); 3058 } 3059 3060 3061 inline void 3062 XHCI::WriteCapReg32(uint32 reg, uint32 value) 3063 { 3064 *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg) = value; 3065 } 3066 3067 3068 inline uint32 3069 XHCI::ReadRunReg32(uint32 reg) 3070 { 3071 return *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg); 3072 } 3073 3074 3075 inline void 3076 XHCI::WriteRunReg32(uint32 reg, uint32 value) 3077 { 3078 *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg) = value; 3079 } 3080 3081 3082 inline uint32 3083 XHCI::ReadDoorReg32(uint32 reg) 3084 { 3085 return *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg); 3086 } 3087 3088 3089 inline void 3090 XHCI::WriteDoorReg32(uint32 reg, uint32 value) 3091 { 3092 *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg) = value; 3093 } 3094 3095 3096 inline addr_t 3097 XHCI::_OffsetContextAddr(addr_t p) 3098 { 3099 if (fContextSizeShift == 1) { 3100 // each structure is page aligned, each pointer is 32 bits aligned 3101 uint32 offset = p & ((B_PAGE_SIZE - 1) & ~31U); 3102 p += offset; 3103 } 3104 return p; 3105 } 3106 3107 inline uint32 3108 XHCI::_ReadContext(uint32* p) 3109 { 3110 p = (uint32*)_OffsetContextAddr((addr_t)p); 3111 return *p; 3112 } 3113 3114 3115 inline void 3116 XHCI::_WriteContext(uint32* p, uint32 value) 3117 
{ 3118 p = (uint32*)_OffsetContextAddr((addr_t)p); 3119 *p = value; 3120 } 3121 3122 3123 inline uint64 3124 XHCI::_ReadContext(uint64* p) 3125 { 3126 p = (uint64*)_OffsetContextAddr((addr_t)p); 3127 return *p; 3128 } 3129 3130 3131 inline void 3132 XHCI::_WriteContext(uint64* p, uint64 value) 3133 { 3134 p = (uint64*)_OffsetContextAddr((addr_t)p); 3135 *p = value; 3136 } 3137
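// Note on the context accessors above: when the controller uses 64-byte
// contexts (fContextSizeShift == 1), _OffsetContextAddr() effectively adds
// the 32-byte-granular offset of the field within its page-aligned context
// structure a second time, mapping the 32-byte layout of our structs onto
// the 64-byte layout in controller memory. For example, a field our structs
// place at page offset 0x20 would then be accessed at offset 0x40.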