1 /* 2 * Copyright 2011-2021, Haiku, Inc. All rights reserved. 3 * Distributed under the terms of the MIT License. 4 * 5 * Authors: 6 * Augustin Cavalier <waddlesplash> 7 * Jian Chiang <j.jian.chiang@gmail.com> 8 * Jérôme Duval <jerome.duval@gmail.com> 9 * Akshay Jaggi <akshay1994.leo@gmail.com> 10 * Michael Lotz <mmlr@mlotz.ch> 11 * Alexander von Gluck <kallisti5@unixzen.com> 12 */ 13 14 15 #include <module.h> 16 #include <PCI.h> 17 #include <PCI_x86.h> 18 #include <USB3.h> 19 #include <KernelExport.h> 20 21 #include <ByteOrder.h> 22 #include <util/AutoLock.h> 23 24 #include "xhci.h" 25 26 #define USB_MODULE_NAME "xhci" 27 28 pci_module_info *XHCI::sPCIModule = NULL; 29 pci_x86_module_info *XHCI::sPCIx86Module = NULL; 30 31 32 static int32 33 xhci_std_ops(int32 op, ...) 34 { 35 switch (op) { 36 case B_MODULE_INIT: 37 TRACE_MODULE("xhci init module\n"); 38 return B_OK; 39 case B_MODULE_UNINIT: 40 TRACE_MODULE("xhci uninit module\n"); 41 return B_OK; 42 } 43 44 return EINVAL; 45 } 46 47 48 static const char* 49 xhci_error_string(uint32 error) 50 { 51 switch (error) { 52 case COMP_INVALID: return "Invalid"; 53 case COMP_SUCCESS: return "Success"; 54 case COMP_DATA_BUFFER: return "Data buffer"; 55 case COMP_BABBLE: return "Babble detected"; 56 case COMP_USB_TRANSACTION: return "USB transaction"; 57 case COMP_TRB: return "TRB"; 58 case COMP_STALL: return "Stall"; 59 case COMP_RESOURCE: return "Resource"; 60 case COMP_BANDWIDTH: return "Bandwidth"; 61 case COMP_NO_SLOTS: return "No slots"; 62 case COMP_INVALID_STREAM: return "Invalid stream"; 63 case COMP_SLOT_NOT_ENABLED: return "Slot not enabled"; 64 case COMP_ENDPOINT_NOT_ENABLED: return "Endpoint not enabled"; 65 case COMP_SHORT_PACKET: return "Short packet"; 66 case COMP_RING_UNDERRUN: return "Ring underrun"; 67 case COMP_RING_OVERRUN: return "Ring overrun"; 68 case COMP_VF_RING_FULL: return "VF Event Ring Full"; 69 case COMP_PARAMETER: return "Parameter"; 70 case COMP_BANDWIDTH_OVERRUN: return "Bandwidth overrun"; 71 case COMP_CONTEXT_STATE: return "Context state"; 72 case COMP_NO_PING_RESPONSE: return "No ping response"; 73 case COMP_EVENT_RING_FULL: return "Event ring full"; 74 case COMP_INCOMPATIBLE_DEVICE: return "Incompatible device"; 75 case COMP_MISSED_SERVICE: return "Missed service"; 76 case COMP_COMMAND_RING_STOPPED: return "Command ring stopped"; 77 case COMP_COMMAND_ABORTED: return "Command aborted"; 78 case COMP_STOPPED: return "Stopped"; 79 case COMP_LENGTH_INVALID: return "Length invalid"; 80 case COMP_MAX_EXIT_LATENCY: return "Max exit latency too large"; 81 case COMP_ISOC_OVERRUN: return "Isoch buffer overrun"; 82 case COMP_EVENT_LOST: return "Event lost"; 83 case COMP_UNDEFINED: return "Undefined"; 84 case COMP_INVALID_STREAM_ID: return "Invalid stream ID"; 85 case COMP_SECONDARY_BANDWIDTH: return "Secondary bandwidth"; 86 case COMP_SPLIT_TRANSACTION: return "Split transaction"; 87 88 default: return "Undefined"; 89 } 90 } 91 92 93 usb_host_controller_info xhci_module = { 94 { 95 "busses/usb/xhci", 96 0, 97 xhci_std_ops 98 }, 99 NULL, 100 XHCI::AddTo 101 }; 102 103 104 module_info *modules[] = { 105 (module_info *)&xhci_module, 106 NULL 107 }; 108 109 110 status_t 111 XHCI::AddTo(Stack *stack) 112 { 113 if (!sPCIModule) { 114 status_t status = get_module(B_PCI_MODULE_NAME, 115 (module_info **)&sPCIModule); 116 if (status < B_OK) { 117 TRACE_MODULE_ERROR("getting pci module failed! 0x%08" B_PRIx32 118 "\n", status); 119 return status; 120 } 121 } 122 123 TRACE_MODULE("searching devices\n"); 124 bool found = false; 125 pci_info *item = new(std::nothrow) pci_info; 126 if (item == NULL) { 127 sPCIModule = NULL; 128 put_module(B_PCI_MODULE_NAME); 129 return B_NO_MEMORY; 130 } 131 132 // Try to get the PCI x86 module as well so we can enable possible MSIs. 133 if (sPCIx86Module == NULL && get_module(B_PCI_X86_MODULE_NAME, 134 (module_info **)&sPCIx86Module) != B_OK) { 135 // If it isn't there, that's not critical though. 136 TRACE_MODULE_ERROR("failed to get pci x86 module\n"); 137 sPCIx86Module = NULL; 138 } 139 140 for (int32 i = 0; sPCIModule->get_nth_pci_info(i, item) >= B_OK; i++) { 141 if (item->class_base == PCI_serial_bus && item->class_sub == PCI_usb 142 && item->class_api == PCI_usb_xhci) { 143 TRACE_MODULE("found device at PCI:%d:%d:%d\n", 144 item->bus, item->device, item->function); 145 XHCI *bus = new(std::nothrow) XHCI(item, stack); 146 if (bus == NULL) { 147 delete item; 148 sPCIModule = NULL; 149 put_module(B_PCI_MODULE_NAME); 150 if (sPCIx86Module != NULL) 151 put_module(B_PCI_X86_MODULE_NAME); 152 return B_NO_MEMORY; 153 } 154 155 // The bus will put the PCI modules when it is destroyed, so get 156 // them again to increase their reference count. 157 get_module(B_PCI_MODULE_NAME, (module_info **)&sPCIModule); 158 if (sPCIx86Module != NULL) 159 get_module(B_PCI_X86_MODULE_NAME, (module_info **)&sPCIx86Module); 160 161 if (bus->InitCheck() < B_OK) { 162 TRACE_MODULE_ERROR("bus failed init check\n"); 163 delete bus; 164 continue; 165 } 166 167 // the bus took it away 168 item = new(std::nothrow) pci_info; 169 170 if (bus->Start() != B_OK) { 171 delete bus; 172 continue; 173 } 174 found = true; 175 } 176 } 177 178 // The modules will have been gotten again if we successfully 179 // initialized a bus, so we should put them here. 180 put_module(B_PCI_MODULE_NAME); 181 if (sPCIx86Module != NULL) 182 put_module(B_PCI_X86_MODULE_NAME); 183 184 if (!found) 185 TRACE_MODULE_ERROR("no devices found\n"); 186 delete item; 187 return found ? B_OK : ENODEV; 188 } 189 190 191 XHCI::XHCI(pci_info *info, Stack *stack) 192 : BusManager(stack), 193 fRegisterArea(-1), 194 fRegisters(NULL), 195 fPCIInfo(info), 196 fStack(stack), 197 fIRQ(0), 198 fUseMSI(false), 199 fErstArea(-1), 200 fDcbaArea(-1), 201 fCmdCompSem(-1), 202 fStopThreads(false), 203 fRootHub(NULL), 204 fPortCount(0), 205 fSlotCount(0), 206 fScratchpadCount(0), 207 fContextSizeShift(0), 208 fFinishedHead(NULL), 209 fFinishTransfersSem(-1), 210 fFinishThread(-1), 211 fEventSem(-1), 212 fEventThread(-1), 213 fEventIdx(0), 214 fCmdIdx(0), 215 fEventCcs(1), 216 fCmdCcs(1) 217 { 218 B_INITIALIZE_SPINLOCK(&fSpinlock); 219 mutex_init(&fFinishedLock, "XHCI finished transfers"); 220 mutex_init(&fEventLock, "XHCI event handler"); 221 222 if (BusManager::InitCheck() < B_OK) { 223 TRACE_ERROR("bus manager failed to init\n"); 224 return; 225 } 226 227 TRACE("constructing new XHCI host controller driver\n"); 228 fInitOK = false; 229 230 // enable busmaster and memory mapped access 231 uint16 command = sPCIModule->read_pci_config(fPCIInfo->bus, 232 fPCIInfo->device, fPCIInfo->function, PCI_command, 2); 233 command &= ~(PCI_command_io | PCI_command_int_disable); 234 command |= PCI_command_master | PCI_command_memory; 235 236 sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device, 237 fPCIInfo->function, PCI_command, 2, command); 238 239 // map the registers (low + high for 64-bit when requested) 240 phys_addr_t physicalAddress = fPCIInfo->u.h0.base_registers[0]; 241 if ((fPCIInfo->u.h0.base_register_flags[0] & PCI_address_type) 242 == PCI_address_type_64) { 243 physicalAddress |= (uint64)fPCIInfo->u.h0.base_registers[1] << 32; 244 } 245 246 size_t mapSize = fPCIInfo->u.h0.base_register_sizes[0]; 247 248 TRACE("map registers %08" B_PRIxPHYSADDR ", size: %" B_PRIuSIZE "\n", 249 physicalAddress, mapSize); 250 251 fRegisterArea = map_physical_memory("XHCI memory mapped registers", 252 physicalAddress, mapSize, B_ANY_KERNEL_BLOCK_ADDRESS, 253 B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 254 (void **)&fRegisters); 255 if (fRegisterArea < B_OK) { 256 TRACE_ERROR("failed to map register memory\n"); 257 return; 258 } 259 260 // determine the register offsets 261 fCapabilityRegisterOffset = 0; 262 fOperationalRegisterOffset = HCI_CAPLENGTH(ReadCapReg32(XHCI_HCI_CAPLENGTH)); 263 fRuntimeRegisterOffset = ReadCapReg32(XHCI_RTSOFF) & ~0x1F; 264 fDoorbellRegisterOffset = ReadCapReg32(XHCI_DBOFF) & ~0x3; 265 266 TRACE("mapped registers: %p\n", fRegisters); 267 TRACE("operational register offset: %" B_PRId32 "\n", fOperationalRegisterOffset); 268 TRACE("runtime register offset: %" B_PRId32 "\n", fRuntimeRegisterOffset); 269 TRACE("doorbell register offset: %" B_PRId32 "\n", fDoorbellRegisterOffset); 270 271 int32 interfaceVersion = HCI_VERSION(ReadCapReg32(XHCI_HCI_VERSION)); 272 if (interfaceVersion < 0x0090 || interfaceVersion > 0x0120) { 273 TRACE_ERROR("unsupported interface version: 0x%04" B_PRIx32 "\n", 274 interfaceVersion); 275 return; 276 } 277 TRACE_ALWAYS("interface version: 0x%04" B_PRIx32 "\n", interfaceVersion); 278 279 TRACE_ALWAYS("structural parameters: 1:0x%08" B_PRIx32 " 2:0x%08" 280 B_PRIx32 " 3:0x%08" B_PRIx32 "\n", ReadCapReg32(XHCI_HCSPARAMS1), 281 ReadCapReg32(XHCI_HCSPARAMS2), ReadCapReg32(XHCI_HCSPARAMS3)); 282 283 uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS); 284 if (cparams == 0xffffffff) 285 return; 286 TRACE_ALWAYS("capability parameters: 0x%08" B_PRIx32 "\n", cparams); 287 288 // if 64 bytes context structures, then 1 289 fContextSizeShift = HCC_CSZ(cparams); 290 291 // Assume ownership of the controller from the BIOS. 292 uint32 eec = 0xffffffff; 293 uint32 eecp = HCS0_XECP(cparams) << 2; 294 for (; eecp != 0 && XECP_NEXT(eec); eecp += XECP_NEXT(eec) << 2) { 295 TRACE("eecp register: 0x%08" B_PRIx32 "\n", eecp); 296 297 eec = ReadCapReg32(eecp); 298 if (XECP_ID(eec) != XHCI_LEGSUP_CAPID) 299 continue; 300 301 if (eec & XHCI_LEGSUP_BIOSOWNED) { 302 TRACE_ALWAYS("the host controller is bios owned, claiming" 303 " ownership\n"); 304 WriteCapReg32(eecp, eec | XHCI_LEGSUP_OSOWNED); 305 306 for (int32 i = 0; i < 20; i++) { 307 eec = ReadCapReg32(eecp); 308 309 if ((eec & XHCI_LEGSUP_BIOSOWNED) == 0) 310 break; 311 312 TRACE_ALWAYS("controller is still bios owned, waiting\n"); 313 snooze(50000); 314 } 315 316 if (eec & XHCI_LEGSUP_BIOSOWNED) { 317 TRACE_ERROR("bios won't give up control over the host " 318 "controller (ignoring)\n"); 319 } else if (eec & XHCI_LEGSUP_OSOWNED) { 320 TRACE_ALWAYS("successfully took ownership of the host " 321 "controller\n"); 322 } 323 324 // Force off the BIOS owned flag, and clear all SMIs. Some BIOSes 325 // do indicate a successful handover but do not remove their SMIs 326 // and then freeze the system when interrupts are generated. 327 WriteCapReg32(eecp, eec & ~XHCI_LEGSUP_BIOSOWNED); 328 } 329 break; 330 } 331 uint32 legctlsts = ReadCapReg32(eecp + XHCI_LEGCTLSTS); 332 legctlsts &= XHCI_LEGCTLSTS_DISABLE_SMI; 333 legctlsts |= XHCI_LEGCTLSTS_EVENTS_SMI; 334 WriteCapReg32(eecp + XHCI_LEGCTLSTS, legctlsts); 335 336 // We need to explicitly take ownership of EHCI ports on earlier Intel chipsets. 337 if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL) { 338 switch (fPCIInfo->device_id) { 339 case PCI_DEVICE_INTEL_PANTHER_POINT_XHCI: 340 case PCI_DEVICE_INTEL_LYNX_POINT_XHCI: 341 case PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI: 342 case PCI_DEVICE_INTEL_BAYTRAIL_XHCI: 343 case PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI: 344 case PCI_DEVICE_INTEL_WILDCAT_POINT_LP_XHCI: 345 _SwitchIntelPorts(); 346 break; 347 } 348 } 349 350 // halt the host controller 351 if (ControllerHalt() < B_OK) { 352 return; 353 } 354 355 // reset the host controller 356 if (ControllerReset() < B_OK) { 357 TRACE_ERROR("host controller failed to reset\n"); 358 return; 359 } 360 361 fCmdCompSem = create_sem(0, "XHCI Command Complete"); 362 fFinishTransfersSem = create_sem(0, "XHCI Finish Transfers"); 363 fEventSem = create_sem(0, "XHCI Event"); 364 if (fFinishTransfersSem < B_OK || fCmdCompSem < B_OK || fEventSem < B_OK) { 365 TRACE_ERROR("failed to create semaphores\n"); 366 return; 367 } 368 369 // create event handler thread 370 fEventThread = spawn_kernel_thread(EventThread, "xhci event thread", 371 B_URGENT_PRIORITY, (void *)this); 372 resume_thread(fEventThread); 373 374 // create finisher service thread 375 fFinishThread = spawn_kernel_thread(FinishThread, "xhci finish thread", 376 B_URGENT_PRIORITY - 1, (void *)this); 377 resume_thread(fFinishThread); 378 379 // Find the right interrupt vector, using MSIs if available. 380 fIRQ = fPCIInfo->u.h0.interrupt_line; 381 if (sPCIx86Module != NULL && sPCIx86Module->get_msi_count(fPCIInfo->bus, 382 fPCIInfo->device, fPCIInfo->function) >= 1) { 383 uint8 msiVector = 0; 384 if (sPCIx86Module->configure_msi(fPCIInfo->bus, fPCIInfo->device, 385 fPCIInfo->function, 1, &msiVector) == B_OK 386 && sPCIx86Module->enable_msi(fPCIInfo->bus, fPCIInfo->device, 387 fPCIInfo->function) == B_OK) { 388 TRACE_ALWAYS("using message signaled interrupts\n"); 389 fIRQ = msiVector; 390 fUseMSI = true; 391 } 392 } 393 394 if (fIRQ == 0 || fIRQ == 0xFF) { 395 TRACE_MODULE_ERROR("device PCI:%d:%d:%d was assigned an invalid IRQ\n", 396 fPCIInfo->bus, fPCIInfo->device, fPCIInfo->function); 397 return; 398 } 399 400 // Install the interrupt handler 401 TRACE("installing interrupt handler\n"); 402 install_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this, 0); 403 404 memset(fPortSpeeds, 0, sizeof(fPortSpeeds)); 405 memset(fDevices, 0, sizeof(fDevices)); 406 407 fInitOK = true; 408 TRACE("driver construction successful\n"); 409 } 410 411 412 XHCI::~XHCI() 413 { 414 TRACE("tear down XHCI host controller driver\n"); 415 416 WriteOpReg(XHCI_CMD, 0); 417 418 int32 result = 0; 419 fStopThreads = true; 420 delete_sem(fCmdCompSem); 421 delete_sem(fFinishTransfersSem); 422 delete_sem(fEventSem); 423 wait_for_thread(fFinishThread, &result); 424 wait_for_thread(fEventThread, &result); 425 426 mutex_destroy(&fFinishedLock); 427 mutex_destroy(&fEventLock); 428 429 remove_io_interrupt_handler(fIRQ, InterruptHandler, (void *)this); 430 431 delete_area(fRegisterArea); 432 delete_area(fErstArea); 433 for (uint32 i = 0; i < fScratchpadCount; i++) 434 delete_area(fScratchpadArea[i]); 435 delete_area(fDcbaArea); 436 437 if (fUseMSI && sPCIx86Module != NULL) { 438 sPCIx86Module->disable_msi(fPCIInfo->bus, 439 fPCIInfo->device, fPCIInfo->function); 440 sPCIx86Module->unconfigure_msi(fPCIInfo->bus, 441 fPCIInfo->device, fPCIInfo->function); 442 } 443 put_module(B_PCI_MODULE_NAME); 444 if (sPCIx86Module != NULL) 445 put_module(B_PCI_X86_MODULE_NAME); 446 } 447 448 449 void 450 XHCI::_SwitchIntelPorts() 451 { 452 TRACE("Looking for EHCI owned ports\n"); 453 uint32 ports = sPCIModule->read_pci_config(fPCIInfo->bus, 454 fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3PRM, 4); 455 TRACE("Superspeed Ports: 0x%" B_PRIx32 "\n", ports); 456 sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device, 457 fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4, ports); 458 ports = sPCIModule->read_pci_config(fPCIInfo->bus, 459 fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB3_PSSEN, 4); 460 TRACE("Superspeed ports now under XHCI : 0x%" B_PRIx32 "\n", ports); 461 ports = sPCIModule->read_pci_config(fPCIInfo->bus, 462 fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_USB2PRM, 4); 463 TRACE("USB 2.0 Ports : 0x%" B_PRIx32 "\n", ports); 464 sPCIModule->write_pci_config(fPCIInfo->bus, fPCIInfo->device, 465 fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4, ports); 466 ports = sPCIModule->read_pci_config(fPCIInfo->bus, 467 fPCIInfo->device, fPCIInfo->function, XHCI_INTEL_XUSB2PR, 4); 468 TRACE("USB 2.0 ports now under XHCI: 0x%" B_PRIx32 "\n", ports); 469 } 470 471 472 status_t 473 XHCI::Start() 474 { 475 TRACE_ALWAYS("starting XHCI host controller\n"); 476 TRACE("usbcmd: 0x%08" B_PRIx32 "; usbsts: 0x%08" B_PRIx32 "\n", 477 ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS)); 478 479 if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) { 480 TRACE("Start() failed STS_CNR\n"); 481 } 482 483 if ((ReadOpReg(XHCI_CMD) & CMD_RUN) != 0) { 484 TRACE_ERROR("Start() warning, starting running XHCI controller!\n"); 485 } 486 487 if ((ReadOpReg(XHCI_PAGESIZE) & (1 << 0)) == 0) { 488 TRACE_ERROR("controller does not support 4K page size\n"); 489 return B_ERROR; 490 } 491 492 // read port count from capability register 493 uint32 capabilities = ReadCapReg32(XHCI_HCSPARAMS1); 494 fPortCount = HCS_MAX_PORTS(capabilities); 495 if (fPortCount == 0) { 496 TRACE_ERROR("invalid number of ports: %u\n", fPortCount); 497 return B_ERROR; 498 } 499 500 fSlotCount = HCS_MAX_SLOTS(capabilities); 501 if (fSlotCount > XHCI_MAX_DEVICES) 502 fSlotCount = XHCI_MAX_DEVICES; 503 WriteOpReg(XHCI_CONFIG, fSlotCount); 504 505 // find out which protocol is used for each port 506 uint8 portFound = 0; 507 uint32 cparams = ReadCapReg32(XHCI_HCCPARAMS); 508 uint32 eec = 0xffffffff; 509 uint32 eecp = HCS0_XECP(cparams) << 2; 510 for (; eecp != 0 && XECP_NEXT(eec) && portFound < fPortCount; 511 eecp += XECP_NEXT(eec) << 2) { 512 eec = ReadCapReg32(eecp); 513 if (XECP_ID(eec) != XHCI_SUPPORTED_PROTOCOLS_CAPID) 514 continue; 515 if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) > 3) 516 continue; 517 uint32 temp = ReadCapReg32(eecp + 8); 518 uint32 offset = XHCI_SUPPORTED_PROTOCOLS_1_OFFSET(temp); 519 uint32 count = XHCI_SUPPORTED_PROTOCOLS_1_COUNT(temp); 520 if (offset == 0 || count == 0) 521 continue; 522 offset--; 523 for (uint32 i = offset; i < offset + count; i++) { 524 if (XHCI_SUPPORTED_PROTOCOLS_0_MAJOR(eec) == 0x3) 525 fPortSpeeds[i] = USB_SPEED_SUPERSPEED; 526 else 527 fPortSpeeds[i] = USB_SPEED_HIGHSPEED; 528 529 TRACE("speed for port %" B_PRId32 " is %s\n", i, 530 fPortSpeeds[i] == USB_SPEED_SUPERSPEED ? "super" : "high"); 531 } 532 portFound += count; 533 } 534 535 uint32 params2 = ReadCapReg32(XHCI_HCSPARAMS2); 536 fScratchpadCount = HCS_MAX_SC_BUFFERS(params2); 537 if (fScratchpadCount > XHCI_MAX_SCRATCHPADS) { 538 TRACE_ERROR("invalid number of scratchpads: %" B_PRIu32 "\n", 539 fScratchpadCount); 540 return B_ERROR; 541 } 542 543 uint32 params3 = ReadCapReg32(XHCI_HCSPARAMS3); 544 fExitLatMax = HCS_U1_DEVICE_LATENCY(params3) 545 + HCS_U2_DEVICE_LATENCY(params3); 546 547 // clear interrupts & disable device notifications 548 WriteOpReg(XHCI_STS, ReadOpReg(XHCI_STS)); 549 WriteOpReg(XHCI_DNCTRL, 0); 550 551 // allocate Device Context Base Address array 552 phys_addr_t dmaAddress; 553 fDcbaArea = fStack->AllocateArea((void **)&fDcba, &dmaAddress, 554 sizeof(*fDcba), "DCBA Area"); 555 if (fDcbaArea < B_OK) { 556 TRACE_ERROR("unable to create the DCBA area\n"); 557 return B_ERROR; 558 } 559 memset(fDcba, 0, sizeof(*fDcba)); 560 memset(fScratchpadArea, 0, sizeof(fScratchpadArea)); 561 memset(fScratchpad, 0, sizeof(fScratchpad)); 562 563 // setting the first address to the scratchpad array address 564 fDcba->baseAddress[0] = dmaAddress 565 + offsetof(struct xhci_device_context_array, scratchpad); 566 567 // fill up the scratchpad array with scratchpad pages 568 for (uint32 i = 0; i < fScratchpadCount; i++) { 569 phys_addr_t scratchDmaAddress; 570 fScratchpadArea[i] = fStack->AllocateArea((void **)&fScratchpad[i], 571 &scratchDmaAddress, B_PAGE_SIZE, "Scratchpad Area"); 572 if (fScratchpadArea[i] < B_OK) { 573 TRACE_ERROR("unable to create the scratchpad area\n"); 574 return B_ERROR; 575 } 576 fDcba->scratchpad[i] = scratchDmaAddress; 577 } 578 579 TRACE("setting DCBAAP %" B_PRIxPHYSADDR "\n", dmaAddress); 580 WriteOpReg(XHCI_DCBAAP_LO, (uint32)dmaAddress); 581 WriteOpReg(XHCI_DCBAAP_HI, (uint32)(dmaAddress >> 32)); 582 583 // allocate Event Ring Segment Table 584 uint8 *addr; 585 fErstArea = fStack->AllocateArea((void **)&addr, &dmaAddress, 586 (XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb) 587 + sizeof(xhci_erst_element), 588 "USB XHCI ERST CMD_RING and EVENT_RING Area"); 589 590 if (fErstArea < B_OK) { 591 TRACE_ERROR("unable to create the ERST AND RING area\n"); 592 delete_area(fDcbaArea); 593 return B_ERROR; 594 } 595 fErst = (xhci_erst_element *)addr; 596 memset(fErst, 0, (XHCI_MAX_COMMANDS + XHCI_MAX_EVENTS) * sizeof(xhci_trb) 597 + sizeof(xhci_erst_element)); 598 599 // fill with Event Ring Segment Base Address and Event Ring Segment Size 600 fErst->rs_addr = dmaAddress + sizeof(xhci_erst_element); 601 fErst->rs_size = XHCI_MAX_EVENTS; 602 fErst->rsvdz = 0; 603 604 addr += sizeof(xhci_erst_element); 605 fEventRing = (xhci_trb *)addr; 606 addr += XHCI_MAX_EVENTS * sizeof(xhci_trb); 607 fCmdRing = (xhci_trb *)addr; 608 609 TRACE("setting ERST size\n"); 610 WriteRunReg32(XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1)); 611 612 TRACE("setting ERDP addr = 0x%" B_PRIx64 "\n", fErst->rs_addr); 613 WriteRunReg32(XHCI_ERDP_LO(0), (uint32)fErst->rs_addr); 614 WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(fErst->rs_addr >> 32)); 615 616 TRACE("setting ERST base addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress); 617 WriteRunReg32(XHCI_ERSTBA_LO(0), (uint32)dmaAddress); 618 WriteRunReg32(XHCI_ERSTBA_HI(0), (uint32)(dmaAddress >> 32)); 619 620 dmaAddress += sizeof(xhci_erst_element) + XHCI_MAX_EVENTS 621 * sizeof(xhci_trb); 622 623 // Make sure the Command Ring is stopped 624 if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) { 625 TRACE_ALWAYS("Command Ring is running, send stop/cancel\n"); 626 WriteOpReg(XHCI_CRCR_LO, CRCR_CS); 627 WriteOpReg(XHCI_CRCR_HI, 0); 628 WriteOpReg(XHCI_CRCR_LO, CRCR_CA); 629 WriteOpReg(XHCI_CRCR_HI, 0); 630 snooze(1000); 631 if ((ReadOpReg(XHCI_CRCR_LO) & CRCR_CRR) != 0) { 632 TRACE_ERROR("Command Ring still running after stop/cancel\n"); 633 } 634 } 635 TRACE("setting CRCR addr = 0x%" B_PRIxPHYSADDR "\n", dmaAddress); 636 WriteOpReg(XHCI_CRCR_LO, (uint32)dmaAddress | CRCR_RCS); 637 WriteOpReg(XHCI_CRCR_HI, (uint32)(dmaAddress >> 32)); 638 // link trb 639 fCmdRing[XHCI_MAX_COMMANDS - 1].address = dmaAddress; 640 641 TRACE("setting interrupt rate\n"); 642 643 // Setting IMOD below 0x3F8 on Intel Lynx Point can cause IRQ lockups 644 if (fPCIInfo->vendor_id == PCI_VENDOR_INTEL 645 && (fPCIInfo->device_id == PCI_DEVICE_INTEL_PANTHER_POINT_XHCI 646 || fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_XHCI 647 || fPCIInfo->device_id == PCI_DEVICE_INTEL_LYNX_POINT_LP_XHCI 648 || fPCIInfo->device_id == PCI_DEVICE_INTEL_BAYTRAIL_XHCI 649 || fPCIInfo->device_id == PCI_DEVICE_INTEL_WILDCAT_POINT_XHCI)) { 650 WriteRunReg32(XHCI_IMOD(0), 0x000003f8); // 4000 irq/s 651 } else { 652 WriteRunReg32(XHCI_IMOD(0), 0x000001f4); // 8000 irq/s 653 } 654 655 TRACE("enabling interrupt\n"); 656 WriteRunReg32(XHCI_IMAN(0), ReadRunReg32(XHCI_IMAN(0)) | IMAN_INTR_ENA); 657 658 WriteOpReg(XHCI_CMD, CMD_RUN | CMD_INTE | CMD_HSEE); 659 660 // wait for start up state 661 if (WaitOpBits(XHCI_STS, STS_HCH, 0) != B_OK) { 662 TRACE_ERROR("HCH start up timeout\n"); 663 } 664 665 fRootHub = new(std::nothrow) XHCIRootHub(RootObject(), 1); 666 if (!fRootHub) { 667 TRACE_ERROR("no memory to allocate root hub\n"); 668 return B_NO_MEMORY; 669 } 670 671 if (fRootHub->InitCheck() < B_OK) { 672 TRACE_ERROR("root hub failed init check\n"); 673 return fRootHub->InitCheck(); 674 } 675 676 SetRootHub(fRootHub); 677 678 TRACE_ALWAYS("successfully started the controller\n"); 679 680 #ifdef TRACE_USB 681 TRACE("No-Op test...\n"); 682 Noop(); 683 #endif 684 685 return BusManager::Start(); 686 } 687 688 689 status_t 690 XHCI::SubmitTransfer(Transfer *transfer) 691 { 692 // short circuit the root hub 693 if (transfer->TransferPipe()->DeviceAddress() == 1) 694 return fRootHub->ProcessTransfer(this, transfer); 695 696 TRACE("SubmitTransfer(%p)\n", transfer); 697 Pipe *pipe = transfer->TransferPipe(); 698 if ((pipe->Type() & USB_OBJECT_CONTROL_PIPE) != 0) 699 return SubmitControlRequest(transfer); 700 return SubmitNormalRequest(transfer); 701 } 702 703 704 status_t 705 XHCI::SubmitControlRequest(Transfer *transfer) 706 { 707 Pipe *pipe = transfer->TransferPipe(); 708 usb_request_data *requestData = transfer->RequestData(); 709 bool directionIn = (requestData->RequestType & USB_REQTYPE_DEVICE_IN) != 0; 710 711 TRACE("SubmitControlRequest() length %d\n", requestData->Length); 712 713 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 714 if (endpoint == NULL) { 715 TRACE_ERROR("control pipe has no endpoint!\n"); 716 return B_BAD_VALUE; 717 } 718 if (endpoint->device == NULL) { 719 panic("endpoint is not initialized!"); 720 return B_NO_INIT; 721 } 722 723 status_t status = transfer->InitKernelAccess(); 724 if (status != B_OK) 725 return status; 726 727 xhci_td *descriptor = CreateDescriptor(3, 1, requestData->Length); 728 if (descriptor == NULL) 729 return B_NO_MEMORY; 730 descriptor->transfer = transfer; 731 732 // Setup Stage 733 uint8 index = 0; 734 memcpy(&descriptor->trbs[index].address, requestData, 735 sizeof(usb_request_data)); 736 descriptor->trbs[index].status = TRB_2_IRQ(0) | TRB_2_BYTES(8); 737 descriptor->trbs[index].flags 738 = TRB_3_TYPE(TRB_TYPE_SETUP_STAGE) | TRB_3_IDT_BIT | TRB_3_CYCLE_BIT; 739 if (requestData->Length > 0) { 740 descriptor->trbs[index].flags |= 741 directionIn ? TRB_3_TRT_IN : TRB_3_TRT_OUT; 742 } 743 744 index++; 745 746 // Data Stage (if any) 747 if (requestData->Length > 0) { 748 descriptor->trbs[index].address = descriptor->buffer_addrs[0]; 749 descriptor->trbs[index].status = TRB_2_IRQ(0) 750 | TRB_2_BYTES(requestData->Length) 751 | TRB_2_TD_SIZE(0); 752 descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_DATA_STAGE) 753 | (directionIn ? TRB_3_DIR_IN : 0) 754 | TRB_3_CYCLE_BIT; 755 756 if (!directionIn) { 757 transfer->PrepareKernelAccess(); 758 memcpy(descriptor->buffers[0], 759 (uint8 *)transfer->Vector()[0].iov_base, requestData->Length); 760 } 761 762 index++; 763 } 764 765 // Status Stage 766 descriptor->trbs[index].address = 0; 767 descriptor->trbs[index].status = TRB_2_IRQ(0); 768 descriptor->trbs[index].flags = TRB_3_TYPE(TRB_TYPE_STATUS_STAGE) 769 | ((directionIn && requestData->Length > 0) ? 0 : TRB_3_DIR_IN) 770 | TRB_3_CHAIN_BIT | TRB_3_ENT_BIT | TRB_3_CYCLE_BIT; 771 // Status Stage is an OUT transfer when the device is sending data 772 // (XHCI 1.2 § 4.11.2.2 Table 4-7 p213), and the CHAIN bit must be 773 // set when using an Event Data TRB (as _LinkDescriptorForPipe does) 774 // (XHCI 1.2 § 6.4.1.2.3 Table 6-31 p472) 775 776 descriptor->trb_used = index + 1; 777 778 status = _LinkDescriptorForPipe(descriptor, endpoint); 779 if (status != B_OK) { 780 FreeDescriptor(descriptor); 781 return status; 782 } 783 784 return B_OK; 785 } 786 787 788 status_t 789 XHCI::SubmitNormalRequest(Transfer *transfer) 790 { 791 TRACE("SubmitNormalRequest() length %" B_PRIuSIZE "\n", transfer->FragmentLength()); 792 793 Pipe *pipe = transfer->TransferPipe(); 794 usb_isochronous_data *isochronousData = transfer->IsochronousData(); 795 bool directionIn = (pipe->Direction() == Pipe::In); 796 797 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 798 if (endpoint == NULL) { 799 TRACE_ERROR("pipe has no endpoint!\n"); 800 return B_BAD_VALUE; 801 } 802 if (endpoint->device == NULL) { 803 panic("endpoint is not initialized!"); 804 return B_NO_INIT; 805 } 806 807 status_t status = transfer->InitKernelAccess(); 808 if (status != B_OK) 809 return status; 810 811 // TRBs within a TD must be "grouped" into TD Fragments, which mostly means 812 // that a max_burst_payload boundary cannot be crossed within a TRB, but 813 // only between TRBs. More than one TRB can be in a TD Fragment, but we keep 814 // things simple by setting trbSize to the MBP. (XHCI 1.2 § 4.11.7.1 p235.) 815 size_t trbSize = endpoint->max_burst_payload; 816 817 if (isochronousData != NULL) { 818 if (isochronousData->packet_count == 0) 819 return B_BAD_VALUE; 820 821 // Isochronous transfers use more specifically sized packets. 822 trbSize = transfer->DataLength() / isochronousData->packet_count; 823 if (trbSize == 0 || trbSize > pipe->MaxPacketSize() || trbSize 824 != (size_t)isochronousData->packet_descriptors[0].request_length) 825 return B_BAD_VALUE; 826 } 827 828 // Now that we know trbSize, compute the count. 829 const int32 trbCount = (transfer->FragmentLength() + trbSize - 1) / trbSize; 830 831 xhci_td *td = CreateDescriptor(trbCount, trbCount, trbSize); 832 if (td == NULL) 833 return B_NO_MEMORY; 834 835 // Normal Stage 836 const size_t maxPacketSize = pipe->MaxPacketSize(); 837 size_t remaining = transfer->FragmentLength(); 838 for (int32 i = 0; i < trbCount; i++) { 839 int32 trbLength = (remaining < trbSize) ? remaining : trbSize; 840 remaining -= trbLength; 841 842 // The "TD Size" field of a transfer TRB indicates the number of 843 // remaining maximum-size *packets* in this TD, *not* including the 844 // packets in the current TRB, and capped at 31 if there are more 845 // than 31 packets remaining in the TD. (XHCI 1.2 § 4.11.2.4 p218.) 846 int32 tdSize = (remaining + maxPacketSize - 1) / maxPacketSize; 847 if (tdSize > 31) 848 tdSize = 31; 849 850 td->trbs[i].address = td->buffer_addrs[i]; 851 td->trbs[i].status = TRB_2_IRQ(0) 852 | TRB_2_BYTES(trbLength) 853 | TRB_2_TD_SIZE(tdSize); 854 td->trbs[i].flags = TRB_3_TYPE(TRB_TYPE_NORMAL) 855 | TRB_3_CYCLE_BIT | TRB_3_CHAIN_BIT; 856 857 td->trb_used++; 858 } 859 860 // Isochronous-specific 861 if (isochronousData != NULL) { 862 // This is an isochronous transfer; we need to make the first TRB 863 // an isochronous TRB. 864 td->trbs[0].flags &= ~(TRB_3_TYPE(TRB_TYPE_NORMAL)); 865 td->trbs[0].flags |= TRB_3_TYPE(TRB_TYPE_ISOCH); 866 867 // Isochronous pipes are scheduled by microframes, one of which 868 // is 125us for USB 2 and above. But for USB 1 it was 1ms, so 869 // we need to use a different frame delta for that case. 870 uint8 frameDelta = 1; 871 if (transfer->TransferPipe()->Speed() == USB_SPEED_FULLSPEED) 872 frameDelta = 8; 873 874 // TODO: We do not currently take Mult into account at all! 875 // How are we supposed to do that here? 876 877 // Determine the (starting) frame number: if ISO_ASAP is set, 878 // we are queueing this "right away", and so want to reset 879 // the starting_frame_number. Otherwise we use the passed one. 880 uint32 frame; 881 if ((isochronousData->flags & USB_ISO_ASAP) != 0 882 || isochronousData->starting_frame_number == NULL) { 883 // All reads from the microframe index register must be 884 // incremented by 1. (XHCI 1.2 § 4.14.2.1.4 p265.) 885 frame = ReadRunReg32(XHCI_MFINDEX) + 1; 886 td->trbs[0].flags |= TRB_3_ISO_SIA_BIT; 887 } else { 888 frame = *isochronousData->starting_frame_number; 889 td->trbs[0].flags |= TRB_3_FRID(frame); 890 } 891 frame = (frame + frameDelta) % 2048; 892 if (isochronousData->starting_frame_number != NULL) 893 *isochronousData->starting_frame_number = frame; 894 895 // TODO: The OHCI bus driver seems to also do this for inbound 896 // isochronous transfers. Perhaps it should be moved into the stack? 897 if (directionIn) { 898 for (uint32 i = 0; i < isochronousData->packet_count; i++) { 899 isochronousData->packet_descriptors[i].actual_length = 0; 900 isochronousData->packet_descriptors[i].status = B_NO_INIT; 901 } 902 } 903 } 904 905 // Set the ENT (Evaluate Next TRB) bit, so that the HC will not switch 906 // contexts before evaluating the Link TRB that _LinkDescriptorForPipe 907 // will insert, as otherwise there would be a race between us freeing 908 // and unlinking the descriptor, and the controller evaluating the Link TRB 909 // and thus getting back onto the main ring and executing the Event Data 910 // TRB that generates the interrupt for this transfer. 911 // 912 // Note that we *do not* unset the CHAIN bit in this TRB, thus including 913 // the Link TRB in this TD formally, which is required when using the 914 // ENT bit. (XHCI 1.2 § 4.12.3 p250.) 915 td->trbs[td->trb_used - 1].flags |= TRB_3_ENT_BIT; 916 917 if (!directionIn) { 918 TRACE("copying out iov count %ld\n", transfer->VectorCount()); 919 status_t status = transfer->PrepareKernelAccess(); 920 if (status != B_OK) { 921 FreeDescriptor(td); 922 return status; 923 } 924 WriteDescriptor(td, transfer->Vector(), transfer->VectorCount()); 925 } 926 927 td->transfer = transfer; 928 status = _LinkDescriptorForPipe(td, endpoint); 929 if (status != B_OK) { 930 FreeDescriptor(td); 931 return status; 932 } 933 934 return B_OK; 935 } 936 937 938 status_t 939 XHCI::CancelQueuedTransfers(Pipe *pipe, bool force) 940 { 941 xhci_endpoint* endpoint = (xhci_endpoint*)pipe->ControllerCookie(); 942 if (endpoint == NULL || endpoint->trbs == NULL) { 943 // Someone's de-allocated this pipe or endpoint in the meantime. 944 // (Possibly AllocateDevice failed, and we were the temporary pipe.) 945 return B_NO_INIT; 946 } 947 948 #ifndef TRACE_USB 949 if (force) 950 #endif 951 { 952 TRACE_ALWAYS("cancel queued transfers (%" B_PRId8 ") for pipe %p (%d)\n", 953 endpoint->used, pipe, pipe->EndpointAddress()); 954 } 955 956 MutexLocker endpointLocker(endpoint->lock); 957 958 if (endpoint->td_head == NULL) { 959 // There aren't any currently pending transfers to cancel. 960 return B_OK; 961 } 962 963 // Calling the callbacks while holding the endpoint lock could potentially 964 // cause deadlocks, so we instead store them in a pointer array. We need 965 // to do this separately from freeing the TDs, for in the case we fail 966 // to stop the endpoint, we cancel the transfers but do not free the TDs. 967 Transfer* transfers[XHCI_MAX_TRANSFERS]; 968 int32 transfersCount = 0; 969 970 for (xhci_td* td = endpoint->td_head; td != NULL; td = td->next) { 971 if (td->transfer == NULL) 972 continue; 973 974 // We can't cancel or delete transfers under "force", as they probably 975 // are not safe to use anymore. 976 if (!force) { 977 transfers[transfersCount] = td->transfer; 978 transfersCount++; 979 } 980 td->transfer = NULL; 981 } 982 983 // It is possible that while waiting for the stop-endpoint command to 984 // complete, one of the queued transfers posts a completion event, so in 985 // order to avoid a deadlock, we must unlock the endpoint. 986 endpointLocker.Unlock(); 987 status_t status = StopEndpoint(false, endpoint); 988 if (status == B_DEV_STALLED) { 989 // Only exit from a Halted state is a reset. (XHCI 1.2 § 4.8.3 p163.) 990 TRACE_ERROR("cancel queued transfers: halted endpoint, reset!\n"); 991 status = ResetEndpoint(false, endpoint); 992 } 993 endpointLocker.Lock(); 994 995 // Detach the head TD from the endpoint. 996 xhci_td* td_head = endpoint->td_head; 997 endpoint->td_head = NULL; 998 999 if (status == B_OK) { 1000 // Clear the endpoint's TRBs. 1001 memset(endpoint->trbs, 0, sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE); 1002 endpoint->used = 0; 1003 endpoint->current = 0; 1004 1005 // Set dequeue pointer location to the beginning of the ring. 1006 SetTRDequeue(endpoint->trb_addr, 0, endpoint->id + 1, 1007 endpoint->device->slot); 1008 1009 // We don't need to do anything else to restart the ring, as it will resume 1010 // operation as normal upon the next doorbell. (XHCI 1.2 § 4.6.9 p136.) 1011 } else { 1012 // We couldn't stop the endpoint. Most likely the device has been 1013 // removed and the endpoint was stopped by the hardware, or is 1014 // for some reason busy and cannot be stopped. 1015 TRACE_ERROR("cancel queued transfers: could not stop endpoint: %s!\n", 1016 strerror(status)); 1017 1018 // Instead of freeing the TDs, we want to leave them in the endpoint 1019 // so that when/if the hardware returns, they can be properly unlinked, 1020 // as otherwise the endpoint could get "stuck" by having the "used" 1021 // slowly accumulate due to "dead" transfers. 1022 endpoint->td_head = td_head; 1023 td_head = NULL; 1024 } 1025 1026 endpointLocker.Unlock(); 1027 1028 for (int32 i = 0; i < transfersCount; i++) { 1029 transfers[i]->Finished(B_CANCELED, 0); 1030 delete transfers[i]; 1031 } 1032 1033 // This loop looks a bit strange because we need to store the "next" 1034 // pointer before freeing the descriptor. 1035 xhci_td* td; 1036 while ((td = td_head) != NULL) { 1037 td_head = td_head->next; 1038 FreeDescriptor(td); 1039 } 1040 1041 return B_OK; 1042 } 1043 1044 1045 status_t 1046 XHCI::StartDebugTransfer(Transfer *transfer) 1047 { 1048 Pipe *pipe = transfer->TransferPipe(); 1049 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 1050 if (endpoint == NULL) 1051 return B_BAD_VALUE; 1052 1053 // Check all locks that we are going to hit when running transfers. 1054 if (mutex_trylock(&endpoint->lock) != B_OK) 1055 return B_WOULD_BLOCK; 1056 if (mutex_trylock(&fFinishedLock) != B_OK) { 1057 mutex_unlock(&endpoint->lock); 1058 return B_WOULD_BLOCK; 1059 } 1060 if (mutex_trylock(&fEventLock) != B_OK) { 1061 mutex_unlock(&endpoint->lock); 1062 mutex_unlock(&fFinishedLock); 1063 return B_WOULD_BLOCK; 1064 } 1065 mutex_unlock(&endpoint->lock); 1066 mutex_unlock(&fFinishedLock); 1067 mutex_unlock(&fEventLock); 1068 1069 status_t status = SubmitTransfer(transfer); 1070 if (status != B_OK) 1071 return status; 1072 1073 // The endpoint's head TD is the TD of the just-submitted transfer. 1074 // Just like EHCI, abuse the callback cookie to hold the TD pointer. 1075 transfer->SetCallback(NULL, endpoint->td_head); 1076 1077 return B_OK; 1078 } 1079 1080 1081 status_t 1082 XHCI::CheckDebugTransfer(Transfer *transfer) 1083 { 1084 xhci_td *transfer_td = (xhci_td *)transfer->CallbackCookie(); 1085 if (transfer_td == NULL) 1086 return B_NO_INIT; 1087 1088 // Process events once, and then look for it in the finished list. 1089 ProcessEvents(); 1090 xhci_td *previous = NULL; 1091 for (xhci_td *td = fFinishedHead; td != NULL; td = td->next) { 1092 if (td != transfer_td) { 1093 previous = td; 1094 continue; 1095 } 1096 1097 // We've found it! 1098 if (previous == NULL) { 1099 fFinishedHead = fFinishedHead->next; 1100 } else { 1101 previous->next = td->next; 1102 } 1103 1104 bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out); 1105 status_t status = (td->trb_completion_code == COMP_SUCCESS 1106 || td->trb_completion_code == COMP_SHORT_PACKET) ? B_OK : B_ERROR; 1107 1108 if (status == B_OK && directionIn) 1109 ReadDescriptor(td, transfer->Vector(), transfer->VectorCount()); 1110 1111 FreeDescriptor(td); 1112 transfer->SetCallback(NULL, NULL); 1113 return status; 1114 } 1115 1116 // We didn't find it. 1117 spin(75); 1118 return B_DEV_PENDING; 1119 } 1120 1121 1122 void 1123 XHCI::CancelDebugTransfer(Transfer *transfer) 1124 { 1125 while (CheckDebugTransfer(transfer) == B_DEV_PENDING) 1126 spin(100); 1127 } 1128 1129 1130 status_t 1131 XHCI::NotifyPipeChange(Pipe *pipe, usb_change change) 1132 { 1133 TRACE("pipe change %d for pipe %p (%d)\n", change, pipe, 1134 pipe->EndpointAddress()); 1135 1136 switch (change) { 1137 case USB_CHANGE_CREATED: 1138 return _InsertEndpointForPipe(pipe); 1139 case USB_CHANGE_DESTROYED: 1140 return _RemoveEndpointForPipe(pipe); 1141 1142 case USB_CHANGE_PIPE_POLICY_CHANGED: 1143 // We don't care about these, at least for now. 1144 return B_OK; 1145 } 1146 1147 TRACE_ERROR("unknown pipe change!\n"); 1148 return B_UNSUPPORTED; 1149 } 1150 1151 1152 xhci_td * 1153 XHCI::CreateDescriptor(uint32 trbCount, uint32 bufferCount, size_t bufferSize) 1154 { 1155 const bool inKDL = debug_debugger_running(); 1156 1157 xhci_td *result; 1158 if (!inKDL) { 1159 result = (xhci_td*)calloc(1, sizeof(xhci_td)); 1160 } else { 1161 // Just use the physical memory allocator while in KDL; it's less 1162 // secure than using the regular heap, but it's easier to deal with. 1163 phys_addr_t dummy; 1164 fStack->AllocateChunk((void **)&result, &dummy, sizeof(xhci_td)); 1165 } 1166 1167 if (result == NULL) { 1168 TRACE_ERROR("failed to allocate a transfer descriptor\n"); 1169 return NULL; 1170 } 1171 1172 // We always allocate 1 more TRB than requested, so that 1173 // _LinkDescriptorForPipe() has room to insert a link TRB. 1174 trbCount++; 1175 if (fStack->AllocateChunk((void **)&result->trbs, &result->trb_addr, 1176 (trbCount * sizeof(xhci_trb))) < B_OK) { 1177 TRACE_ERROR("failed to allocate TRBs\n"); 1178 FreeDescriptor(result); 1179 return NULL; 1180 } 1181 result->trb_count = trbCount; 1182 result->trb_used = 0; 1183 1184 if (bufferSize > 0) { 1185 // Due to how the USB stack allocates physical memory, we can't just 1186 // request one large chunk the size of the transfer, and so instead we 1187 // create a series of buffers as requested by our caller. 1188 1189 // We store the buffer pointers and addresses in one memory block. 1190 if (!inKDL) { 1191 result->buffers = (void**)calloc(bufferCount, 1192 (sizeof(void*) + sizeof(phys_addr_t))); 1193 } else { 1194 phys_addr_t dummy; 1195 fStack->AllocateChunk((void **)&result->buffers, &dummy, 1196 bufferCount * (sizeof(void*) + sizeof(phys_addr_t))); 1197 } 1198 if (result->buffers == NULL) { 1199 TRACE_ERROR("unable to allocate space for buffer infos\n"); 1200 FreeDescriptor(result); 1201 return NULL; 1202 } 1203 result->buffer_addrs = (phys_addr_t*)&result->buffers[bufferCount]; 1204 result->buffer_size = bufferSize; 1205 result->buffer_count = bufferCount; 1206 1207 // Optimization: If the requested total size of all buffers is less 1208 // than 32*B_PAGE_SIZE (the maximum size that the physical memory 1209 // allocator can handle), we allocate only one buffer and segment it. 1210 size_t totalSize = bufferSize * bufferCount; 1211 if (totalSize < (32 * B_PAGE_SIZE)) { 1212 if (fStack->AllocateChunk(&result->buffers[0], 1213 &result->buffer_addrs[0], totalSize) < B_OK) { 1214 TRACE_ERROR("unable to allocate space for large buffer (size %ld)\n", 1215 totalSize); 1216 FreeDescriptor(result); 1217 return NULL; 1218 } 1219 for (uint32 i = 1; i < bufferCount; i++) { 1220 result->buffers[i] = (void*)((addr_t)(result->buffers[i - 1]) 1221 + bufferSize); 1222 result->buffer_addrs[i] = result->buffer_addrs[i - 1] 1223 + bufferSize; 1224 } 1225 } else { 1226 // Otherwise, we allocate each buffer individually. 1227 for (uint32 i = 0; i < bufferCount; i++) { 1228 if (fStack->AllocateChunk(&result->buffers[i], 1229 &result->buffer_addrs[i], bufferSize) < B_OK) { 1230 TRACE_ERROR("unable to allocate space for a buffer (size " 1231 "%" B_PRIuSIZE ", count %" B_PRIu32 ")\n", 1232 bufferSize, bufferCount); 1233 FreeDescriptor(result); 1234 return NULL; 1235 } 1236 } 1237 } 1238 } else { 1239 result->buffers = NULL; 1240 result->buffer_addrs = NULL; 1241 } 1242 1243 // Initialize all other fields. 1244 result->transfer = NULL; 1245 result->trb_completion_code = 0; 1246 result->trb_left = 0; 1247 result->next = NULL; 1248 1249 TRACE("CreateDescriptor allocated %p, buffer_size %ld, buffer_count %" B_PRIu32 "\n", 1250 result, result->buffer_size, result->buffer_count); 1251 1252 return result; 1253 } 1254 1255 1256 void 1257 XHCI::FreeDescriptor(xhci_td *descriptor) 1258 { 1259 if (descriptor == NULL) 1260 return; 1261 1262 const bool inKDL = debug_debugger_running(); 1263 1264 if (descriptor->trbs != NULL) { 1265 fStack->FreeChunk(descriptor->trbs, descriptor->trb_addr, 1266 (descriptor->trb_count * sizeof(xhci_trb))); 1267 } 1268 if (descriptor->buffers != NULL) { 1269 size_t totalSize = descriptor->buffer_size * descriptor->buffer_count; 1270 if (totalSize < (32 * B_PAGE_SIZE)) { 1271 // This was allocated as one contiguous buffer. 1272 fStack->FreeChunk(descriptor->buffers[0], descriptor->buffer_addrs[0], 1273 totalSize); 1274 } else { 1275 for (uint32 i = 0; i < descriptor->buffer_count; i++) { 1276 if (descriptor->buffers[i] == NULL) 1277 continue; 1278 fStack->FreeChunk(descriptor->buffers[i], descriptor->buffer_addrs[i], 1279 descriptor->buffer_size); 1280 } 1281 } 1282 1283 if (!inKDL) { 1284 free(descriptor->buffers); 1285 } else { 1286 fStack->FreeChunk(descriptor->buffers, 0, 1287 descriptor->buffer_count * (sizeof(void*) + sizeof(phys_addr_t))); 1288 } 1289 } 1290 1291 if (!inKDL) 1292 free(descriptor); 1293 else 1294 fStack->FreeChunk(descriptor, 0, sizeof(xhci_td)); 1295 } 1296 1297 1298 size_t 1299 XHCI::WriteDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount) 1300 { 1301 size_t written = 0; 1302 1303 size_t bufIdx = 0, bufUsed = 0; 1304 for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) { 1305 size_t length = vector[vecIdx].iov_len; 1306 1307 while (length > 0 && bufIdx < descriptor->buffer_count) { 1308 size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed); 1309 memcpy((uint8 *)descriptor->buffers[bufIdx] + bufUsed, 1310 (uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length), 1311 toCopy); 1312 1313 written += toCopy; 1314 bufUsed += toCopy; 1315 length -= toCopy; 1316 if (bufUsed == descriptor->buffer_size) { 1317 bufIdx++; 1318 bufUsed = 0; 1319 } 1320 } 1321 } 1322 1323 TRACE("wrote descriptor (%" B_PRIuSIZE " bytes)\n", written); 1324 return written; 1325 } 1326 1327 1328 size_t 1329 XHCI::ReadDescriptor(xhci_td *descriptor, iovec *vector, size_t vectorCount) 1330 { 1331 size_t read = 0; 1332 1333 size_t bufIdx = 0, bufUsed = 0; 1334 for (size_t vecIdx = 0; vecIdx < vectorCount; vecIdx++) { 1335 size_t length = vector[vecIdx].iov_len; 1336 1337 while (length > 0 && bufIdx < descriptor->buffer_count) { 1338 size_t toCopy = min_c(length, descriptor->buffer_size - bufUsed); 1339 memcpy((uint8 *)vector[vecIdx].iov_base + (vector[vecIdx].iov_len - length), 1340 (uint8 *)descriptor->buffers[bufIdx] + bufUsed, toCopy); 1341 1342 read += toCopy; 1343 bufUsed += toCopy; 1344 length -= toCopy; 1345 if (bufUsed == descriptor->buffer_size) { 1346 bufIdx++; 1347 bufUsed = 0; 1348 } 1349 } 1350 } 1351 1352 TRACE("read descriptor (%" B_PRIuSIZE " bytes)\n", read); 1353 return read; 1354 } 1355 1356 1357 Device * 1358 XHCI::AllocateDevice(Hub *parent, int8 hubAddress, uint8 hubPort, 1359 usb_speed speed) 1360 { 1361 TRACE("AllocateDevice hubAddress %d hubPort %d speed %d\n", hubAddress, 1362 hubPort, speed); 1363 1364 uint8 slot = XHCI_MAX_SLOTS; 1365 status_t status = EnableSlot(&slot); 1366 if (status != B_OK) { 1367 TRACE_ERROR("failed to enable slot: %s\n", strerror(status)); 1368 return NULL; 1369 } 1370 1371 if (slot == 0 || slot > fSlotCount) { 1372 TRACE_ERROR("AllocateDevice: bad slot\n"); 1373 return NULL; 1374 } 1375 1376 if (fDevices[slot].slot != 0) { 1377 TRACE_ERROR("AllocateDevice: slot already used\n"); 1378 return NULL; 1379 } 1380 1381 struct xhci_device *device = &fDevices[slot]; 1382 device->slot = slot; 1383 1384 device->input_ctx_area = fStack->AllocateArea((void **)&device->input_ctx, 1385 &device->input_ctx_addr, sizeof(*device->input_ctx) << fContextSizeShift, 1386 "XHCI input context"); 1387 if (device->input_ctx_area < B_OK) { 1388 TRACE_ERROR("unable to create a input context area\n"); 1389 CleanupDevice(device); 1390 return NULL; 1391 } 1392 if (fContextSizeShift == 1) { 1393 // 64-byte contexts have to be page-aligned in order for 1394 // _OffsetContextAddr to function properly. 1395 ASSERT((((addr_t)device->input_ctx) % B_PAGE_SIZE) == 0); 1396 } 1397 1398 memset(device->input_ctx, 0, sizeof(*device->input_ctx) << fContextSizeShift); 1399 _WriteContext(&device->input_ctx->input.dropFlags, 0); 1400 _WriteContext(&device->input_ctx->input.addFlags, 3); 1401 1402 uint8 rhPort = hubPort; 1403 uint32 route = 0; 1404 for (Device *hubDevice = parent; hubDevice != RootObject(); 1405 hubDevice = (Device *)hubDevice->Parent()) { 1406 if (hubDevice->Parent() == RootObject()) 1407 break; 1408 1409 if (rhPort > 15) 1410 rhPort = 15; 1411 route = route << 4; 1412 route |= rhPort; 1413 1414 rhPort = hubDevice->HubPort(); 1415 } 1416 1417 uint32 dwslot0 = SLOT_0_NUM_ENTRIES(1) | SLOT_0_ROUTE(route); 1418 1419 // Get speed of port, only if device connected to root hub port 1420 // else we have to rely on value reported by the Hub Explore thread 1421 if (route == 0) { 1422 GetPortSpeed(hubPort - 1, &speed); 1423 TRACE("speed updated %d\n", speed); 1424 } 1425 1426 // add the speed 1427 switch (speed) { 1428 case USB_SPEED_LOWSPEED: 1429 dwslot0 |= SLOT_0_SPEED(2); 1430 break; 1431 case USB_SPEED_FULLSPEED: 1432 dwslot0 |= SLOT_0_SPEED(1); 1433 break; 1434 case USB_SPEED_HIGHSPEED: 1435 dwslot0 |= SLOT_0_SPEED(3); 1436 break; 1437 case USB_SPEED_SUPERSPEED: 1438 dwslot0 |= SLOT_0_SPEED(4); 1439 break; 1440 default: 1441 TRACE_ERROR("unknown usb speed\n"); 1442 break; 1443 } 1444 1445 _WriteContext(&device->input_ctx->slot.dwslot0, dwslot0); 1446 // TODO enable power save 1447 _WriteContext(&device->input_ctx->slot.dwslot1, SLOT_1_RH_PORT(rhPort)); 1448 uint32 dwslot2 = SLOT_2_IRQ_TARGET(0); 1449 1450 // If LS/FS device connected to non-root HS device 1451 if (route != 0 && parent->Speed() == USB_SPEED_HIGHSPEED 1452 && (speed == USB_SPEED_LOWSPEED || speed == USB_SPEED_FULLSPEED)) { 1453 struct xhci_device *parenthub = (struct xhci_device *) 1454 parent->ControllerCookie(); 1455 dwslot2 |= SLOT_2_PORT_NUM(hubPort); 1456 dwslot2 |= SLOT_2_TT_HUB_SLOT(parenthub->slot); 1457 } 1458 1459 _WriteContext(&device->input_ctx->slot.dwslot2, dwslot2); 1460 1461 _WriteContext(&device->input_ctx->slot.dwslot3, SLOT_3_SLOT_STATE(0) 1462 | SLOT_3_DEVICE_ADDRESS(0)); 1463 1464 TRACE("slot 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%08" B_PRIx32 1465 "\n", _ReadContext(&device->input_ctx->slot.dwslot0), 1466 _ReadContext(&device->input_ctx->slot.dwslot1), 1467 _ReadContext(&device->input_ctx->slot.dwslot2), 1468 _ReadContext(&device->input_ctx->slot.dwslot3)); 1469 1470 device->device_ctx_area = fStack->AllocateArea((void **)&device->device_ctx, 1471 &device->device_ctx_addr, sizeof(*device->device_ctx) << fContextSizeShift, 1472 "XHCI device context"); 1473 if (device->device_ctx_area < B_OK) { 1474 TRACE_ERROR("unable to create a device context area\n"); 1475 CleanupDevice(device); 1476 return NULL; 1477 } 1478 memset(device->device_ctx, 0, sizeof(*device->device_ctx) << fContextSizeShift); 1479 1480 device->trb_area = fStack->AllocateArea((void **)&device->trbs, 1481 &device->trb_addr, sizeof(xhci_trb) * (XHCI_MAX_ENDPOINTS - 1) 1482 * XHCI_ENDPOINT_RING_SIZE, "XHCI endpoint trbs"); 1483 if (device->trb_area < B_OK) { 1484 TRACE_ERROR("unable to create a device trbs area\n"); 1485 CleanupDevice(device); 1486 return NULL; 1487 } 1488 1489 // set up slot pointer to device context 1490 fDcba->baseAddress[slot] = device->device_ctx_addr; 1491 1492 size_t maxPacketSize; 1493 switch (speed) { 1494 case USB_SPEED_LOWSPEED: 1495 case USB_SPEED_FULLSPEED: 1496 maxPacketSize = 8; 1497 break; 1498 case USB_SPEED_HIGHSPEED: 1499 maxPacketSize = 64; 1500 break; 1501 default: 1502 maxPacketSize = 512; 1503 break; 1504 } 1505 1506 xhci_endpoint* endpoint0 = &device->endpoints[0]; 1507 mutex_init(&endpoint0->lock, "xhci endpoint lock"); 1508 endpoint0->device = device; 1509 endpoint0->id = 0; 1510 endpoint0->td_head = NULL; 1511 endpoint0->used = 0; 1512 endpoint0->current = 0; 1513 endpoint0->trbs = device->trbs; 1514 endpoint0->trb_addr = device->trb_addr; 1515 1516 // configure the Control endpoint 0 1517 if (ConfigureEndpoint(endpoint0, slot, 0, USB_OBJECT_CONTROL_PIPE, false, 1518 0, maxPacketSize, speed, 0, 0) != B_OK) { 1519 TRACE_ERROR("unable to configure default control endpoint\n"); 1520 CleanupDevice(device); 1521 return NULL; 1522 } 1523 1524 // device should get to addressed state (bsr = 0) 1525 status = SetAddress(device->input_ctx_addr, false, slot); 1526 if (status != B_OK) { 1527 TRACE_ERROR("unable to set address: %s\n", strerror(status)); 1528 CleanupDevice(device); 1529 return NULL; 1530 } 1531 1532 device->address = SLOT_3_DEVICE_ADDRESS_GET(_ReadContext( 1533 &device->device_ctx->slot.dwslot3)); 1534 1535 TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", device->address, 1536 SLOT_3_SLOT_STATE_GET(_ReadContext( 1537 &device->device_ctx->slot.dwslot3))); 1538 TRACE("endpoint0 state 0x%08" B_PRIx32 "\n", 1539 ENDPOINT_0_STATE_GET(_ReadContext( 1540 &device->device_ctx->endpoints[0].dwendpoint0))); 1541 1542 // Wait a bit for the device to complete addressing 1543 snooze(USB_DELAY_SET_ADDRESS); 1544 1545 // Create a temporary pipe with the new address 1546 ControlPipe pipe(parent); 1547 pipe.SetControllerCookie(endpoint0); 1548 pipe.InitCommon(device->address + 1, 0, speed, Pipe::Default, maxPacketSize, 0, 1549 hubAddress, hubPort); 1550 1551 // Get the device descriptor 1552 // Just retrieve the first 8 bytes of the descriptor -> minimum supported 1553 // size of any device. It is enough because it includes the device type. 1554 1555 size_t actualLength = 0; 1556 usb_device_descriptor deviceDescriptor; 1557 1558 TRACE("getting the device descriptor\n"); 1559 status = pipe.SendRequest( 1560 USB_REQTYPE_DEVICE_IN | USB_REQTYPE_STANDARD, // type 1561 USB_REQUEST_GET_DESCRIPTOR, // request 1562 USB_DESCRIPTOR_DEVICE << 8, // value 1563 0, // index 1564 8, // length 1565 (void *)&deviceDescriptor, // buffer 1566 8, // buffer length 1567 &actualLength); // actual length 1568 1569 if (actualLength != 8) { 1570 TRACE_ERROR("failed to get the device descriptor: %s\n", 1571 strerror(status)); 1572 CleanupDevice(device); 1573 return NULL; 1574 } 1575 1576 TRACE("device_class: %d device_subclass %d device_protocol %d\n", 1577 deviceDescriptor.device_class, deviceDescriptor.device_subclass, 1578 deviceDescriptor.device_protocol); 1579 1580 if (speed == USB_SPEED_FULLSPEED && deviceDescriptor.max_packet_size_0 != 8) { 1581 TRACE("Full speed device with different max packet size for Endpoint 0\n"); 1582 uint32 dwendpoint1 = _ReadContext( 1583 &device->input_ctx->endpoints[0].dwendpoint1); 1584 dwendpoint1 &= ~ENDPOINT_1_MAXPACKETSIZE(0xffff); 1585 dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE( 1586 deviceDescriptor.max_packet_size_0); 1587 _WriteContext(&device->input_ctx->endpoints[0].dwendpoint1, 1588 dwendpoint1); 1589 _WriteContext(&device->input_ctx->input.dropFlags, 0); 1590 _WriteContext(&device->input_ctx->input.addFlags, (1 << 1)); 1591 EvaluateContext(device->input_ctx_addr, device->slot); 1592 } 1593 1594 Device *deviceObject = NULL; 1595 if (deviceDescriptor.device_class == 0x09) { 1596 TRACE("creating new Hub\n"); 1597 TRACE("getting the hub descriptor\n"); 1598 size_t actualLength = 0; 1599 usb_hub_descriptor hubDescriptor; 1600 status = pipe.SendRequest( 1601 USB_REQTYPE_DEVICE_IN | USB_REQTYPE_CLASS, // type 1602 USB_REQUEST_GET_DESCRIPTOR, // request 1603 USB_DESCRIPTOR_HUB << 8, // value 1604 0, // index 1605 sizeof(usb_hub_descriptor), // length 1606 (void *)&hubDescriptor, // buffer 1607 sizeof(usb_hub_descriptor), // buffer length 1608 &actualLength); 1609 1610 if (actualLength != sizeof(usb_hub_descriptor)) { 1611 TRACE_ERROR("error while getting the hub descriptor: %s\n", 1612 strerror(status)); 1613 CleanupDevice(device); 1614 return NULL; 1615 } 1616 1617 uint32 dwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0); 1618 dwslot0 |= SLOT_0_HUB_BIT; 1619 _WriteContext(&device->input_ctx->slot.dwslot0, dwslot0); 1620 uint32 dwslot1 = _ReadContext(&device->input_ctx->slot.dwslot1); 1621 dwslot1 |= SLOT_1_NUM_PORTS(hubDescriptor.num_ports); 1622 _WriteContext(&device->input_ctx->slot.dwslot1, dwslot1); 1623 if (speed == USB_SPEED_HIGHSPEED) { 1624 uint32 dwslot2 = _ReadContext(&device->input_ctx->slot.dwslot2); 1625 dwslot2 |= SLOT_2_TT_TIME(HUB_TTT_GET(hubDescriptor.characteristics)); 1626 _WriteContext(&device->input_ctx->slot.dwslot2, dwslot2); 1627 } 1628 1629 deviceObject = new(std::nothrow) Hub(parent, hubAddress, hubPort, 1630 deviceDescriptor, device->address + 1, speed, false, device); 1631 } else { 1632 TRACE("creating new device\n"); 1633 deviceObject = new(std::nothrow) Device(parent, hubAddress, hubPort, 1634 deviceDescriptor, device->address + 1, speed, false, device); 1635 } 1636 if (deviceObject == NULL || deviceObject->InitCheck() != B_OK) { 1637 if (deviceObject == NULL) { 1638 TRACE_ERROR("no memory to allocate device\n"); 1639 } else { 1640 TRACE_ERROR("device object failed to initialize\n"); 1641 } 1642 CleanupDevice(device); 1643 return NULL; 1644 } 1645 1646 // We don't want to disable the default endpoint, naturally, which would 1647 // otherwise happen when this Pipe object is destroyed. 1648 pipe.SetControllerCookie(NULL); 1649 1650 TRACE("AllocateDevice() port %d slot %d\n", hubPort, slot); 1651 return deviceObject; 1652 } 1653 1654 1655 void 1656 XHCI::FreeDevice(Device *usbDevice) 1657 { 1658 xhci_device* device = (xhci_device*)usbDevice->ControllerCookie(); 1659 TRACE("FreeDevice() slot %d\n", device->slot); 1660 1661 // Delete the device first, so it cleans up its pipes and tells us 1662 // what we need to destroy before we tear down our internal state. 1663 delete usbDevice; 1664 1665 CleanupDevice(device); 1666 } 1667 1668 1669 void 1670 XHCI::CleanupDevice(xhci_device *device) 1671 { 1672 if (device->slot != 0) { 1673 DisableSlot(device->slot); 1674 fDcba->baseAddress[device->slot] = 0; 1675 } 1676 1677 if (device->trb_addr != 0) 1678 delete_area(device->trb_area); 1679 if (device->input_ctx_addr != 0) 1680 delete_area(device->input_ctx_area); 1681 if (device->device_ctx_addr != 0) 1682 delete_area(device->device_ctx_area); 1683 1684 memset(device, 0, sizeof(xhci_device)); 1685 } 1686 1687 1688 uint8 1689 XHCI::_GetEndpointState(xhci_endpoint* endpoint) 1690 { 1691 struct xhci_device_ctx* device_ctx = endpoint->device->device_ctx; 1692 return ENDPOINT_0_STATE_GET( 1693 _ReadContext(&device_ctx->endpoints[endpoint->id].dwendpoint0)); 1694 } 1695 1696 1697 status_t 1698 XHCI::_InsertEndpointForPipe(Pipe *pipe) 1699 { 1700 TRACE("insert endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress()); 1701 1702 if (pipe->ControllerCookie() != NULL 1703 || pipe->Parent()->Type() != USB_OBJECT_DEVICE) { 1704 // default pipe is already referenced 1705 return B_OK; 1706 } 1707 1708 Device* usbDevice = (Device *)pipe->Parent(); 1709 if (usbDevice->Parent() == RootObject()) { 1710 // root hub needs no initialization 1711 return B_OK; 1712 } 1713 1714 struct xhci_device *device = (struct xhci_device *) 1715 usbDevice->ControllerCookie(); 1716 if (device == NULL) { 1717 panic("device is NULL\n"); 1718 return B_NO_INIT; 1719 } 1720 1721 const uint8 id = (2 * pipe->EndpointAddress() 1722 + (pipe->Direction() != Pipe::Out ? 1 : 0)) - 1; 1723 if (id >= XHCI_MAX_ENDPOINTS - 1) 1724 return B_BAD_VALUE; 1725 1726 if (id > 0) { 1727 uint32 devicedwslot0 = _ReadContext(&device->device_ctx->slot.dwslot0); 1728 if (SLOT_0_NUM_ENTRIES_GET(devicedwslot0) == 1) { 1729 uint32 inputdwslot0 = _ReadContext(&device->input_ctx->slot.dwslot0); 1730 inputdwslot0 &= ~(SLOT_0_NUM_ENTRIES(0x1f)); 1731 inputdwslot0 |= SLOT_0_NUM_ENTRIES(XHCI_MAX_ENDPOINTS - 1); 1732 _WriteContext(&device->input_ctx->slot.dwslot0, inputdwslot0); 1733 EvaluateContext(device->input_ctx_addr, device->slot); 1734 } 1735 1736 xhci_endpoint* endpoint = &device->endpoints[id]; 1737 mutex_init(&endpoint->lock, "xhci endpoint lock"); 1738 MutexLocker endpointLocker(endpoint->lock); 1739 1740 endpoint->device = device; 1741 endpoint->id = id; 1742 endpoint->td_head = NULL; 1743 endpoint->used = 0; 1744 endpoint->current = 0; 1745 1746 endpoint->trbs = device->trbs + id * XHCI_ENDPOINT_RING_SIZE; 1747 endpoint->trb_addr = device->trb_addr 1748 + id * XHCI_ENDPOINT_RING_SIZE * sizeof(xhci_trb); 1749 memset(endpoint->trbs, 0, 1750 sizeof(xhci_trb) * XHCI_ENDPOINT_RING_SIZE); 1751 1752 TRACE("insert endpoint for pipe: trbs, device %p endpoint %p\n", 1753 device->trbs, endpoint->trbs); 1754 TRACE("insert endpoint for pipe: trb_addr, device 0x%" B_PRIxPHYSADDR 1755 " endpoint 0x%" B_PRIxPHYSADDR "\n", device->trb_addr, 1756 endpoint->trb_addr); 1757 1758 const uint8 endpointNum = id + 1; 1759 1760 status_t status = ConfigureEndpoint(endpoint, device->slot, id, pipe->Type(), 1761 pipe->Direction() == Pipe::In, pipe->Interval(), pipe->MaxPacketSize(), 1762 usbDevice->Speed(), pipe->MaxBurst(), pipe->BytesPerInterval()); 1763 if (status != B_OK) { 1764 TRACE_ERROR("unable to configure endpoint: %s\n", strerror(status)); 1765 return status; 1766 } 1767 1768 _WriteContext(&device->input_ctx->input.dropFlags, 0); 1769 _WriteContext(&device->input_ctx->input.addFlags, 1770 (1 << endpointNum) | (1 << 0)); 1771 1772 ConfigureEndpoint(device->input_ctx_addr, false, device->slot); 1773 1774 TRACE("device: address 0x%x state 0x%08" B_PRIx32 "\n", 1775 device->address, SLOT_3_SLOT_STATE_GET(_ReadContext( 1776 &device->device_ctx->slot.dwslot3))); 1777 TRACE("endpoint[0] state 0x%08" B_PRIx32 "\n", 1778 ENDPOINT_0_STATE_GET(_ReadContext( 1779 &device->device_ctx->endpoints[0].dwendpoint0))); 1780 TRACE("endpoint[%d] state 0x%08" B_PRIx32 "\n", id, 1781 ENDPOINT_0_STATE_GET(_ReadContext( 1782 &device->device_ctx->endpoints[id].dwendpoint0))); 1783 } 1784 pipe->SetControllerCookie(&device->endpoints[id]); 1785 1786 return B_OK; 1787 } 1788 1789 1790 status_t 1791 XHCI::_RemoveEndpointForPipe(Pipe *pipe) 1792 { 1793 TRACE("remove endpoint for pipe %p (%d)\n", pipe, pipe->EndpointAddress()); 1794 1795 if (pipe->Parent()->Type() != USB_OBJECT_DEVICE) 1796 return B_OK; 1797 Device* usbDevice = (Device *)pipe->Parent(); 1798 if (usbDevice->Parent() == RootObject()) 1799 return B_BAD_VALUE; 1800 1801 xhci_endpoint *endpoint = (xhci_endpoint *)pipe->ControllerCookie(); 1802 if (endpoint == NULL || endpoint->trbs == NULL) 1803 return B_NO_INIT; 1804 1805 pipe->SetControllerCookie(NULL); 1806 1807 if (endpoint->id > 0) { 1808 xhci_device *device = endpoint->device; 1809 uint8 epNumber = endpoint->id + 1; 1810 StopEndpoint(true, endpoint); 1811 1812 mutex_lock(&endpoint->lock); 1813 1814 // See comment in CancelQueuedTransfers. 1815 xhci_td* td; 1816 while ((td = endpoint->td_head) != NULL) { 1817 endpoint->td_head = endpoint->td_head->next; 1818 FreeDescriptor(td); 1819 } 1820 1821 mutex_destroy(&endpoint->lock); 1822 memset(endpoint, 0, sizeof(xhci_endpoint)); 1823 1824 _WriteContext(&device->input_ctx->input.dropFlags, (1 << epNumber)); 1825 _WriteContext(&device->input_ctx->input.addFlags, (1 << 0)); 1826 1827 // The Deconfigure bit in the Configure Endpoint command indicates 1828 // that *all* endpoints are to be deconfigured, and not just the ones 1829 // specified in the context flags. (XHCI 1.2 § 4.6.6 p115.) 1830 ConfigureEndpoint(device->input_ctx_addr, false, device->slot); 1831 } 1832 1833 return B_OK; 1834 } 1835 1836 1837 status_t 1838 XHCI::_LinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1839 { 1840 TRACE("link descriptor for pipe\n"); 1841 1842 // Use mutex_trylock first, in case we are in KDL. 1843 MutexLocker endpointLocker(&endpoint->lock, mutex_trylock(&endpoint->lock) == B_OK); 1844 1845 // "used" refers to the number of currently linked TDs, not the number of 1846 // used TRBs on the ring (we use 2 TRBs on the ring per transfer.) 1847 if (endpoint->used >= (XHCI_MAX_TRANSFERS - 1)) { 1848 TRACE_ERROR("link descriptor for pipe: max transfers count exceeded\n"); 1849 return B_BAD_VALUE; 1850 } 1851 1852 // We do not support queuing other transfers in tandem with a fragmented one. 1853 if (endpoint->td_head != NULL && endpoint->td_head->transfer != NULL 1854 && endpoint->td_head->transfer->IsFragmented()) { 1855 TRACE_ERROR("cannot submit transfer: a fragmented transfer is queued\n"); 1856 return B_DEV_RESOURCE_CONFLICT; 1857 } 1858 1859 endpoint->used++; 1860 descriptor->next = endpoint->td_head; 1861 endpoint->td_head = descriptor; 1862 1863 const uint8 current = endpoint->current, 1864 eventdata = current + 1; 1865 uint8 next = eventdata + 1; 1866 1867 TRACE("link descriptor for pipe: current %d, next %d\n", current, next); 1868 1869 // Add a Link TRB to the end of the descriptor. 1870 phys_addr_t addr = endpoint->trb_addr + eventdata * sizeof(xhci_trb); 1871 descriptor->trbs[descriptor->trb_used].address = addr; 1872 descriptor->trbs[descriptor->trb_used].status = TRB_2_IRQ(0); 1873 descriptor->trbs[descriptor->trb_used].flags = TRB_3_TYPE(TRB_TYPE_LINK) 1874 | TRB_3_CHAIN_BIT | TRB_3_CYCLE_BIT; 1875 // It is specified that (XHCI 1.2 § 4.12.3 Note 2 p251) if the TRB 1876 // following one with the ENT bit set is a Link TRB, the Link TRB 1877 // shall be evaluated *and* the subsequent TRB shall be. Thus a 1878 // TRB_3_ENT_BIT is unnecessary here; and from testing seems to 1879 // break all transfers on a (very) small number of controllers. 1880 1881 #if !B_HOST_IS_LENDIAN 1882 // Convert endianness. 1883 for (uint32 i = 0; i <= descriptor->trb_used; i++) { 1884 descriptor->trbs[i].address = 1885 B_HOST_TO_LENDIAN_INT64(descriptor->trbs[i].address); 1886 descriptor->trbs[i].status = 1887 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].status); 1888 descriptor->trbs[i].flags = 1889 B_HOST_TO_LENDIAN_INT32(descriptor->trbs[i].flags); 1890 } 1891 #endif 1892 1893 // Link the descriptor. 1894 endpoint->trbs[current].address = 1895 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr); 1896 endpoint->trbs[current].status = 1897 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1898 endpoint->trbs[current].flags = 1899 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK)); 1900 1901 // Set up the Event Data TRB (XHCI 1.2 § 4.11.5.2 p230.) 1902 // 1903 // We do this on the main ring for two reasons: first, to avoid a small 1904 // potential race between the interrupt and the controller evaluating 1905 // the link TRB to get back onto the ring; and second, because many 1906 // controllers throw errors if the target of a Link TRB is not valid 1907 // (i.e. does not have its Cycle Bit set.) 1908 // 1909 // We also set the "address" field, which the controller will copy 1910 // verbatim into the TRB it posts to the event ring, to be the last 1911 // "real" TRB in the TD; this will allow us to determine what transfer 1912 // the resulting Transfer Event TRB refers to. 1913 endpoint->trbs[eventdata].address = 1914 B_HOST_TO_LENDIAN_INT64(descriptor->trb_addr 1915 + (descriptor->trb_used - 1) * sizeof(xhci_trb)); 1916 endpoint->trbs[eventdata].status = 1917 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1918 endpoint->trbs[eventdata].flags = 1919 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_EVENT_DATA) 1920 | TRB_3_IOC_BIT | TRB_3_CYCLE_BIT); 1921 1922 if (next == (XHCI_ENDPOINT_RING_SIZE - 1)) { 1923 // We always use 2 TRBs per _Link..() call, so if "next" is the last 1924 // TRB in the ring, we need to generate a link TRB at "next", and 1925 // then wrap it to 0. 1926 endpoint->trbs[next].address = 1927 B_HOST_TO_LENDIAN_INT64(endpoint->trb_addr); 1928 endpoint->trbs[next].status = 1929 B_HOST_TO_LENDIAN_INT32(TRB_2_IRQ(0)); 1930 endpoint->trbs[next].flags = 1931 B_HOST_TO_LENDIAN_INT32(TRB_3_TYPE(TRB_TYPE_LINK) | TRB_3_CYCLE_BIT); 1932 1933 next = 0; 1934 } 1935 1936 endpoint->trbs[next].address = 0; 1937 endpoint->trbs[next].status = 0; 1938 endpoint->trbs[next].flags = 0; 1939 1940 // Everything is ready, so write the cycle bit. 1941 endpoint->trbs[current].flags |= B_HOST_TO_LENDIAN_INT32(TRB_3_CYCLE_BIT); 1942 1943 TRACE("_LinkDescriptorForPipe pCurrent %p phys 0x%" B_PRIxPHYSADDR 1944 " 0x%" B_PRIxPHYSADDR " 0x%08" B_PRIx32 "\n", &endpoint->trbs[current], 1945 endpoint->trb_addr + current * sizeof(struct xhci_trb), 1946 endpoint->trbs[current].address, 1947 B_LENDIAN_TO_HOST_INT32(endpoint->trbs[current].flags)); 1948 1949 endpoint->current = next; 1950 endpointLocker.Unlock(); 1951 1952 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1953 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1954 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1955 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1956 1957 Ring(endpoint->device->slot, endpoint->id + 1); 1958 1959 TRACE("Endpoint status 0x%08" B_PRIx32 " 0x%08" B_PRIx32 " 0x%016" B_PRIx64 "\n", 1960 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint0), 1961 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].dwendpoint1), 1962 _ReadContext(&endpoint->device->device_ctx->endpoints[endpoint->id].qwendpoint2)); 1963 1964 return B_OK; 1965 } 1966 1967 1968 status_t 1969 XHCI::_UnlinkDescriptorForPipe(xhci_td *descriptor, xhci_endpoint *endpoint) 1970 { 1971 TRACE("unlink descriptor for pipe\n"); 1972 // We presume that the caller has already locked or owns the endpoint. 1973 1974 endpoint->used--; 1975 if (descriptor == endpoint->td_head) { 1976 endpoint->td_head = descriptor->next; 1977 descriptor->next = NULL; 1978 return B_OK; 1979 } else { 1980 for (xhci_td *td = endpoint->td_head; td->next != NULL; td = td->next) { 1981 if (td->next == descriptor) { 1982 td->next = descriptor->next; 1983 descriptor->next = NULL; 1984 return B_OK; 1985 } 1986 } 1987 } 1988 1989 endpoint->used++; 1990 return B_ERROR; 1991 } 1992 1993 1994 status_t 1995 XHCI::ConfigureEndpoint(xhci_endpoint* ep, uint8 slot, uint8 number, uint8 type, 1996 bool directionIn, uint16 interval, uint16 maxPacketSize, usb_speed speed, 1997 uint8 maxBurst, uint16 bytesPerInterval) 1998 { 1999 struct xhci_device* device = &fDevices[slot]; 2000 2001 uint32 dwendpoint0 = 0; 2002 uint32 dwendpoint1 = 0; 2003 uint64 qwendpoint2 = 0; 2004 uint32 dwendpoint4 = 0; 2005 2006 // Compute and assign the endpoint type. (XHCI 1.2 § 6.2.3 Table 6-9 p452.) 2007 uint8 xhciType = 4; 2008 if ((type & USB_OBJECT_INTERRUPT_PIPE) != 0) 2009 xhciType = 3; 2010 if ((type & USB_OBJECT_BULK_PIPE) != 0) 2011 xhciType = 2; 2012 if ((type & USB_OBJECT_ISO_PIPE) != 0) 2013 xhciType = 1; 2014 xhciType |= directionIn ? (1 << 2) : 0; 2015 dwendpoint1 |= ENDPOINT_1_EPTYPE(xhciType); 2016 2017 // Compute and assign interval. (XHCI 1.2 § 6.2.3.6 p456.) 2018 uint16 calcInterval; 2019 if ((type & USB_OBJECT_BULK_PIPE) != 0 2020 || (type & USB_OBJECT_CONTROL_PIPE) != 0) { 2021 // Bulk and Control endpoints never issue NAKs. 2022 calcInterval = 0; 2023 } else { 2024 switch (speed) { 2025 case USB_SPEED_FULLSPEED: 2026 if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2027 // Convert 1-16 into 3-18. 2028 calcInterval = min_c(max_c(interval, 1), 16) + 2; 2029 break; 2030 } 2031 2032 // fall through 2033 case USB_SPEED_LOWSPEED: { 2034 // Convert 1ms-255ms into 3-10. 2035 2036 // Find the index of the highest set bit in "interval". 2037 uint32 temp = min_c(max_c(interval, 1), 255); 2038 for (calcInterval = 0; temp != 1; calcInterval++) 2039 temp = temp >> 1; 2040 calcInterval += 3; 2041 break; 2042 } 2043 2044 case USB_SPEED_HIGHSPEED: 2045 case USB_SPEED_SUPERSPEED: 2046 default: 2047 // Convert 1-16 into 0-15. 2048 calcInterval = min_c(max_c(interval, 1), 16) - 1; 2049 break; 2050 } 2051 } 2052 dwendpoint0 |= ENDPOINT_0_INTERVAL(calcInterval); 2053 2054 // For non-isochronous endpoints, we want the controller to retry failed 2055 // transfers, if possible. (XHCI 1.2 § 4.10.2.3 p197.) 2056 if ((type & USB_OBJECT_ISO_PIPE) == 0) 2057 dwendpoint1 |= ENDPOINT_1_CERR(3); 2058 2059 // Assign maximum burst size. For USB3 devices this is passed in; for 2060 // all other devices we compute it. (XHCI 1.2 § 4.8.2 p161.) 2061 if (speed == USB_SPEED_HIGHSPEED && (type & (USB_OBJECT_INTERRUPT_PIPE 2062 | USB_OBJECT_ISO_PIPE)) != 0) { 2063 maxBurst = (maxPacketSize & 0x1800) >> 11; 2064 } else if (speed != USB_SPEED_SUPERSPEED) { 2065 maxBurst = 0; 2066 } 2067 dwendpoint1 |= ENDPOINT_1_MAXBURST(maxBurst); 2068 2069 // Assign maximum packet size, set the ring address, and set the 2070 // "Dequeue Cycle State" bit. (XHCI 1.2 § 6.2.3 Table 6-10 p453.) 2071 dwendpoint1 |= ENDPOINT_1_MAXPACKETSIZE(maxPacketSize); 2072 qwendpoint2 |= ENDPOINT_2_DCS_BIT | ep->trb_addr; 2073 2074 // The Max Burst Payload is the number of bytes moved by a 2075 // maximum sized burst. (XHCI 1.2 § 4.11.7.1 p236.) 2076 ep->max_burst_payload = (maxBurst + 1) * maxPacketSize; 2077 if (ep->max_burst_payload == 0) { 2078 TRACE_ERROR("ConfigureEndpoint() failed invalid max_burst_payload\n"); 2079 return B_BAD_VALUE; 2080 } 2081 2082 // Assign average TRB length. 2083 if ((type & USB_OBJECT_CONTROL_PIPE) != 0) { 2084 // Control pipes are a special case, as they rarely have 2085 // outbound transfers of any substantial size. 2086 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(8); 2087 } else if ((type & USB_OBJECT_ISO_PIPE) != 0) { 2088 // Isochronous pipes are another special case: the TRB size will be 2089 // one packet (which is normally smaller than the max packet size, 2090 // but we don't know what it is here.) 2091 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(maxPacketSize); 2092 } else { 2093 // Under all other circumstances, we put max_burst_payload in a TRB. 2094 dwendpoint4 |= ENDPOINT_4_AVGTRBLENGTH(ep->max_burst_payload); 2095 } 2096 2097 // Assign maximum ESIT payload. (XHCI 1.2 § 4.14.2 p259.) 2098 if ((type & (USB_OBJECT_INTERRUPT_PIPE | USB_OBJECT_ISO_PIPE)) != 0) { 2099 // TODO: For SuperSpeedPlus endpoints, there is yet another descriptor 2100 // for isochronous endpoints that specifies the maximum ESIT payload. 2101 // We don't fetch this yet, so just fall back to the USB2 computation 2102 // method if bytesPerInterval is 0. 2103 if (speed == USB_SPEED_SUPERSPEED && bytesPerInterval != 0) 2104 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD(bytesPerInterval); 2105 else if (speed >= USB_SPEED_HIGHSPEED) 2106 dwendpoint4 |= ENDPOINT_4_MAXESITPAYLOAD((maxBurst + 1) * maxPacketSize); 2107 } 2108 2109 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint0, 2110 dwendpoint0); 2111 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint1, 2112 dwendpoint1); 2113 _WriteContext(&device->input_ctx->endpoints[number].qwendpoint2, 2114 qwendpoint2); 2115 _WriteContext(&device->input_ctx->endpoints[number].dwendpoint4, 2116 dwendpoint4); 2117 2118 TRACE("endpoint 0x%" B_PRIx32 " 0x%" B_PRIx32 " 0x%" B_PRIx64 " 0x%" 2119 B_PRIx32 "\n", 2120 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint0), 2121 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint1), 2122 _ReadContext(&device->input_ctx->endpoints[number].qwendpoint2), 2123 _ReadContext(&device->input_ctx->endpoints[number].dwendpoint4)); 2124 2125 return B_OK; 2126 } 2127 2128 2129 status_t 2130 XHCI::GetPortSpeed(uint8 index, usb_speed* speed) 2131 { 2132 if (index >= fPortCount) 2133 return B_BAD_INDEX; 2134 2135 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2136 2137 switch (PS_SPEED_GET(portStatus)) { 2138 case 2: 2139 *speed = USB_SPEED_LOWSPEED; 2140 break; 2141 case 1: 2142 *speed = USB_SPEED_FULLSPEED; 2143 break; 2144 case 3: 2145 *speed = USB_SPEED_HIGHSPEED; 2146 break; 2147 case 4: 2148 *speed = USB_SPEED_SUPERSPEED; 2149 break; 2150 default: 2151 TRACE_ALWAYS("nonstandard port speed %" B_PRId32 ", assuming SuperSpeed\n", 2152 PS_SPEED_GET(portStatus)); 2153 *speed = USB_SPEED_SUPERSPEED; 2154 break; 2155 } 2156 2157 return B_OK; 2158 } 2159 2160 2161 status_t 2162 XHCI::GetPortStatus(uint8 index, usb_port_status* status) 2163 { 2164 if (index >= fPortCount) 2165 return B_BAD_INDEX; 2166 2167 status->status = status->change = 0; 2168 uint32 portStatus = ReadOpReg(XHCI_PORTSC(index)); 2169 TRACE("port %" B_PRId8 " status=0x%08" B_PRIx32 "\n", index, portStatus); 2170 2171 // build the status 2172 switch (PS_SPEED_GET(portStatus)) { 2173 case 3: 2174 status->status |= PORT_STATUS_HIGH_SPEED; 2175 break; 2176 case 2: 2177 status->status |= PORT_STATUS_LOW_SPEED; 2178 break; 2179 default: 2180 break; 2181 } 2182 2183 if (portStatus & PS_CCS) 2184 status->status |= PORT_STATUS_CONNECTION; 2185 if (portStatus & PS_PED) 2186 status->status |= PORT_STATUS_ENABLE; 2187 if (portStatus & PS_OCA) 2188 status->status |= PORT_STATUS_OVER_CURRENT; 2189 if (portStatus & PS_PR) 2190 status->status |= PORT_STATUS_RESET; 2191 if (portStatus & PS_PP) { 2192 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) 2193 status->status |= PORT_STATUS_SS_POWER; 2194 else 2195 status->status |= PORT_STATUS_POWER; 2196 } 2197 2198 // build the change 2199 if (portStatus & PS_CSC) 2200 status->change |= PORT_STATUS_CONNECTION; 2201 if (portStatus & PS_PEC) 2202 status->change |= PORT_STATUS_ENABLE; 2203 if (portStatus & PS_OCC) 2204 status->change |= PORT_STATUS_OVER_CURRENT; 2205 if (portStatus & PS_PRC) 2206 status->change |= PORT_STATUS_RESET; 2207 2208 if (fPortSpeeds[index] == USB_SPEED_SUPERSPEED) { 2209 if (portStatus & PS_PLC) 2210 status->change |= PORT_CHANGE_LINK_STATE; 2211 if (portStatus & PS_WRC) 2212 status->change |= PORT_CHANGE_BH_PORT_RESET; 2213 } 2214 2215 return B_OK; 2216 } 2217 2218 2219 status_t 2220 XHCI::SetPortFeature(uint8 index, uint16 feature) 2221 { 2222 TRACE("set port feature index %u feature %u\n", index, feature); 2223 if (index >= fPortCount) 2224 return B_BAD_INDEX; 2225 2226 uint32 portRegister = XHCI_PORTSC(index); 2227 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2228 2229 switch (feature) { 2230 case PORT_SUSPEND: 2231 if ((portStatus & PS_PED) == 0 || (portStatus & PS_PR) 2232 || (portStatus & PS_PLS_MASK) >= PS_XDEV_U3) { 2233 TRACE_ERROR("USB core suspending device not in U0/U1/U2.\n"); 2234 return B_BAD_VALUE; 2235 } 2236 portStatus &= ~PS_PLS_MASK; 2237 WriteOpReg(portRegister, portStatus | PS_LWS | PS_XDEV_U3); 2238 break; 2239 2240 case PORT_RESET: 2241 WriteOpReg(portRegister, portStatus | PS_PR); 2242 break; 2243 2244 case PORT_POWER: 2245 WriteOpReg(portRegister, portStatus | PS_PP); 2246 break; 2247 default: 2248 return B_BAD_VALUE; 2249 } 2250 ReadOpReg(portRegister); 2251 return B_OK; 2252 } 2253 2254 2255 status_t 2256 XHCI::ClearPortFeature(uint8 index, uint16 feature) 2257 { 2258 TRACE("clear port feature index %u feature %u\n", index, feature); 2259 if (index >= fPortCount) 2260 return B_BAD_INDEX; 2261 2262 uint32 portRegister = XHCI_PORTSC(index); 2263 uint32 portStatus = ReadOpReg(portRegister) & ~PS_CLEAR; 2264 2265 switch (feature) { 2266 case PORT_SUSPEND: 2267 portStatus = ReadOpReg(portRegister); 2268 if (portStatus & PS_PR) 2269 return B_BAD_VALUE; 2270 if (portStatus & PS_XDEV_U3) { 2271 if ((portStatus & PS_PED) == 0) 2272 return B_BAD_VALUE; 2273 portStatus &= ~PS_PLS_MASK; 2274 WriteOpReg(portRegister, portStatus | PS_XDEV_U0 | PS_LWS); 2275 } 2276 break; 2277 case PORT_ENABLE: 2278 WriteOpReg(portRegister, portStatus | PS_PED); 2279 break; 2280 case PORT_POWER: 2281 WriteOpReg(portRegister, portStatus & ~PS_PP); 2282 break; 2283 case C_PORT_CONNECTION: 2284 WriteOpReg(portRegister, portStatus | PS_CSC); 2285 break; 2286 case C_PORT_ENABLE: 2287 WriteOpReg(portRegister, portStatus | PS_PEC); 2288 break; 2289 case C_PORT_OVER_CURRENT: 2290 WriteOpReg(portRegister, portStatus | PS_OCC); 2291 break; 2292 case C_PORT_RESET: 2293 WriteOpReg(portRegister, portStatus | PS_PRC); 2294 break; 2295 case C_PORT_BH_PORT_RESET: 2296 WriteOpReg(portRegister, portStatus | PS_WRC); 2297 break; 2298 case C_PORT_LINK_STATE: 2299 WriteOpReg(portRegister, portStatus | PS_PLC); 2300 break; 2301 default: 2302 return B_BAD_VALUE; 2303 } 2304 2305 ReadOpReg(portRegister); 2306 return B_OK; 2307 } 2308 2309 2310 status_t 2311 XHCI::ControllerHalt() 2312 { 2313 // Mask off run state 2314 WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) & ~CMD_RUN); 2315 2316 // wait for shutdown state 2317 if (WaitOpBits(XHCI_STS, STS_HCH, STS_HCH) != B_OK) { 2318 TRACE_ERROR("HCH shutdown timeout\n"); 2319 return B_ERROR; 2320 } 2321 return B_OK; 2322 } 2323 2324 2325 status_t 2326 XHCI::ControllerReset() 2327 { 2328 TRACE("ControllerReset() cmd: 0x%" B_PRIx32 " sts: 0x%" B_PRIx32 "\n", 2329 ReadOpReg(XHCI_CMD), ReadOpReg(XHCI_STS)); 2330 WriteOpReg(XHCI_CMD, ReadOpReg(XHCI_CMD) | CMD_HCRST); 2331 2332 if (WaitOpBits(XHCI_CMD, CMD_HCRST, 0) != B_OK) { 2333 TRACE_ERROR("ControllerReset() failed CMD_HCRST\n"); 2334 return B_ERROR; 2335 } 2336 2337 if (WaitOpBits(XHCI_STS, STS_CNR, 0) != B_OK) { 2338 TRACE_ERROR("ControllerReset() failed STS_CNR\n"); 2339 return B_ERROR; 2340 } 2341 2342 return B_OK; 2343 } 2344 2345 2346 int32 2347 XHCI::InterruptHandler(void* data) 2348 { 2349 return ((XHCI*)data)->Interrupt(); 2350 } 2351 2352 2353 int32 2354 XHCI::Interrupt() 2355 { 2356 SpinLocker _(&fSpinlock); 2357 2358 uint32 status = ReadOpReg(XHCI_STS); 2359 uint32 temp = ReadRunReg32(XHCI_IMAN(0)); 2360 WriteOpReg(XHCI_STS, status); 2361 WriteRunReg32(XHCI_IMAN(0), temp); 2362 2363 int32 result = B_HANDLED_INTERRUPT; 2364 2365 if ((status & STS_HCH) != 0) { 2366 TRACE_ERROR("Host Controller halted\n"); 2367 return result; 2368 } 2369 if ((status & STS_HSE) != 0) { 2370 TRACE_ERROR("Host System Error\n"); 2371 return result; 2372 } 2373 if ((status & STS_HCE) != 0) { 2374 TRACE_ERROR("Host Controller Error\n"); 2375 return result; 2376 } 2377 2378 if ((status & STS_EINT) == 0) { 2379 TRACE("STS: 0x%" B_PRIx32 " IRQ_PENDING: 0x%" B_PRIx32 "\n", 2380 status, temp); 2381 return B_UNHANDLED_INTERRUPT; 2382 } 2383 2384 TRACE("Event Interrupt\n"); 2385 release_sem_etc(fEventSem, 1, B_DO_NOT_RESCHEDULE); 2386 return B_INVOKE_SCHEDULER; 2387 } 2388 2389 2390 void 2391 XHCI::Ring(uint8 slot, uint8 endpoint) 2392 { 2393 TRACE("Ding Dong! slot:%d endpoint %d\n", slot, endpoint) 2394 if ((slot == 0 && endpoint > 0) || (slot > 0 && endpoint == 0)) 2395 panic("Ring() invalid slot/endpoint combination\n"); 2396 if (slot > fSlotCount || endpoint >= XHCI_MAX_ENDPOINTS) 2397 panic("Ring() invalid slot or endpoint\n"); 2398 2399 WriteDoorReg32(XHCI_DOORBELL(slot), XHCI_DOORBELL_TARGET(endpoint) 2400 | XHCI_DOORBELL_STREAMID(0)); 2401 ReadDoorReg32(XHCI_DOORBELL(slot)); 2402 // Flush PCI writes 2403 } 2404 2405 2406 void 2407 XHCI::QueueCommand(xhci_trb* trb) 2408 { 2409 uint8 i, j; 2410 uint32 temp; 2411 2412 i = fCmdIdx; 2413 j = fCmdCcs; 2414 2415 TRACE("command[%u] = %" B_PRId32 " (0x%016" B_PRIx64 ", 0x%08" B_PRIx32 2416 ", 0x%08" B_PRIx32 ")\n", i, TRB_3_TYPE_GET(trb->flags), trb->address, 2417 trb->status, trb->flags); 2418 2419 fCmdRing[i].address = trb->address; 2420 fCmdRing[i].status = trb->status; 2421 temp = trb->flags; 2422 2423 if (j) 2424 temp |= TRB_3_CYCLE_BIT; 2425 else 2426 temp &= ~TRB_3_CYCLE_BIT; 2427 temp &= ~TRB_3_TC_BIT; 2428 fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp); 2429 2430 fCmdAddr = fErst->rs_addr + (XHCI_MAX_EVENTS + i) * sizeof(xhci_trb); 2431 2432 i++; 2433 2434 if (i == (XHCI_MAX_COMMANDS - 1)) { 2435 temp = TRB_3_TYPE(TRB_TYPE_LINK) | TRB_3_TC_BIT; 2436 if (j) 2437 temp |= TRB_3_CYCLE_BIT; 2438 fCmdRing[i].flags = B_HOST_TO_LENDIAN_INT32(temp); 2439 2440 i = 0; 2441 j ^= 1; 2442 } 2443 2444 fCmdIdx = i; 2445 fCmdCcs = j; 2446 } 2447 2448 2449 void 2450 XHCI::HandleCmdComplete(xhci_trb* trb) 2451 { 2452 if (fCmdAddr == trb->address) { 2453 TRACE("Received command event\n"); 2454 fCmdResult[0] = trb->status; 2455 fCmdResult[1] = B_LENDIAN_TO_HOST_INT32(trb->flags); 2456 release_sem_etc(fCmdCompSem, 1, B_DO_NOT_RESCHEDULE); 2457 } else 2458 TRACE_ERROR("received command event for unknown command!\n") 2459 } 2460 2461 2462 void 2463 XHCI::HandleTransferComplete(xhci_trb* trb) 2464 { 2465 const uint32 flags = B_LENDIAN_TO_HOST_INT32(trb->flags); 2466 const uint8 endpointNumber = TRB_3_ENDPOINT_GET(flags), 2467 slot = TRB_3_SLOT_GET(flags); 2468 2469 if (slot > fSlotCount) 2470 TRACE_ERROR("invalid slot\n"); 2471 if (endpointNumber == 0 || endpointNumber >= XHCI_MAX_ENDPOINTS) { 2472 TRACE_ERROR("invalid endpoint\n"); 2473 return; 2474 } 2475 2476 xhci_device *device = &fDevices[slot]; 2477 xhci_endpoint *endpoint = &device->endpoints[endpointNumber - 1]; 2478 2479 if (endpoint->trbs == NULL) { 2480 TRACE_ERROR("got TRB but endpoint is not allocated!\n"); 2481 return; 2482 } 2483 2484 // Use mutex_trylock first, in case we are in KDL. 2485 MutexLocker endpointLocker(endpoint->lock, mutex_trylock(&endpoint->lock) == B_OK); 2486 if (!endpointLocker.IsLocked()) { 2487 // We failed to get the lock. Most likely it was destroyed 2488 // while we were waiting for it. 2489 return; 2490 } 2491 2492 // In the case of an Event Data TRB, the "transferred" field refers 2493 // to the actual number of bytes transferred across the whole TD. 2494 // (XHCI 1.2 § 6.4.2.1 Table 6-38 p478.) 2495 const uint8 completionCode = TRB_2_COMP_CODE_GET(trb->status); 2496 int32 transferred = TRB_2_REM_GET(trb->status), remainder = -1; 2497 2498 TRACE("HandleTransferComplete: ed %" B_PRIu32 ", code %" B_PRIu8 ", transferred %" B_PRId32 "\n", 2499 (flags & TRB_3_EVENT_DATA_BIT), completionCode, transferred); 2500 2501 if ((flags & TRB_3_EVENT_DATA_BIT) == 0) { 2502 // This should only occur under error conditions. 2503 TRACE("got an interrupt for a non-Event Data TRB!\n"); 2504 remainder = transferred; 2505 transferred = -1; 2506 } 2507 2508 if (completionCode != COMP_SUCCESS && completionCode != COMP_SHORT_PACKET 2509 && completionCode != COMP_STOPPED) { 2510 TRACE_ALWAYS("transfer error on slot %" B_PRId8 " endpoint %" B_PRId8 2511 ": %s\n", slot, endpointNumber, xhci_error_string(completionCode)); 2512 } 2513 2514 const phys_addr_t source = B_LENDIAN_TO_HOST_INT64(trb->address); 2515 for (xhci_td *td = endpoint->td_head; td != NULL; td = td->next) { 2516 int64 offset = (source - td->trb_addr) / sizeof(xhci_trb); 2517 if (offset < 0 || offset >= td->trb_count) 2518 continue; 2519 2520 TRACE("HandleTransferComplete td %p trb %" B_PRId64 " found\n", 2521 td, offset); 2522 2523 // The TRB at offset trb_used will be the link TRB, which we do not 2524 // care about (and should not generate an interrupt at all.) We really 2525 // care about the properly last TRB, at index "count - 1", which the 2526 // Event Data TRB that _LinkDescriptorForPipe creates points to. 2527 // 2528 // But if we have an unsuccessful completion code, the transfer 2529 // likely failed midway; so just accept it anyway. 2530 if (offset == (td->trb_used - 1) || completionCode != COMP_SUCCESS) { 2531 _UnlinkDescriptorForPipe(td, endpoint); 2532 endpointLocker.Unlock(); 2533 2534 td->trb_completion_code = completionCode; 2535 td->td_transferred = transferred; 2536 td->trb_left = remainder; 2537 2538 // add descriptor to finished list 2539 if (mutex_trylock(&fFinishedLock) != B_OK) 2540 mutex_lock(&fFinishedLock); 2541 td->next = fFinishedHead; 2542 fFinishedHead = td; 2543 mutex_unlock(&fFinishedLock); 2544 2545 release_sem_etc(fFinishTransfersSem, 1, B_DO_NOT_RESCHEDULE); 2546 TRACE("HandleTransferComplete td %p done\n", td); 2547 } else { 2548 TRACE_ERROR("successful TRB 0x%" B_PRIxPHYSADDR " was found, but it wasn't " 2549 "the last in the TD!\n", source); 2550 } 2551 return; 2552 } 2553 TRACE_ERROR("TRB 0x%" B_PRIxPHYSADDR " was not found in the endpoint!\n", source); 2554 } 2555 2556 2557 void 2558 XHCI::DumpRing(xhci_trb *trbs, uint32 size) 2559 { 2560 if (!Lock()) { 2561 TRACE("Unable to get lock!\n"); 2562 return; 2563 } 2564 2565 for (uint32 i = 0; i < size; i++) { 2566 TRACE("command[%" B_PRId32 "] = %" B_PRId32 " (0x%016" B_PRIx64 "," 2567 " 0x%08" B_PRIx32 ", 0x%08" B_PRIx32 ")\n", i, 2568 TRB_3_TYPE_GET(B_LENDIAN_TO_HOST_INT32(trbs[i].flags)), 2569 trbs[i].address, trbs[i].status, trbs[i].flags); 2570 } 2571 2572 Unlock(); 2573 } 2574 2575 2576 status_t 2577 XHCI::DoCommand(xhci_trb* trb) 2578 { 2579 if (!Lock()) { 2580 TRACE("Unable to get lock!\n"); 2581 return B_ERROR; 2582 } 2583 2584 QueueCommand(trb); 2585 Ring(0, 0); 2586 2587 // Begin with a 50ms timeout. 2588 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 50 * 1000) != B_OK) { 2589 // We've hit the timeout. In some error cases, interrupts are not 2590 // generated; so here we force the event ring to be polled once. 2591 release_sem(fEventSem); 2592 2593 // Now try again, this time with a 750ms timeout. 2594 if (acquire_sem_etc(fCmdCompSem, 1, B_RELATIVE_TIMEOUT, 2595 750 * 1000) != B_OK) { 2596 TRACE("Unable to obtain fCmdCompSem!\n"); 2597 fCmdAddr = 0; 2598 Unlock(); 2599 return B_TIMED_OUT; 2600 } 2601 } 2602 2603 // eat up sems that have been released by multiple interrupts 2604 int32 semCount = 0; 2605 get_sem_count(fCmdCompSem, &semCount); 2606 if (semCount > 0) 2607 acquire_sem_etc(fCmdCompSem, semCount, B_RELATIVE_TIMEOUT, 0); 2608 2609 status_t status = B_OK; 2610 uint32 completionCode = TRB_2_COMP_CODE_GET(fCmdResult[0]); 2611 TRACE("command complete\n"); 2612 if (completionCode != COMP_SUCCESS) { 2613 TRACE_ERROR("unsuccessful command %" B_PRId32 ", error %s (%" B_PRId32 ")\n", 2614 TRB_3_TYPE_GET(trb->flags), xhci_error_string(completionCode), 2615 completionCode); 2616 status = B_IO_ERROR; 2617 } 2618 2619 trb->status = fCmdResult[0]; 2620 trb->flags = fCmdResult[1]; 2621 2622 fCmdAddr = 0; 2623 Unlock(); 2624 return status; 2625 } 2626 2627 2628 status_t 2629 XHCI::Noop() 2630 { 2631 TRACE("Issue No-Op\n"); 2632 xhci_trb trb; 2633 trb.address = 0; 2634 trb.status = 0; 2635 trb.flags = TRB_3_TYPE(TRB_TYPE_CMD_NOOP); 2636 2637 return DoCommand(&trb); 2638 } 2639 2640 2641 status_t 2642 XHCI::EnableSlot(uint8* slot) 2643 { 2644 TRACE("Enable Slot\n"); 2645 xhci_trb trb; 2646 trb.address = 0; 2647 trb.status = 0; 2648 trb.flags = TRB_3_TYPE(TRB_TYPE_ENABLE_SLOT); 2649 2650 status_t status = DoCommand(&trb); 2651 if (status != B_OK) 2652 return status; 2653 2654 *slot = TRB_3_SLOT_GET(trb.flags); 2655 return *slot != 0 ? B_OK : B_BAD_VALUE; 2656 } 2657 2658 2659 status_t 2660 XHCI::DisableSlot(uint8 slot) 2661 { 2662 TRACE("Disable Slot\n"); 2663 xhci_trb trb; 2664 trb.address = 0; 2665 trb.status = 0; 2666 trb.flags = TRB_3_TYPE(TRB_TYPE_DISABLE_SLOT) | TRB_3_SLOT(slot); 2667 2668 return DoCommand(&trb); 2669 } 2670 2671 2672 status_t 2673 XHCI::SetAddress(uint64 inputContext, bool bsr, uint8 slot) 2674 { 2675 TRACE("Set Address\n"); 2676 xhci_trb trb; 2677 trb.address = inputContext; 2678 trb.status = 0; 2679 trb.flags = TRB_3_TYPE(TRB_TYPE_ADDRESS_DEVICE) | TRB_3_SLOT(slot); 2680 2681 if (bsr) 2682 trb.flags |= TRB_3_BSR_BIT; 2683 2684 return DoCommand(&trb); 2685 } 2686 2687 2688 status_t 2689 XHCI::ConfigureEndpoint(uint64 inputContext, bool deconfigure, uint8 slot) 2690 { 2691 TRACE("Configure Endpoint\n"); 2692 xhci_trb trb; 2693 trb.address = inputContext; 2694 trb.status = 0; 2695 trb.flags = TRB_3_TYPE(TRB_TYPE_CONFIGURE_ENDPOINT) | TRB_3_SLOT(slot); 2696 2697 if (deconfigure) 2698 trb.flags |= TRB_3_DCEP_BIT; 2699 2700 return DoCommand(&trb); 2701 } 2702 2703 2704 status_t 2705 XHCI::EvaluateContext(uint64 inputContext, uint8 slot) 2706 { 2707 TRACE("Evaluate Context\n"); 2708 xhci_trb trb; 2709 trb.address = inputContext; 2710 trb.status = 0; 2711 trb.flags = TRB_3_TYPE(TRB_TYPE_EVALUATE_CONTEXT) | TRB_3_SLOT(slot); 2712 2713 return DoCommand(&trb); 2714 } 2715 2716 2717 status_t 2718 XHCI::ResetEndpoint(bool preserve, xhci_endpoint* endpoint) 2719 { 2720 TRACE("Reset Endpoint\n"); 2721 2722 switch (_GetEndpointState(endpoint)) { 2723 case ENDPOINT_STATE_STOPPED: 2724 TRACE("Reset Endpoint: already stopped"); 2725 return B_OK; 2726 case ENDPOINT_STATE_HALTED: 2727 TRACE("Reset Endpoint: warning, weird state!"); 2728 default: 2729 break; 2730 } 2731 2732 xhci_trb trb; 2733 trb.address = 0; 2734 trb.status = 0; 2735 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_ENDPOINT) 2736 | TRB_3_SLOT(endpoint->device->slot) | TRB_3_ENDPOINT(endpoint->id + 1); 2737 if (preserve) 2738 trb.flags |= TRB_3_PRSV_BIT; 2739 2740 return DoCommand(&trb); 2741 } 2742 2743 2744 status_t 2745 XHCI::StopEndpoint(bool suspend, xhci_endpoint* endpoint) 2746 { 2747 TRACE("Stop Endpoint\n"); 2748 2749 switch (_GetEndpointState(endpoint)) { 2750 case ENDPOINT_STATE_HALTED: 2751 TRACE("Stop Endpoint: error, halted"); 2752 return B_DEV_STALLED; 2753 case ENDPOINT_STATE_STOPPED: 2754 TRACE("Stop Endpoint: already stopped"); 2755 return B_OK; 2756 default: 2757 break; 2758 } 2759 2760 xhci_trb trb; 2761 trb.address = 0; 2762 trb.status = 0; 2763 trb.flags = TRB_3_TYPE(TRB_TYPE_STOP_ENDPOINT) 2764 | TRB_3_SLOT(endpoint->device->slot) | TRB_3_ENDPOINT(endpoint->id + 1); 2765 if (suspend) 2766 trb.flags |= TRB_3_SUSPEND_ENDPOINT_BIT; 2767 2768 return DoCommand(&trb); 2769 } 2770 2771 2772 status_t 2773 XHCI::SetTRDequeue(uint64 dequeue, uint16 stream, uint8 endpoint, uint8 slot) 2774 { 2775 TRACE("Set TR Dequeue\n"); 2776 xhci_trb trb; 2777 trb.address = dequeue | ENDPOINT_2_DCS_BIT; 2778 // The DCS bit is copied from the address field as in ConfigureEndpoint. 2779 // (XHCI 1.2 § 4.6.10 p142.) 2780 trb.status = TRB_2_STREAM(stream); 2781 trb.flags = TRB_3_TYPE(TRB_TYPE_SET_TR_DEQUEUE) 2782 | TRB_3_SLOT(slot) | TRB_3_ENDPOINT(endpoint); 2783 2784 return DoCommand(&trb); 2785 } 2786 2787 2788 status_t 2789 XHCI::ResetDevice(uint8 slot) 2790 { 2791 TRACE("Reset Device\n"); 2792 xhci_trb trb; 2793 trb.address = 0; 2794 trb.status = 0; 2795 trb.flags = TRB_3_TYPE(TRB_TYPE_RESET_DEVICE) | TRB_3_SLOT(slot); 2796 2797 return DoCommand(&trb); 2798 } 2799 2800 2801 int32 2802 XHCI::EventThread(void* data) 2803 { 2804 ((XHCI *)data)->CompleteEvents(); 2805 return B_OK; 2806 } 2807 2808 2809 void 2810 XHCI::CompleteEvents() 2811 { 2812 while (!fStopThreads) { 2813 if (acquire_sem(fEventSem) < B_OK) 2814 continue; 2815 2816 // eat up sems that have been released by multiple interrupts 2817 int32 semCount = 0; 2818 get_sem_count(fEventSem, &semCount); 2819 if (semCount > 0) 2820 acquire_sem_etc(fEventSem, semCount, B_RELATIVE_TIMEOUT, 0); 2821 2822 ProcessEvents(); 2823 } 2824 } 2825 2826 2827 void 2828 XHCI::ProcessEvents() 2829 { 2830 // Use mutex_trylock first, in case we are in KDL. 2831 MutexLocker locker(fEventLock, mutex_trylock(&fEventLock) == B_OK); 2832 if (!locker.IsLocked()) { 2833 // We failed to get the lock. This really should not happen. 2834 TRACE_ERROR("failed to acquire event lock!\n"); 2835 return; 2836 } 2837 2838 uint16 i = fEventIdx; 2839 uint8 j = fEventCcs; 2840 uint8 t = 2; 2841 2842 while (1) { 2843 uint32 temp = B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags); 2844 uint8 event = TRB_3_TYPE_GET(temp); 2845 TRACE("event[%u] = %u (0x%016" B_PRIx64 " 0x%08" B_PRIx32 " 0x%08" 2846 B_PRIx32 ")\n", i, event, fEventRing[i].address, 2847 fEventRing[i].status, B_LENDIAN_TO_HOST_INT32(fEventRing[i].flags)); 2848 uint8 k = (temp & TRB_3_CYCLE_BIT) ? 1 : 0; 2849 if (j != k) 2850 break; 2851 2852 switch (event) { 2853 case TRB_TYPE_COMMAND_COMPLETION: 2854 HandleCmdComplete(&fEventRing[i]); 2855 break; 2856 case TRB_TYPE_TRANSFER: 2857 HandleTransferComplete(&fEventRing[i]); 2858 break; 2859 case TRB_TYPE_PORT_STATUS_CHANGE: 2860 TRACE("port change detected\n"); 2861 break; 2862 default: 2863 TRACE_ERROR("Unhandled event = %u\n", event); 2864 break; 2865 } 2866 2867 i++; 2868 if (i == XHCI_MAX_EVENTS) { 2869 i = 0; 2870 j ^= 1; 2871 if (!--t) 2872 break; 2873 } 2874 } 2875 2876 fEventIdx = i; 2877 fEventCcs = j; 2878 2879 uint64 addr = fErst->rs_addr + i * sizeof(xhci_trb); 2880 WriteRunReg32(XHCI_ERDP_LO(0), (uint32)addr | ERDP_BUSY); 2881 WriteRunReg32(XHCI_ERDP_HI(0), (uint32)(addr >> 32)); 2882 } 2883 2884 2885 int32 2886 XHCI::FinishThread(void* data) 2887 { 2888 ((XHCI *)data)->FinishTransfers(); 2889 return B_OK; 2890 } 2891 2892 2893 void 2894 XHCI::FinishTransfers() 2895 { 2896 while (!fStopThreads) { 2897 if (acquire_sem(fFinishTransfersSem) < B_OK) 2898 continue; 2899 2900 // eat up sems that have been released by multiple interrupts 2901 int32 semCount = 0; 2902 get_sem_count(fFinishTransfersSem, &semCount); 2903 if (semCount > 0) 2904 acquire_sem_etc(fFinishTransfersSem, semCount, B_RELATIVE_TIMEOUT, 0); 2905 2906 mutex_lock(&fFinishedLock); 2907 TRACE("finishing transfers\n"); 2908 while (fFinishedHead != NULL) { 2909 xhci_td* td = fFinishedHead; 2910 fFinishedHead = td->next; 2911 td->next = NULL; 2912 mutex_unlock(&fFinishedLock); 2913 2914 TRACE("finishing transfer td %p\n", td); 2915 2916 Transfer* transfer = td->transfer; 2917 if (transfer == NULL) { 2918 // No transfer? Quick way out. 2919 FreeDescriptor(td); 2920 mutex_lock(&fFinishedLock); 2921 continue; 2922 } 2923 2924 bool directionIn = (transfer->TransferPipe()->Direction() != Pipe::Out); 2925 2926 status_t callbackStatus = B_OK; 2927 const uint8 completionCode = td->trb_completion_code; 2928 switch (completionCode) { 2929 case COMP_SHORT_PACKET: 2930 case COMP_SUCCESS: 2931 callbackStatus = B_OK; 2932 break; 2933 case COMP_DATA_BUFFER: 2934 callbackStatus = directionIn ? B_DEV_DATA_OVERRUN 2935 : B_DEV_DATA_UNDERRUN; 2936 break; 2937 case COMP_BABBLE: 2938 callbackStatus = directionIn ? B_DEV_FIFO_OVERRUN 2939 : B_DEV_FIFO_UNDERRUN; 2940 break; 2941 case COMP_USB_TRANSACTION: 2942 callbackStatus = B_DEV_CRC_ERROR; 2943 break; 2944 case COMP_STALL: 2945 callbackStatus = B_DEV_STALLED; 2946 break; 2947 default: 2948 callbackStatus = B_DEV_STALLED; 2949 break; 2950 } 2951 2952 size_t actualLength = transfer->FragmentLength(); 2953 if (completionCode != COMP_SUCCESS) { 2954 actualLength = td->td_transferred; 2955 if (td->td_transferred == -1) 2956 actualLength = transfer->FragmentLength() - td->trb_left; 2957 TRACE("transfer not successful, actualLength=%" B_PRIuSIZE "\n", 2958 actualLength); 2959 } 2960 2961 usb_isochronous_data* isochronousData = transfer->IsochronousData(); 2962 if (isochronousData != NULL) { 2963 size_t packetSize = transfer->DataLength() 2964 / isochronousData->packet_count, 2965 left = actualLength; 2966 for (uint32 i = 0; i < isochronousData->packet_count; i++) { 2967 size_t size = min_c(packetSize, left); 2968 isochronousData->packet_descriptors[i].actual_length = size; 2969 isochronousData->packet_descriptors[i].status = (size > 0) 2970 ? B_OK : B_DEV_FIFO_UNDERRUN; 2971 left -= size; 2972 } 2973 } 2974 2975 if (callbackStatus == B_OK && directionIn && actualLength > 0) { 2976 TRACE("copying in iov count %ld\n", transfer->VectorCount()); 2977 status_t status = transfer->PrepareKernelAccess(); 2978 if (status == B_OK) { 2979 ReadDescriptor(td, transfer->Vector(), 2980 transfer->VectorCount()); 2981 } else { 2982 callbackStatus = status; 2983 } 2984 } 2985 2986 FreeDescriptor(td); 2987 2988 // this transfer may still have data left 2989 bool finished = true; 2990 transfer->AdvanceByFragment(actualLength); 2991 if (completionCode == COMP_SUCCESS 2992 && transfer->FragmentLength() > 0) { 2993 TRACE("still %" B_PRIuSIZE " bytes left on transfer\n", 2994 transfer->FragmentLength()); 2995 callbackStatus = SubmitTransfer(transfer); 2996 finished = (callbackStatus != B_OK); 2997 } 2998 if (finished) { 2999 // The actualLength was already handled in AdvanceByFragment. 3000 transfer->Finished(callbackStatus, 0); 3001 delete transfer; 3002 } 3003 3004 mutex_lock(&fFinishedLock); 3005 } 3006 mutex_unlock(&fFinishedLock); 3007 } 3008 } 3009 3010 3011 inline void 3012 XHCI::WriteOpReg(uint32 reg, uint32 value) 3013 { 3014 *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg) = value; 3015 } 3016 3017 3018 inline uint32 3019 XHCI::ReadOpReg(uint32 reg) 3020 { 3021 return *(volatile uint32 *)(fRegisters + fOperationalRegisterOffset + reg); 3022 } 3023 3024 3025 inline status_t 3026 XHCI::WaitOpBits(uint32 reg, uint32 mask, uint32 expected) 3027 { 3028 int loops = 0; 3029 uint32 value = ReadOpReg(reg); 3030 while ((value & mask) != expected) { 3031 snooze(1000); 3032 value = ReadOpReg(reg); 3033 if (loops == 100) { 3034 TRACE("delay waiting on reg 0x%" B_PRIX32 " match 0x%" B_PRIX32 3035 " (0x%" B_PRIX32 ")\n", reg, expected, mask); 3036 } else if (loops > 250) { 3037 TRACE_ERROR("timeout waiting on reg 0x%" B_PRIX32 3038 " match 0x%" B_PRIX32 " (0x%" B_PRIX32 ")\n", reg, expected, 3039 mask); 3040 return B_ERROR; 3041 } 3042 loops++; 3043 } 3044 return B_OK; 3045 } 3046 3047 3048 inline uint32 3049 XHCI::ReadCapReg32(uint32 reg) 3050 { 3051 return *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg); 3052 } 3053 3054 3055 inline void 3056 XHCI::WriteCapReg32(uint32 reg, uint32 value) 3057 { 3058 *(volatile uint32 *)(fRegisters + fCapabilityRegisterOffset + reg) = value; 3059 } 3060 3061 3062 inline uint32 3063 XHCI::ReadRunReg32(uint32 reg) 3064 { 3065 return *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg); 3066 } 3067 3068 3069 inline void 3070 XHCI::WriteRunReg32(uint32 reg, uint32 value) 3071 { 3072 *(volatile uint32 *)(fRegisters + fRuntimeRegisterOffset + reg) = value; 3073 } 3074 3075 3076 inline uint32 3077 XHCI::ReadDoorReg32(uint32 reg) 3078 { 3079 return *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg); 3080 } 3081 3082 3083 inline void 3084 XHCI::WriteDoorReg32(uint32 reg, uint32 value) 3085 { 3086 *(volatile uint32 *)(fRegisters + fDoorbellRegisterOffset + reg) = value; 3087 } 3088 3089 3090 inline addr_t 3091 XHCI::_OffsetContextAddr(addr_t p) 3092 { 3093 if (fContextSizeShift == 1) { 3094 // each structure is page aligned, each pointer is 32 bits aligned 3095 uint32 offset = p & ((B_PAGE_SIZE - 1) & ~31U); 3096 p += offset; 3097 } 3098 return p; 3099 } 3100 3101 inline uint32 3102 XHCI::_ReadContext(uint32* p) 3103 { 3104 p = (uint32*)_OffsetContextAddr((addr_t)p); 3105 return *p; 3106 } 3107 3108 3109 inline void 3110 XHCI::_WriteContext(uint32* p, uint32 value) 3111 { 3112 p = (uint32*)_OffsetContextAddr((addr_t)p); 3113 *p = value; 3114 } 3115 3116 3117 inline uint64 3118 XHCI::_ReadContext(uint64* p) 3119 { 3120 p = (uint64*)_OffsetContextAddr((addr_t)p); 3121 return *p; 3122 } 3123 3124 3125 inline void 3126 XHCI::_WriteContext(uint64* p, uint64 value) 3127 { 3128 p = (uint64*)_OffsetContextAddr((addr_t)p); 3129 *p = value; 3130 } 3131