#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

static int link_quirk;
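/*
 * link_quirk is consumed in xhci_init() below: when set, the driver skips
 * clearing the chain bit on Link TRBs (the "Link TRB QUIRK" named in the
 * xhci_init() debug output).  It is presumably exposed as a module
 * parameter; the module_param() line was lost in extraction.
 */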
	result = xhci_readl(xhci, ptr);
	if (result == ~(u32)0)
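	/*
	 * A register read of all ones conventionally means the host
	 * controller has died or been hot-removed, so the caller should
	 * give up rather than keep polling (assumed from common PCI
	 * practice; the surrounding handshake loop was lost in extraction).
	 */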
	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
	xhci_writel(xhci, temp, &xhci->op_regs->command);
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
	state = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, command, &xhci->op_regs->command);
	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	for (i = 0; i < 2; ++i) {
static int xhci_free_msi(struct xhci_hcd *xhci)

static int xhci_setup_msi(struct xhci_hcd *xhci)

	ret = pci_enable_msi(pdev);
		xhci_dbg(xhci, "failed to allocate MSI entry\n");
			0, "xhci_hcd", xhci_to_hcd(xhci));
		xhci_dbg(xhci, "disable MSI interrupt\n");

static void xhci_free_irq(struct xhci_hcd *xhci)

	if (xhci_to_hcd(xhci)->irq > 0)
	ret = xhci_free_msi(xhci);
static int xhci_setup_msix(struct xhci_hcd *xhci)

	struct usb_hcd *hcd = xhci_to_hcd(xhci);
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		xhci_dbg(xhci, "Failed to enable MSI-X\n");
				0, "xhci_hcd", xhci_to_hcd(xhci));
	hcd->msix_enabled = 1;
	xhci_dbg(xhci, "disable MSI-X interrupt\n");

static void xhci_cleanup_msix(struct xhci_hcd *xhci)

	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	hcd->msix_enabled = 0;
static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)

static int xhci_try_enable_msi(struct usb_hcd *hcd)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	ret = xhci_setup_msix(xhci);
		ret = xhci_setup_msi(xhci);
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
			hcd->irq_descr, hcd);
		xhci_err(xhci, "request interrupt %d failed\n",
	hcd->irq = pdev->irq;

static int xhci_try_enable_msi(struct usb_hcd *hcd)

static void xhci_cleanup_msix(struct xhci_hcd *xhci)

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
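/*
 * The second set of xhci_try_enable_msi()/xhci_cleanup_msix()/
 * xhci_msix_sync_irqs() signatures above are presumably the no-op stub
 * definitions compiled when PCI/MSI support is configured out; the
 * #ifdef/#else wrapper around them was lost in extraction.
 */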
static void compliance_mode_recovery(unsigned long arg)

		xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
		xhci_dbg(xhci, "Attempting Recovery routine!\n");
		if (hcd->state == HC_STATE_SUSPENDED)
			usb_hcd_resume_root_hub(hcd);

static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)

	xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
static bool compliance_mode_recovery_timer_quirk_check(void)

	const char *dmi_product_name, *dmi_sys_vendor;

	if (!dmi_product_name || !dmi_sys_vendor)

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1"))
static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	xhci_dbg(xhci, "Finished xhci_init\n");
	if (compliance_mode_recovery_timer_quirk_check()) {
		compliance_mode_recovery_timer_init(xhci);
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
	for (j = 0; j < 31; ++j) {
	spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Quit polling the event ring.\n");
static int xhci_run_finished(struct xhci_hcd *xhci)

	if (xhci_start(xhci)) {
	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	hcd->uses_new_polling = 1;
		return xhci_run_finished(xhci);

	ret = xhci_try_enable_msi(hcd);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	xhci_dbg(xhci, "Setting event ring polling timer\n");

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_dbg(xhci, "ERST memory map follows:\n");
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			&xhci->ir_set->irq_pending);

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
static void xhci_only_stop_hcd(struct usb_hcd *hcd)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
			(!(xhci_all_ports_seen_u0(xhci))))

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
			&xhci->ir_set->irq_pending);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
static void xhci_save_registers(struct xhci_hcd *xhci)

	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
static void xhci_restore_registers(struct xhci_hcd *xhci)

	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)

	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);

static void xhci_clear_command_ring(struct xhci_hcd *xhci)

	} while (seg != ring->deq_seg);

	xhci_set_cmd_ring_deq(xhci);
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, command, &xhci->op_regs->command);
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
	xhci_clear_command_ring(xhci);

	xhci_save_registers(xhci);

	command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, command, &xhci->op_regs->command);
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
	spin_unlock_irq(&xhci->lock);

			(!(xhci_all_ports_seen_u0(xhci)))) {
		xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");

	xhci_msix_sync_irqs(xhci);
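/*
 * Suspend order, as reconstructed from the fragments above: stop the
 * controller (clear CMD_RUN and wait for the halt, hence the "CMD_RUN
 * timeout" warning), snapshot the registers, ask the xHC to save its
 * internal state (the "save state timeout" path), and finally quiesce
 * the MSI-X vectors.
 */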
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	spin_lock_irq(&xhci->lock);

		xhci_restore_registers(xhci);
		xhci_set_cmd_ring_deq(xhci);

		command = xhci_readl(xhci, &xhci->op_regs->command);
		xhci_writel(xhci, command, &xhci->op_regs->command);
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);

	temp = xhci_readl(xhci, &xhci->op_regs->status);

	if ((temp & STS_SRE) || hibernated) {
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
				&xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

			secondary_hcd = hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
			xhci_dbg(xhci, "Start the secondary HCD\n");
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;

	command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_writel(xhci, command, &xhci->op_regs->command);

	spin_unlock_irq(&xhci->lock);

	usb_hcd_resume_root_hub(hcd);

		compliance_mode_recovery_timer_init(xhci);
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;

	return 1 << (ep_index + 1);

	return fls(added_ctxs) - 1;
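/*
 * Worked example of the index math above (the intervening function
 * bodies were lost in extraction): bulk OUT endpoint 2 maps to index
 * 2*2 + 0 - 1 = 3, bulk IN endpoint 2 to 2*2 + 1 - 1 = 4, and the
 * control endpoint 0 to index 0.  "1 << (ep_index + 1)" then yields the
 * input context add/drop flag for that index, and "fls(added_ctxs) - 1"
 * recovers the highest context index set in a flag word.
 */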
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,

	if (!hcd || (check_ep && !ep) || !udev) {
	if (!udev->parent) {

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
				"virt_dev does not match\n", func);
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		bool ctx_change, bool must_succeed);
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,

	int max_packet_size;
	int hw_max_packet_size;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
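	/*
	 * Context for the fragment above: a full-speed device reports its
	 * real ep0 max packet size only in its device descriptor, so when
	 * the value cached in the xHC's output context no longer matches,
	 * the driver pushes the new size to the hardware with an Evaluate
	 * Context command (inferred from the debug messages; the
	 * surrounding control flow was lost in extraction).
	 */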
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)

	slot_id = urb->dev->slot_id;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		xhci_dbg(xhci, "urb submitted during PCI suspend\n");

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			size * sizeof(struct xhci_td *), mem_flags);

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);

	for (i = 0; i < size; i++) {

	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		ret = xhci_check_maxpacket(xhci, slot_id,
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to using streams.\n");
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep is transitioning to not having streams.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
	spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	unsigned int slot_id;

	slot_id = urb->dev->slot_id;
	stream_id = urb->stream_id;

	if (stream_id == 0) {
			"WARN: Slot ID %u, ep index %u has streams, but URB has no stream ID.\n",
			"WARN: Slot ID %u, ep index %u has stream IDs 1 to %u allocated, but stream ID %u is requested.\n",
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);
	if (ret || !urb->hcpriv)
	temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

	urb_priv = urb->hcpriv;
	if (i < urb_priv->length)
		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx\n",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				urb_priv->td[i]->start_seg,
				urb_priv->td[i]->first_trb));
	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
	spin_unlock_irqrestore(&xhci->lock, flags);
		struct usb_host_endpoint *ep)

	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
		struct usb_host_endpoint *ep)

	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	xhci = hcd_to_xhci(hcd);
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	if (virt_dev->eps[ep_index].ring &&
		xhci_warn(xhci, "Trying to add endpoint 0x%x without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	for (i = 1; i < 31; ++i) {

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,

	switch (*cmd_status) {
		dev_warn(&udev->dev, "Not enough host controller resources for new device state.\n");
		dev_warn(&udev->dev, "Not enough bandwidth for new device state.\n");
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, and endpoint is not disabled.\n");
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint configure command.\n");
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)

	switch (*cmd_status) {
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate context command.\n");
		dev_warn(&udev->dev, "WARN: slot not enabled for evaluate context command.\n");
		dev_warn(&udev->dev, "WARN: invalid context state for evaluate context command.\n");
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate context command.\n");
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
				*cmd_status);
static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,

	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
		hweight32(valid_add_flags & valid_drop_flags);

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,

	u32 valid_add_flags;
	u32 valid_drop_flags;

	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;
		hweight32(valid_add_flags & valid_drop_flags);
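/*
 * The ">> 2" above discards the flags for the slot context (bit 0) and
 * the default control endpoint (bit 1), which never count toward host
 * resource limits.  Each counter then subtracts
 * hweight32(valid_add_flags & valid_drop_flags), since an endpoint both
 * added and dropped in the same command is a no-op for the totals
 * (reconstructed reading; the full return statements were truncated).
 */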
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
		xhci_dbg(xhci, "Not enough ep ctxs: %u active, need to add %u, limit is %u.\n",
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,

static void xhci_free_host_resources(struct xhci_hcd *xhci,

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",

static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,

	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
static unsigned int xhci_get_block_size(struct usb_device *udev)

	switch (udev->speed) {

static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,

	if (old_active_eps == 0 && tt_info->active_eps != 0) {

static int xhci_check_ss_bw(struct xhci_hcd *xhci,

	unsigned int bw_reserved;
static int xhci_check_bw_table(struct xhci_hcd *xhci,

	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;

		return xhci_check_ss_bw(xhci, virt_dev);

	block_size = xhci_get_block_size(virt_dev->udev);

		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for newly activated TT.\n");
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",

		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		packets_remaining = 2 * packets_remaining +
		if (list_empty(&bw_table->interval_bw[i].endpoints))
				virt_ep->bw_info.max_packet_size,
		if (largest_mps > packet_size)
			packet_size = largest_mps;

		interval_overhead = xhci_get_largest_overhead(
		if (interval_overhead > overhead)
			overhead = interval_overhead;

		packets_transmitted = packets_remaining >> (i + 1);

		bw_added = packets_transmitted * (overhead + packet_size);

		packets_remaining = packets_remaining % (1 << (i + 1));

		if (packets_remaining == 0) {
		} else if (packets_transmitted > 0) {
			packet_size = largest_mps;
			overhead = interval_overhead;

		bw_used += bw_added;
		if (bw_used > max_bandwidth) {
			xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
					bw_used, max_bandwidth);

	if (packets_remaining > 0)
		bw_used += overhead + packet_size;

	unsigned int port_index = virt_dev->real_port - 1;
			xhci->rh_bw[port_index].num_active_tts;

	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, Available: %u percent\n",
			bw_used, max_bandwidth, bw_reserved,
			(max_bandwidth - bw_used - bw_reserved) * 100 /

	bw_used += bw_reserved;
	if (bw_used > max_bandwidth) {
		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
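/*
 * A sketch of the scheduling model above, reconstructed from the
 * surviving arithmetic: endpoints are walked per service interval i.
 * Packets left over from shorter intervals double at each step
 * ("2 * packets_remaining + ..."), the packets due in this interval are
 * "packets_remaining >> (i + 1)", and each is charged the largest max
 * packet size plus the largest per-packet overhead seen so far.  If
 * anything is still pending after the last interval, one more
 * worst-case packet is charged before the reserved percentage is added
 * and the total checked against the bus limit.
 */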
static bool xhci_is_async_ep(unsigned int ep_type)

static bool xhci_is_sync_in_ep(unsigned int ep_type)

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)

		struct usb_device *udev,

	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))

	if (xhci_is_sync_in_ep(ep_bw->type))
		xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
	else
		xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);

	if (normalized_interval == 0)
	interval_bw = &bw_table->interval_bw[normalized_interval];

	switch (udev->speed) {
static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
		struct usb_device *udev,

	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))

	if (xhci_is_sync_in_ep(ep_bw->type))
		xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
				xhci_get_ss_bw_consumed(ep_bw);
	else
		xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
				xhci_get_ss_bw_consumed(ep_bw);

	if (normalized_interval == 0)
	interval_bw = &bw_table->interval_bw[normalized_interval];

	switch (udev->speed) {
			smaller_ep->bw_info.max_packet_size) {

	if (old_active_eps == 0 &&
			virt_dev->tt_info->active_eps != 0) {
	} else if (old_active_eps != 0 &&
			virt_dev->tt_info->active_eps == 0) {
static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,

	int old_active_eps = 0;

		old_active_eps = virt_dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
				sizeof(ep_bw_info[i]));
				&virt_dev->eps[i].bw_info,

	for (i = 0; i < 31; i++) {
		xhci_add_ep_to_interval_table(xhci,
				&virt_dev->eps[i].bw_info,

	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {

	for (i = 0; i < 31; i++) {
				&virt_dev->eps[i].bw_info,
		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
				sizeof(ep_bw_info[i]));
		xhci_add_ep_to_interval_table(xhci,
				&virt_dev->eps[i].bw_info,
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		bool ctx_change, bool must_succeed)

	unsigned long flags;

	virt_dev = xhci->devs[udev->slot_id];
		in_ctx = command->in_ctx;
		in_ctx = virt_dev->in_ctx;
			xhci_reserve_host_resources(xhci, in_ctx)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough bandwidth\n");

		cmd_status = &command->status;
			xhci->cmd_ring->enq_seg->next->trbs;
	init_completion(cmd_completion);
				udev->slot_id, must_succeed);
				udev->slot_id, must_succeed);
			xhci_free_host_resources(xhci, in_ctx);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				"configure endpoint" : "evaluate context");

	ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
	ret = xhci_evaluate_context_result(xhci, udev, cmd_status);

		xhci_free_host_resources(xhci, in_ctx);
		xhci_finish_resource_reservation(xhci, in_ctx);
	spin_unlock_irqrestore(&xhci->lock, flags);
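/*
 * Note the reserve-then-commit pattern in the fragments above: host
 * resources and bandwidth are reserved under xhci->lock before the
 * command is queued, released again on every failure path, and only
 * finalized (xhci_finish_resource_reservation()) once the command
 * completes successfully.
 */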
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	xhci_dbg(xhci, "New Input Control Context:\n");

	ret = xhci_configure_endpoint(xhci, udev, NULL,

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");

	for (i = 1; i < 31; ++i) {
	xhci_zero_in_ctx(xhci, virt_dev);

	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
		if (virt_dev->eps[i].ring) {
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
	xhci_zero_in_ctx(xhci, virt_dev);
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		u32 add_flags, u32 drop_flags)

	xhci_dbg(xhci, "Input Context:\n");

static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,

			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
		xhci_warn(xhci, "WARN Cannot submit config ep after reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",

	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
		struct usb_device *udev, unsigned int ep_index)

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");

		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_dbg(xhci, "Setting up input context for configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
		struct usb_host_endpoint *ep)

	struct usb_device *udev;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_unlock_irqrestore(&xhci->lock, flags);

		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)

	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);

	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for dynamic stream context array reallocation.\n");
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)

	unsigned int max_streams;

	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)

	unsigned int max_streams;
	unsigned int endpoint_flag;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,

		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
			*num_streams = max_streams + 1;

		if (*changed_ep_bitmask & endpoint_flag)
		*changed_ep_bitmask |= endpoint_flag;
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)

	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])

	for (i = 0; i < num_eps; i++) {
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are being disabled already.",
					eps[i]->desc.bEndpointAddress);
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for endpoint 0x%x, streams are already disabled!",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called with non-streams endpoint\n");

	return changed_ep_bitmask;
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)

	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",

		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");

	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
		spin_unlock_irqrestore(&xhci->lock, flags);
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle more than one stream.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);

	vdev = xhci->devs[udev->slot_id];

	for (i = 0; i < num_eps; i++) {
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)

	for (i = 0; i < num_eps; i++) {
				vdev->eps[ep_index].stream_info);

	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	ret = xhci_configure_endpoint(xhci, udev, config_cmd,

	for (i = 0; i < num_eps; i++) {
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
	spin_unlock_irqrestore(&xhci->lock, flags);

	return num_streams - 1;

	for (i = 0; i < num_eps; i++) {
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
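/*
 * The "return num_streams - 1" above reports only the usable stream
 * IDs: stream 0 is reserved by the xHCI spec (the earlier debug line
 * counts it explicitly), so a request granted num_streams entries
 * exposes num_streams - 1 IDs to the caller.
 */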
		struct usb_host_endpoint **eps, unsigned int num_eps,

	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);

	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
				&vdev->eps[ep_index]);
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_configure_endpoint(xhci, udev, command,

	for (i = 0; i < num_eps; i++) {
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
	spin_unlock_irqrestore(&xhci->lock, flags);
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, %u now active.\n",
				num_dropped_eps, drop_flags,
	unsigned long flags;
	int last_freed_endpoint;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
		xhci_dbg(xhci, "The device to be reset with slot ID %u does not exist. Re-allocate the device\n",
				slot_id);

	if (virt_dev->udev != udev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does not match the udev. Re-allocate the device\n",

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);

	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");

			xhci->cmd_ring->enq_seg->next->trbs;
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	spin_unlock_irqrestore(&xhci->lock, flags);

			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;

	ret = reset_device_cmd->status;
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
		xhci_info(xhci, "Not freeing device rings.\n");
		goto command_cleanup;
		xhci_dbg(xhci, "Successful reset device command.\n");
		xhci_warn(xhci, "Unknown completion code %u for reset device command.\n",
				ret);
		goto command_cleanup;

	spin_unlock_irqrestore(&xhci->lock, flags);

	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
			last_freed_endpoint = i;
		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
				&virt_dev->eps[i].bw_info,

	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0 && ret != -ENODEV)

	virt_dev = xhci->devs[udev->slot_id];

	for (i = 0; i < 31; ++i) {

	if (udev->usb2_hw_lpm_enabled) {
		udev->usb2_hw_lpm_enabled = 0;

	state = xhci_readl(xhci, &xhci->op_regs->status);
		spin_unlock_irqrestore(&xhci->lock, flags);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
	spin_unlock_irqrestore(&xhci->lock, flags);
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)

		xhci_dbg(xhci, "Not enough ep ctxs: %u active, need to add 1, limit is %u.\n",
	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");

		xhci_err(xhci, "Error while assigning device slot ID\n");

	ret = xhci_reserve_host_control_ep_resources(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_warn(xhci, "Not enough host resources, active endpoint contexts = %u\n",
	spin_unlock_irqrestore(&xhci->lock, flags);

		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
	udev->slot_id = xhci->slot_id;

	spin_unlock_irqrestore(&xhci->lock, flags);
	unsigned long flags;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id) {
	virt_dev = xhci->devs[udev->slot_id];
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",

		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");

		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		dev_warn(&udev->dev, "ERROR: Incompatible device for address device command.\n");
		xhci_dbg(xhci, "Successful Address Device command\n");
		xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",

	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
#ifdef CONFIG_USB_SUSPEND

static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
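/*
 * Each BESL index above encodes a resume latency in microseconds, so
 * index 0 means 125 us and index 15 means 10 ms (assumed from how
 * xhci_calculate_hird_besl() compares the entries against u2del below).
 */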
static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
		struct usb_device *udev)

	int u2del, besl, besl_host;
	int besl_device = 0;

	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);

		for (besl_host = 0; besl_host < 16; besl_host++) {
			if (xhci_besl_encoding[besl_host] >= u2del)

		besl_host = (u2del - 51) / 75 + 1;
	besl = besl_host + besl_device;
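	/*
	 * Worked example for the fallback formula above: with
	 * u2del = 200 us, besl_host = (200 - 51) / 75 + 1 = 2 in integer
	 * arithmetic.  The table scan and the formula are presumably the
	 * BESL-capable and HIRD-only branches respectively; the
	 * conditional between them was lost in extraction.
	 */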
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
		struct usb_device *udev)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (!udev->parent || udev->parent->parent ||

	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
		if (dev_info->dev_id == dev_id) {

	port_num = udev->portnum - 1;
		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);

	xhci_dbg(xhci, "test port %d software LPM\n", port_num);

	pm_addr = port_array[port_num] + 1;
	hird = xhci_calculate_hird_besl(xhci, udev);
	xhci_writel(xhci, temp, pm_addr);

	spin_unlock_irqrestore(&xhci->lock, flags);

	temp = xhci_readl(xhci, addr);
	xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
	temp = xhci_readl(xhci, pm_addr);
	xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",

	spin_unlock_irqrestore(&xhci->lock, flags);

	temp = xhci_readl(xhci, addr);
	xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
			(temp & PORT_PLS_MASK) != XDEV_U0) {
		xhci_dbg(xhci, "port L1 resume fail\n");

	xhci_warn(xhci, "device LPM test failed, may disconnect and re-enumerate\n");
	dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
	INIT_LIST_HEAD(&dev_info->list);

	spin_unlock_irqrestore(&xhci->lock, flags);
		struct usb_device *udev, int enable)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (!udev->parent || udev->parent->parent ||

	if (udev->usb2_hw_lpm_capable != 1)

	port_num = udev->portnum - 1;
	pm_addr = port_array[port_num] + 1;
	temp = xhci_readl(xhci, pm_addr);

	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
			enable ? "enable" : "disable", port_num);

	hird = xhci_calculate_hird_besl(xhci, udev);

		xhci_writel(xhci, temp, pm_addr);
		temp = xhci_readl(xhci, pm_addr);
		xhci_writel(xhci, temp, pm_addr);
		xhci_writel(xhci, temp, pm_addr);

	spin_unlock_irqrestore(&xhci->lock, flags);
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	ret = xhci_usb2_software_lpm_test(hcd, udev);
		xhci_dbg(xhci, "software LPM test succeeded\n");
		udev->usb2_hw_lpm_capable = 1;
			udev->usb2_hw_lpm_enabled = 1;

		struct usb_device *udev, int enable)
static unsigned long long xhci_service_interval_to_ns(

	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
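/*
 * bInterval is a power-of-two exponent over 125 us microframes, so for
 * example bInterval = 4 gives (1 << 3) * 125 * 1000 = 1,000,000 ns,
 * i.e. a 1 ms service interval.
 */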
static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,

	unsigned long long sel;
	unsigned long long pel;
	unsigned int max_sel_pel;

		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",

	if (sel <= max_sel_pel && pel <= max_sel_pel)

	if (sel > max_sel_pel)
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long SEL %llu ms\n",
		dev_dbg(&udev->dev, "Device-initiated %s disabled due to long PEL %llu ms\n",
static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,

	unsigned long long timeout_ns;

	ep_type = usb_endpoint_type(desc);
		timeout_ns = udev->u1_params.sel * 3;
		timeout_ns = udev->u1_params.sel * 5;
		intr_type = usb_endpoint_interrupt_type(desc);
			timeout_ns = udev->u1_params.sel * 3;
		timeout_ns = xhci_service_interval_to_ns(desc);
		if (timeout_ns < udev->u1_params.sel * 2)
			timeout_ns = udev->u1_params.sel * 2;

		dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %llu ms\n",
				timeout_ns);
		return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,

	unsigned long long timeout_ns;
	unsigned long long u2_del_ns;

	timeout_ns = 10 * 1000 * 1000;

	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
			(xhci_service_interval_to_ns(desc) > timeout_ns))
		timeout_ns = xhci_service_interval_to_ns(desc);

	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
	if (u2_del_ns > timeout_ns)
		timeout_ns = u2_del_ns;

		dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %llu ms\n",
				timeout_ns);
		return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
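/*
 * The U2 baseline above is 10 * 1000 * 1000 ns = 10 ms; it is raised to
 * the endpoint's service interval for periodic endpoints, and to the
 * device's advertised U2 exit latency (bU2DevExitLat, reported in
 * microseconds, hence the * 1000ULL) if either is longer.
 */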
static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,

		return xhci_calculate_intel_u1_timeout(udev, desc);
		return xhci_calculate_intel_u2_timeout(udev, desc);
static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,

	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
			desc, state, timeout);
		*timeout = alt_timeout;
	if (alt_timeout > *timeout)
		*timeout = alt_timeout;
static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_interface *alt,

	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
		if (xhci_update_timeout_for_endpoint(xhci, udev,
				&alt->endpoint[j].desc, state, timeout))
static int xhci_check_intel_tier_policy(struct usb_device *udev,

	struct usb_device *parent;
	unsigned int num_hubs;

	for (parent = udev->parent, num_hubs = 0; parent->parent;
			parent = parent->parent)

	dev_dbg(&udev->dev, "Disabling U1 link state for device below second-tier hub.\n");
	dev_dbg(&udev->dev, "Plug device into first-tier hub to decrease power consumption.\n");

static int xhci_check_tier_policy(struct xhci_hcd *xhci,
		struct usb_device *udev,

		return xhci_check_intel_tier_policy(udev, state);
static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct usb_host_config *config;

		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",

	if (xhci_check_tier_policy(xhci, udev, state) < 0)

	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,

	config = udev->actconfig;

	for (i = 0; i < USB_MAXINTERFACES; i++) {
		struct usb_driver *driver;

		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			if (driver && driver->disable_hub_initiated_lpm) {
				dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
						state_name, driver->name);
				return xhci_get_timeout_no_hub_lpm(udev, state);

		if (!intf->cur_altsetting)

		if (xhci_update_timeout_for_interface(xhci, udev,
				intf->cur_altsetting,
static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
		struct usb_device *udev, u16 max_exit_latency)

	unsigned long flags;

	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
		spin_unlock_irqrestore(&xhci->lock, flags);

	virt_dev = xhci->devs[udev->slot_id];
		spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");

	ret = xhci_configure_endpoint(xhci, udev, command,

	spin_unlock_irqrestore(&xhci->lock, flags);
static int calculate_max_exit_latency(struct usb_device *udev,
		u16 hub_encoded_timeout)

	unsigned long long u1_mel_us = 0;
	unsigned long long u2_mel_us = 0;
	unsigned long long mel_us = 0;

	if (u1_mel_us > u2_mel_us)

		dev_warn(&udev->dev, "Link PM max exit latency of %lluus is too big.\n",
				mel_us);
	u16 hub_encoded_timeout;

	xhci = hcd_to_xhci(hcd);
			!xhci->devs[udev->slot_id])

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);

	ret = xhci_change_max_exit_latency(xhci, udev, mel);

	return hub_encoded_timeout;

	xhci = hcd_to_xhci(hcd);
			!xhci->devs[udev->slot_id])

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
		struct usb_tt *tt, gfp_t mem_flags)

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	unsigned think_time;

	vdev = xhci->devs[hdev->slot_id];
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);

		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",

		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
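		/*
		 * tt->think_time is kept by the USB core in nanoseconds,
		 * in multiples of 666 ns (8 full-speed bit times), while
		 * the xHCI slot context wants the raw 0-3 encoding; e.g.
		 * a hub reporting 1332 ns becomes 1332 / 666 - 1 = 1.
		 */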
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",

	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			"configure endpoint" : "evaluate context");

	ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
	struct device *dev = hcd->self.controller;

	hcd->self.sg_tablesize = ~0;
	hcd->self.no_stop_on_short = 1;

		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		hcd->speed = HCD_USB2;

	xhci = hcd_to_xhci(hcd);
	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");

	get_quirks(dev, xhci);

	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");

	xhci_dbg(xhci, "Calling HCD init\n");
	xhci_dbg(xhci, "Called HCD init\n");
static int __init xhci_hcd_init(void)

static void __exit xhci_hcd_cleanup(void)