#include <linux/slab.h>

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,

        unsigned long segment_offset;

        if (!seg || !trb || trb < seg->trbs)

        segment_offset = trb - seg->trbs;

        return seg->dma + (segment_offset * sizeof(*trb));
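/*
 * A TRB's DMA address is its segment's base DMA address plus the TRB's
 * byte offset within that segment.  Illustrative values (not from the
 * driver): seg->dma == 0x1000 and trb == &seg->trbs[3] with 16-byte TRBs
 * yields 0x1000 + 3 * 16 == 0x1030.
 */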
static int enqueue_is_link_trb(struct xhci_ring *ring)

static void next_trb(struct xhci_hcd *xhci,

        if (last_trb(xhci, ring, *seg, *trb)) {

                *trb = ((*seg)->trbs);

        unsigned long long addr;

        last_trb_on_last_seg(xhci, ring,

                bool more_trbs_coming)

        unsigned long long addr;

        while (last_trb(xhci, ring, ring->enq_seg, next)) {

                if (!chain && !more_trbs_coming)

                                && !xhci_link_trb_quirk(xhci)) {
                        next->link.control &=
                        next->link.control |=

                if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
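/*
 * When the enqueue pointer lands on a link TRB, the producer carries the
 * chain bit into the link TRB (unless the link-TRB quirk forces chained
 * links), hands the link TRB to the hardware by flipping its cycle bit,
 * and follows it into the next segment.  Crossing the link TRB on the
 * last segment also toggles the ring's cycle state.
 */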
                unsigned int num_trbs)

        int num_trbs_in_deq_seg;

        xhci_readl(xhci, &xhci->dba->doorbell[0]);

static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)

        xhci_dbg(xhci, "Abort command ring\n");

                xhci_dbg(xhci, "The command ring isn't running, "
                                "Has the command ring been stopped?\n");

        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);

                xhci_dbg(xhci, "Command ring has been stopped\n");

                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);

                xhci_err(xhci, "Stopping the command ring failed, "
                                "maybe the host is dead\n");
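/*
 * Aborting the command ring is a two-step handshake: set the abort bit in
 * the command ring control register, then poll for the command ring
 * running (CRR) bit to clear.  The 5 * 1000 * 1000 above is the poll
 * timeout in microseconds, i.e. five seconds; if CRR never clears, the
 * host is assumed dead and is halted.
 */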
static int xhci_queue_cd(struct xhci_hcd *xhci,

                xhci_warn(xhci, "Aborting the command ring,"
                                " but the xHCI is dead.\n");

        retval = xhci_queue_cd(xhci, command, cmd_trb);

                xhci_warn(xhci, "Queuing command descriptor failed.\n");

        retval = xhci_abort_cmd_ring(xhci);

                xhci_err(xhci, "Abort command ring failed\n");

                        spin_unlock_irqrestore(&xhci->lock, flags);

                        xhci_dbg(xhci, "xHCI host controller is dead.\n");

        spin_unlock_irqrestore(&xhci->lock, flags);

                unsigned int slot_id,
                unsigned int stream_id)

        xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
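/*
 * Doorbell layout (per the xHCI spec; DB_VALUE is defined in xhci.h): the
 * low byte selects the endpoint's device context index (ep_index + 1) and
 * the stream ID, if any, occupies the upper 16 bits.  Writing 0 to
 * doorbell[0] rings the command ring rather than an endpoint.
 */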
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
                unsigned int slot_id,

        unsigned int stream_id;

        if (!(list_empty(&ep->ring->td_list)))

        for (stream_id = 1; stream_id < ep->stream_info->num_streams;

                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))

                union xhci_trb *trb, int *cycle_state)

        while (cur_seg->trbs > trb ||

                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
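/*
 * The segment walk above looks for the segment whose TRB array contains
 * the given TRB.  Each time it steps past a link TRB with the toggle flag
 * set, the caller's cycle state is flipped, so on return *cycle_state
 * reflects the cycle bit the hardware would use at that TRB.  Coming back
 * around to the start segment means the TRB is not on this ring.
 */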
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id)

        if (stream_id == 0) {

                        "WARN: Slot ID %u, ep index %u has streams, "
                        "but URB has no stream ID.\n",

                        "WARN: Slot ID %u, ep index %u has "
                        "stream IDs 1 to %u allocated, "
                        "but stream ID %u is requested.\n",

        return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,

                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id, struct xhci_td *cur_td,

        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);

                xhci_warn(xhci, "WARN can't find new dequeue state "
                                "for invalid stream ID %u.\n",

        xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");

                        dev->eps[ep_index].stopped_trb,

        xhci_dbg(xhci, "Finding endpoint context\n");

        xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");

        xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",

        xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
                        (unsigned long long) addr);

                struct xhci_td *cur_td, bool flip_cycle)
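/*
 * td_to_noop() cancels a TD in place: every normal TRB in the TD becomes
 * a No-op TRB, link TRBs are kept but unchained, and with flip_cycle the
 * cycle bit of every TRB except the first is inverted so the hardware
 * never treats the dead TRBs as valid work.
 */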
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

                        xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
                        xhci_dbg(xhci, "Address = %p (0x%llx dma); "
                                        "in seg %p (0x%llx dma)\n",
                                        (unsigned long long)cur_seg->dma);

                        if (flip_cycle && cur_trb != cur_td->first_trb &&

                        xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id,

                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,

        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
                        "new deq ptr = %p (0x%llx dma), new cycle = %u\n",

        queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,

static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,

static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,

        urb_priv = urb->hcpriv;

        hcd = bus_to_hcd(urb->dev->bus);

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {

        spin_unlock(&xhci->lock);

        spin_lock(&xhci->lock);
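/*
 * The lock dance around the giveback is deliberate: the URB's completion
 * handler may resubmit and re-enter the driver, so xhci->lock must be
 * dropped before usb_hcd_giveback_urb() and re-taken afterwards.
 */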
static void handle_stopped_endpoint(struct xhci_hcd *xhci,

        unsigned int slot_id;

        struct xhci_td *last_unlinked_td;

                virt_dev = xhci->devs[slot_id];

                        handle_cmd_in_cmd_wait_list(xhci, virt_dev,

                                        "completion for disabled slot %u\n",

        memset(&deq_state, 0, sizeof(deq_state));

                xhci_stop_watchdog_timer_in_irq(xhci, ep);

                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);

                xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",

                ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);

                                        "has invalid stream ID %u.\n",
                                        cur_td->urb->stream_id);
                        goto remove_finished_td;

                                        cur_td->urb->stream_id,

                        td_to_noop(xhci, ep_ring, cur_td, false);

                list_del_init(&cur_td->td_list);

        last_unlinked_td = cur_td;
        xhci_stop_watchdog_timer_in_irq(xhci, ep);

        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {

                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);

                                struct xhci_td, cancelled_td_list);

                xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

        } while (cur_td != last_unlinked_td);
                xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
                                "xHCI as DYING, exiting.\n");
                spin_unlock_irqrestore(&xhci->lock, flags);

                xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "

                spin_unlock_irqrestore(&xhci->lock, flags);

        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
        xhci_warn(xhci, "Assuming host is dying, halting host.\n");

        spin_unlock_irqrestore(&xhci->lock, flags);

                xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
                xhci_warn(xhci, "Completing active URBs anyway.\n");

                for (j = 0; j < 31; j++) {
                        temp_ep = &xhci->devs[i]->eps[j];
                        ring = temp_ep->ring;

                        xhci_dbg(xhci, "Killing URBs for slot ID %u, "
                                        "ep index %u\n", i, j);
                        while (!list_empty(&ring->td_list)) {

                                list_del_init(&cur_td->td_list);

                                xhci_giveback_urb_in_irq(xhci, cur_td,

                                xhci_giveback_urb_in_irq(xhci, cur_td,

        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_dbg(xhci, "Calling usb_hc_died()\n");

        xhci_dbg(xhci, "xHCI host controller is dead.\n");
static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,

                unsigned int ep_index)

        int num_trbs_free_temp;
        bool revert = false;

        dequeue_temp = ep_ring->dequeue;

        if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {

        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {

                if (last_trb(xhci, ep_ring, ep_ring->deq_seg,

                                dev->eps[ep_index].queued_deq_ptr)

                if (ep_ring->dequeue == dequeue_temp) {

                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");

static void handle_set_deq_completion(struct xhci_hcd *xhci,

        unsigned int slot_id;

        unsigned int stream_id;

        dev = xhci->devs[slot_id];

                xhci_warn(xhci, "WARN Set TR deq ptr command for "
                                "freed stream ID %u\n",

                unsigned int slot_state;

                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
                                        "of stream ID configuration\n");

                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                                        "to incorrect slot or ep state.\n");

                        xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                                        slot_state, ep_state);

                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
                                        "slot %u was not enabled.\n", slot_id);

                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
                                        "completion code of %u.\n",

                xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",

                                dev->eps[ep_index].queued_deq_ptr) ==

                        update_ring_for_set_deq_completion(xhci, dev,

                        xhci_warn(xhci, "Mismatch between completed Set TR Deq "
                                        "Ptr command & xHCI internal state.\n");
                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
                                        dev->eps[ep_index].queued_deq_seg,
                                        dev->eps[ep_index].queued_deq_ptr);

        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
static void handle_reset_ep_completion(struct xhci_hcd *xhci,

                xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",

                xhci_dbg(xhci, "Queueing configure endpoint command\n");

                                xhci->devs[slot_id]->in_ctx->dma, slot_id,

                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);

static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,

        if (list_empty(&virt_dev->cmd_list))

        xhci_complete_cmd_in_cmd_wait_list(xhci, command,

static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)

        cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
                        xhci->cmd_ring->dequeue, &cycle_state);

                xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
                                (unsigned long long)

        for (cmd_trb = xhci->cmd_ring->dequeue;
                        cmd_trb != xhci->cmd_ring->enqueue;
                        next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {

                if (cur_cd->cmd_trb == cmd_trb) {

                                xhci_complete_cmd_in_cmd_wait_list(xhci,

                        cmd_trb->generic.field[0] = 0;
                        cmd_trb->generic.field[1] = 0;
                        cmd_trb->generic.field[2] = 0;

static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)

        struct xhci_cd *cur_cd, *next_cd;

                xhci_cmd_to_noop(xhci, cur_cd);

static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,

        struct xhci_cd *cur_cd, *next_cd;

                if (cur_cd->cmd_trb == cmd_trb) {

                        xhci_complete_cmd_in_cmd_wait_list(xhci,

static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
                int cmd_trb_comp_code)

        int cur_trb_is_good = 0;

                cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,

        xhci_cancel_cmd_in_cd_list(xhci);

        return cur_trb_is_good;
static void handle_cmd_completion(struct xhci_hcd *xhci,

        if (cmd_dequeue_dma == 0) {

        if (cmd_dma != (u64) cmd_dequeue_dma) {

                if (handle_stopped_cmd_ring(xhci,

                        xhci->slot_id = slot_id;

                if (xhci->devs[slot_id]) {

                                        xhci->devs[slot_id], true);

                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))

                                ep_index != (unsigned int) -1 &&

                        ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;

                                goto bandwidth_change;
                        xhci_dbg(xhci, "Completed config ep cmd - "
                                        "last ep index = %d, state = %d\n",
                                        ep_index, ep_state);

                        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);

                xhci_dbg(xhci, "Completed config ep cmd\n");
                xhci->devs[slot_id]->cmd_status =

                virt_dev = xhci->devs[slot_id];
                if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))

                handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);

                handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);

                handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);

                xhci_dbg(xhci, "Completed reset device command.\n");

                virt_dev = xhci->devs[slot_id];

                        handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);

                        xhci_warn(xhci, "Reset device command completion "
                                        "for disabled slot %u\n", slot_id);

                xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",

static void handle_vendor_event(struct xhci_hcd *xhci,

        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);

                handle_cmd_completion(xhci, &event->event_cmd);

static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,

        unsigned int num_similar_speed_ports = 0;

        for (i = 0; i < (port_id - 1); i++) {

                if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
                        num_similar_speed_ports++;

        return num_similar_speed_ports;
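/*
 * xHCI exposes one flat array of hardware ports, while the USB core sees
 * two root hubs (USB2 and USB3).  The "faked" port number is therefore
 * the count of earlier ports of the same speed class: hardware port 5
 * may be port 2 of the USB3 root hub if only one of ports 1-4 is USB3.
 */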
static void handle_device_notification(struct xhci_hcd *xhci,

        struct usb_device *udev;

        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "Device Notification event for "
                                "unused slot %u\n", slot_id);

        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",

        udev = xhci->devs[slot_id]->udev;
        if (udev && udev->parent)

static void handle_port_status(struct xhci_hcd *xhci,

        struct usb_hcd *hcd;

        unsigned int faked_port_index;

        bool bogus_port_status = false;

                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");

        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
                bogus_port_status = true;

        major_revision = xhci->port_array[port_id - 1];
        if (major_revision == 0) {
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
                bogus_port_status = true;

                xhci_warn(xhci, "Event for port %u duplicated in "
                                "Extended Capabilities, ignoring.\n",
                bogus_port_status = true;

        hcd = xhci_to_hcd(xhci);
        if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))

        bus_state = &xhci->bus_state[hcd_index(hcd)];
        if (hcd->speed == HCD_USB3)

        faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,

        temp = xhci_readl(xhci, port_array[faked_port_index]);
        if (hcd->state == HC_STATE_SUSPENDED) {
                xhci_dbg(xhci, "resume root hub\n");
                usb_hcd_resume_root_hub(hcd);

                xhci_dbg(xhci, "port resume event for port %d\n", port_id);

                temp1 = xhci_readl(xhci, &xhci->op_regs->command);

                        xhci_warn(xhci, "xHC is not running.\n");

                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);

                                        faked_port_index, PORT_PLC);

                        bogus_port_status = true;

                        xhci_dbg(xhci, "resume HS port %d\n", port_id);

        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&

                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);

                                faked_port_index + 1);
                if (slot_id && xhci->devs[slot_id])

                                ~(1 << faked_port_index);

                                faked_port_index, PORT_PLC);

                                faked_port_index + 1);
                bogus_port_status = true;

        if (hcd->speed != HCD_USB3)

        if (bogus_port_status)

        spin_unlock(&xhci->lock);

        spin_lock(&xhci->lock);

        cur_seg = start_seg;

                if (end_trb_dma > 0) {

                        if (start_dma <= end_trb_dma) {
                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)

                                if ((suspect_dma >= start_dma &&
                                                suspect_dma <= end_seg_dma) ||
                                                (suspect_dma >= cur_seg->dma &&
                                                suspect_dma <= end_trb_dma))

                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)

                cur_seg = cur_seg->next;

        } while (cur_seg != start_seg);
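/*
 * trb_in_td() decides whether a suspect DMA address belongs to a TD by
 * walking the TD's segments.  If the TD does not wrap within the current
 * segment (start <= end), the suspect must fall inside [start, end].  If
 * it wraps past the segment's last TRB, two ranges qualify: [start, end
 * of segment] and [segment base, end TRB].  Illustrative values: a TD
 * starting at 0x13E0 in a segment spanning 0x1000-0x13F0 and ending at
 * 0x1010 matches a suspect of 0x1008 via the second range.
 */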
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,

static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
                unsigned int trb_comp_code)

        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
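                /*
                 * Completion codes 224-255 are reserved by the xHCI
                 * specification for vendor-defined information, which is
                 * why they are logged and then treated as success rather
                 * than as errors.
                 */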
                xhci_dbg(xhci, "Vendor defined info completion code %u\n",

                xhci_dbg(xhci, "Treating code as success.\n");
        unsigned int slot_id;

        struct urb *urb = NULL;

        struct urb_priv *urb_priv;

        xdev = xhci->devs[slot_id];

        } else if (xhci_requires_manual_halt_cleanup(xhci,
                        ep_ctx, trb_comp_code)) {

                xhci_cleanup_halted_endpoint(xhci,

                inc_deq(xhci, ep_ring);
                inc_deq(xhci, ep_ring);

        urb_priv = urb->hcpriv;

        if (urb->actual_length > urb->transfer_buffer_length) {
                xhci_warn(xhci, "URB transfer length is wrong, "
                                "xHC issue? req. len = %u, "
                                urb->transfer_buffer_length,
                                urb->actual_length);
                urb->actual_length = 0;
                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs

        unsigned int slot_id;

        xdev = xhci->devs[slot_id];

        switch (trb_comp_code) {

                if (event_trb == ep_ring->dequeue) {
                        xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
                                        "without IOC set??\n");
                } else if (event_trb != td->last_trb) {
                        xhci_warn(xhci, "WARN: Success on ctrl data TRB "
                                        "without IOC set??\n");

                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

                return finish_td(xhci, td, event_trb, event, ep, status, false);

                if (!xhci_requires_manual_halt_cleanup(xhci,
                                ep_ctx, trb_comp_code))

                xhci_dbg(xhci, "TRB error code %u, "
                                "halted endpoint index = %u\n",
                                trb_comp_code, ep_index);

        if (event_trb != ep_ring->dequeue &&

                        td->urb->actual_length =
                                td->urb->transfer_buffer_length

                        td->urb->actual_length = 0;

                xhci_cleanup_halted_endpoint(xhci,
                                slot_id, ep_index, 0, td, event_trb);
                return finish_td(xhci, td, event_trb, event, ep, status, true);

        if (event_trb != ep_ring->dequeue) {

                if (td->urb->actual_length != 0) {

                                        (td->urb->transfer_flags
                                                & URB_SHORT_NOT_OK))

                                td->urb->actual_length =
                                        td->urb->transfer_buffer_length;

                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -

                        xhci_dbg(xhci, "Waiting for status "

        return finish_td(xhci, td, event_trb, event, ep, status, false);
        struct urb_priv *urb_priv;

        struct usb_iso_packet_descriptor *frame;

        bool skip_td = false;

        urb_priv = td->urb->hcpriv;

        frame = &td->urb->iso_frame_desc[idx];

        switch (trb_comp_code) {

                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?

                frame->status = -ECOMM;

                frame->actual_length = frame->length;
                td->urb->actual_length += frame->length;

                for (cur_trb = ep_ring->dequeue,
                                cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

                frame->actual_length = len;
                td->urb->actual_length += len;

        return finish_td(xhci, td, event_trb, event, ep, status, false);

        struct urb_priv *urb_priv;
        struct usb_iso_packet_descriptor *frame;

        urb_priv = td->urb->hcpriv;

        frame = &td->urb->iso_frame_desc[idx];

        frame->status = -EXDEV;

        frame->actual_length = 0;

        inc_deq(xhci, ep_ring);
        inc_deq(xhci, ep_ring);

        return finish_td(xhci, td, NULL, event, ep, status, true);

static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,

        switch (trb_comp_code) {

                        xhci_warn(xhci, "WARN Successful completion "

                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

                xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
                                "%d bytes untransferred\n",
                                td->urb->ep->desc.bEndpointAddress,
                                td->urb->transfer_buffer_length,

                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -

                if (td->urb->transfer_buffer_length <
                                td->urb->actual_length) {

                                        "of %d bytes left\n",

                        td->urb->actual_length = 0;
                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)

                        td->urb->actual_length =
                                td->urb->transfer_buffer_length;

                td->urb->actual_length = 0;

                                cur_trb != event_trb;
                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {

                                td->urb->actual_length +=

                        td->urb->actual_length +=

        return finish_td(xhci, td, event_trb, event, ep, status, false);
static int handle_tx_event(struct xhci_hcd *xhci,

        unsigned int slot_id;

        struct urb *urb = NULL;

        struct urb_priv *urb_priv;

        xdev = xhci->devs[slot_id];

                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",

                xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
                                "or incorrect stream ring\n");
                xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",

        switch (trb_comp_code) {

                        "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");

                xhci_dbg(xhci, "Stopped on Transfer TRB\n");

                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");

                xhci_dbg(xhci, "Stalled endpoint\n");

                xhci_warn(xhci, "WARN: TRB error on endpoint\n");

                xhci_dbg(xhci, "Transfer error on endpoint\n");

                xhci_dbg(xhci, "Babble error on endpoint\n");

                xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");

                xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");

                xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");

                xhci_dbg(xhci, "underrun event on endpoint\n");
                if (!list_empty(&ep_ring->td_list))
                        xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
                                        "still with TDs queued?\n",

                xhci_dbg(xhci, "overrun event on endpoint\n");
                if (!list_empty(&ep_ring->td_list))
                        xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
                                        "still with TDs queued?\n",

                xhci_warn(xhci, "WARN: detected an incompatible device");

                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");

                xhci_warn(xhci, "ERROR Unknown event condition, HC probably "

                if (list_empty(&ep_ring->td_list)) {
                        xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
                                        "with no TDs queued?\n",

                        xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",

                                xhci_dbg(xhci, "td_list is empty while skip "
                                                "flag set. Clear skip flag.\n");

                if (ep->skip && td_num == 0) {

                        xhci_dbg(xhci, "All tds on the ep_ring skipped. "
                                        "Clear skip flag.\n");

                                !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {

                                        "ERROR Transfer event TRB DMA ptr not "
                                        "part of current TD\n");

                        ret = skip_isoc_td(xhci, td, event, ep, &status);

                        xhci_dbg(xhci, "Found td. Clear skip flag.\n");

                event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
                                sizeof(*event_trb)];

                                "event_trb is a no-op TRB. Skip it\n");

                if (usb_endpoint_xfer_control(&td->urb->ep->desc))
                        ret = process_ctrl_td(xhci, td, event_trb, event, ep,

                else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
                        ret = process_isoc_td(xhci, td, event_trb, event, ep,

                        ret = process_bulk_intr_td(xhci, td, event_trb, event,

                urb_priv = urb->hcpriv;

                if (usb_endpoint_xfer_control(&urb->ep->desc) ||

                if ((urb->actual_length != urb->transfer_buffer_length &&
                                (urb->transfer_flags &
                                        URB_SHORT_NOT_OK)) ||
                                !usb_endpoint_xfer_isoc(&urb->ep->desc)))
                        xhci_dbg(xhci, "Giveback URB %p, len = %d, "
                                        "expected = %d, status = %d\n",
                                        urb, urb->actual_length,
                                        urb->transfer_buffer_length,

                spin_unlock(&xhci->lock);

                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)

                spin_lock(&xhci->lock);

static int xhci_handle_event(struct xhci_hcd *xhci)

        int update_ptrs = 1;

                handle_cmd_completion(xhci, &event->event_cmd);

                handle_port_status(xhci, event);

                handle_device_notification(xhci, event);

                handle_vendor_event(xhci, event);

                xhci_dbg(xhci, "xHCI host dying, returning from "
                                "event handler.\n");

        struct xhci_hcd *xhci = hcd_to_xhci(hcd);

        spin_lock(&xhci->lock);

        status = xhci_readl(xhci, &xhci->op_regs->status);
        if (status == 0xffffffff)

                spin_unlock(&xhci->lock);

                xhci_warn(xhci, "WARNING: Host System Error\n");

                spin_unlock(&xhci->lock);

        xhci_writel(xhci, status, &xhci->op_regs->status);

                irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);

                xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);

                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");

                temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
                xhci_write_64(xhci, temp_64 | ERST_EHB,
                                &xhci->ir_set->erst_dequeue);
                spin_unlock(&xhci->lock);

        while (xhci_handle_event(xhci) > 0) {}
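/*
 * The interrupt handler drains the event ring by calling
 * xhci_handle_event() until it stops consuming events, then writes the
 * new event ring dequeue pointer back with ERST_EHB set: clearing the
 * Event Handler Busy flag tells the controller it may raise the next
 * interrupt.
 */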
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);

        if (event_ring_deq != xhci->event_ring->dequeue) {

                xhci_warn(xhci, "WARN something wrong with SW event "
                                "ring dequeue ptr.\n");

        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

        spin_unlock(&xhci->lock);

                bool more_trbs_coming,

        inc_enq(xhci, ring, more_trbs_coming);

                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)

        unsigned int num_trbs_needed;

                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");

                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");

                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");

                xhci_err(xhci, "ERROR unknown endpoint state for ep\n");

                if (room_on_ring(xhci, ep_ring, num_trbs))

                        xhci_err(xhci, "Command ring expansion is not supported\n");

                xhci_dbg(xhci, "ERROR no room on ep ring, "
                                "trying ring expansion\n");

                        xhci_err(xhci, "Ring expansion failed\n");

        if (enqueue_is_link_trb(ep_ring)) {

                while (last_trb(xhci, ring, ring->enq_seg, next)) {

                        if (!xhci_link_trb_quirk(xhci) &&

                        if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {

static int prepare_transfer(struct xhci_hcd *xhci,

                unsigned int ep_index,
                unsigned int stream_id,
                unsigned int num_trbs,
                unsigned int td_index,

        struct urb_priv *urb_priv;

                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",

        ret = prepare_ring(xhci, ep_ring,
                        num_trbs, mem_flags);

        urb_priv = urb->hcpriv;
        td = urb_priv->td[td_index];

        if (td_index == 0) {

        urb_priv->td[td_index] = td;

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci,
                struct urb *urb)

        int num_sgs, num_trbs, running_total, temp, i;

        num_sgs = urb->num_mapped_sgs;
        temp = urb->transfer_buffer_length;

        if (running_total != 0)

                while (running_total < sg_dma_len(sg) && running_total < temp) {

                        len = min_t(int, len, temp);

static void check_trb_math(struct urb *urb, int num_trbs, int running_total)

                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                                "TRBs, %d left\n", __func__,
                                urb->ep->desc.bEndpointAddress, num_trbs);
        if (running_total != urb->transfer_buffer_length)
                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                                "queued %#x (%d), asked for %#x (%d)\n",
                                urb->ep->desc.bEndpointAddress,
                                running_total, running_total,
                                urb->transfer_buffer_length,
                                urb->transfer_buffer_length);
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id, int start_cycle,

                struct urb *urb, int slot_id, unsigned int ep_index)

                        xhci->devs[slot_id]->out_ctx, ep_index);

        ep_interval = urb->interval;

        if (xhci_interval != ep_interval) {
                if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
                                        ep_interval == 1 ? "" : "s",
                                        xhci_interval == 1 ? "" : "s");
                urb->interval = xhci_interval;

static u32 xhci_td_remainder(unsigned int remainder)

        u32 max = (1 << (21 - 17 + 1)) - 1;

        if ((remainder >> 10) >= max)

        return (remainder >> 10) << 17;
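/*
 * Pre-1.0 hosts want the TD remainder as a count of 1KB blocks still to
 * transfer, stored in bits 21:17 of the length field and saturated at
 * max == 31.  Illustrative values: 8000 bytes left gives 8000 >> 10 == 7,
 * encoded as 7 << 17; 70000 bytes left gives 68, which saturates to 31.
 */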
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
                unsigned int total_packet_count, struct urb *urb)

        int packets_transferred;

        if (running_total == 0 && trb_buff_len == 0)

        packets_transferred = (running_total + trb_buff_len) /
                        usb_endpoint_maxp(&urb->ep->desc);

        return xhci_td_remainder(total_packet_count - packets_transferred);
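/*
 * xHCI 1.0 redefines TD Size as the number of packets remaining after
 * this TRB rather than a byte count.  Illustrative values: with a
 * 1024-byte max packet size, total_packet_count == 10,
 * running_total == 2048, and trb_buff_len == 1024, three packets will
 * have been sent once this TRB completes, so seven packets remain.
 */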
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)

        unsigned int num_trbs;
        struct urb_priv *urb_priv;

        int trb_buff_len, this_sg_len, running_total;
        unsigned int total_packet_count;

        bool more_trbs_coming;

        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

        num_trbs = count_sg_trbs_needed(xhci, urb);
        num_sgs = urb->num_mapped_sgs;
        total_packet_count = roundup(urb->transfer_buffer_length,
                        usb_endpoint_maxp(&urb->ep->desc));

        trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
        if (trb_buff_len < 0)
                return trb_buff_len;

        urb_priv = urb->hcpriv;
        td = urb_priv->td[0];

        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
        if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;

                u32 length_field = 0;

                if (start_cycle == 0)

                if (usb_urb_dir_in(urb))

                        xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
                        xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                                        (unsigned int) addr + trb_buff_len);

                        remainder = xhci_td_remainder(
                                        urb->transfer_buffer_length -

                        remainder = xhci_v1_0_td_remainder(running_total,
                                        trb_buff_len, total_packet_count, urb);

                length_field = TRB_LEN(trb_buff_len) |

                        more_trbs_coming = true;

                        more_trbs_coming = false;
                queue_trb(xhci, ep_ring, more_trbs_coming,

                running_total += trb_buff_len;

                this_sg_len -= trb_buff_len;
                if (this_sg_len == 0) {

                addr += trb_buff_len;

                trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
                if (running_total + trb_buff_len > urb->transfer_buffer_length)
                                urb->transfer_buffer_length - running_total;
        } while (running_total < urb->transfer_buffer_length);

        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
                struct urb *urb, int slot_id, unsigned int ep_index)

        struct urb_priv *urb_priv;

        bool more_trbs_coming;

        int running_total, trb_buff_len, ret;
        unsigned int total_packet_count;

                return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

        if (running_total != 0 || urb->transfer_buffer_length == 0)

        while (running_total < urb->transfer_buffer_length) {

        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);

        urb_priv = urb->hcpriv;
        td = urb_priv->td[0];

        total_packet_count = roundup(urb->transfer_buffer_length,
                        usb_endpoint_maxp(&urb->ep->desc));

        addr = (u64) urb->transfer_dma;

        if (trb_buff_len > urb->transfer_buffer_length)
                trb_buff_len = urb->transfer_buffer_length;

                if (start_cycle == 0)

                if (usb_urb_dir_in(urb))

                        remainder = xhci_td_remainder(
                                        urb->transfer_buffer_length -

                        remainder = xhci_v1_0_td_remainder(running_total,
                                        trb_buff_len, total_packet_count, urb);

                length_field = TRB_LEN(trb_buff_len) |

                        more_trbs_coming = true;

                        more_trbs_coming = false;
                queue_trb(xhci, ep_ring, more_trbs_coming,

                running_total += trb_buff_len;

                addr += trb_buff_len;
                trb_buff_len = urb->transfer_buffer_length - running_total;

        } while (running_total < urb->transfer_buffer_length);

        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);

                struct urb *urb, int slot_id, unsigned int ep_index)

        struct urb_priv *urb_priv;

        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);

        if (!urb->setup_packet)

        if (urb->transfer_buffer_length > 0)

        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);

        urb_priv = urb->hcpriv;
        td = urb_priv->td[0];

        if (start_cycle == 0)

        if (urb->transfer_buffer_length > 0) {

        queue_trb(xhci, ep_ring, true,

        if (usb_urb_dir_in(urb))

        length_field = TRB_LEN(urb->transfer_buffer_length) |
                        xhci_td_remainder(urb->transfer_buffer_length) |

        if (urb->transfer_buffer_length > 0) {

                queue_trb(xhci, ep_ring, true,

        queue_trb(xhci, ep_ring, false,

        giveback_first_trb(xhci, slot_id, ep_index, 0,
                        start_cycle, start_trb);
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
                struct urb *urb, int i)

        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
        td_len = urb->iso_frame_desc[i].length;

static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct urb *urb, unsigned int total_packet_count)

        unsigned int max_burst;

        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
        return roundup(total_packet_count, max_burst + 1) - 1;

static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
                struct usb_device *udev,
                struct urb *urb, unsigned int total_packet_count)

        unsigned int max_burst;
        unsigned int residue;

        switch (udev->speed) {

                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
                residue = total_packet_count % (max_burst + 1);

                if (total_packet_count == 0)

                return total_packet_count - 1;
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)

        struct urb_priv *urb_priv;

        int num_tds, trbs_per_td;

        int running_total, trb_buff_len, td_len, td_remain_len, ret;

        bool more_trbs_coming;

        num_tds = urb->number_of_packets;

                xhci_dbg(xhci, "Isoc URB with zero packets?\n");

        start_addr = (u64) urb->transfer_dma;

        urb_priv = urb->hcpriv;

        for (i = 0; i < num_tds; i++) {
                unsigned int total_packet_count;
                unsigned int burst_count;
                unsigned int residue;

                addr = start_addr + urb->iso_frame_desc[i].offset;
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;
                total_packet_count = roundup(td_len,
                                usb_endpoint_maxp(&urb->ep->desc));

                if (total_packet_count == 0)
                        total_packet_count++;
                burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
                                total_packet_count);
                residue = xhci_get_last_burst_packet_count(xhci,
                                urb->dev, urb, total_packet_count);

                trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                                urb->stream_id, trbs_per_td, urb, i, mem_flags);

                td = urb_priv->td[i];
                for (j = 0; j < trbs_per_td; j++) {

                        if (start_cycle == 0)

                        if (usb_urb_dir_in(urb))

                        if (j < trbs_per_td - 1) {

                                more_trbs_coming = true;

                        if (i < num_tds - 1)

                                more_trbs_coming = false;

                        if (trb_buff_len > td_remain_len)
                                trb_buff_len = td_remain_len;

                                remainder = xhci_td_remainder(
                                                td_len - running_total);

                                remainder = xhci_v1_0_td_remainder(
                                                running_total, trb_buff_len,
                                                total_packet_count, urb);

                        length_field = TRB_LEN(trb_buff_len) |

                        queue_trb(xhci, ep_ring, more_trbs_coming,

                        running_total += trb_buff_len;

                        addr += trb_buff_len;
                        td_remain_len -= trb_buff_len;

                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length mismatch\n");

        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {

        xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);

        for (i--; i >= 0; i--)
                list_del_init(&urb_priv->td[i]->td_list);

        urb_priv->td[0]->last_trb = ep_ring->enqueue;

        td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

        ep_ring->enqueue = urb_priv->td[0]->first_trb;
        ep_ring->enq_seg = urb_priv->td[0]->start_seg;
                struct urb *urb, int slot_id, unsigned int ep_index)

        int num_tds, num_trbs, i;

        xdev = xhci->devs[slot_id];

        num_tds = urb->number_of_packets;
        for (i = 0; i < num_tds; i++)
                num_trbs += count_isoc_trbs_needed(xhci, urb, i);

                        num_trbs, mem_flags);

        start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
        start_frame &= 0x3fff;

        urb->start_frame = start_frame;

                urb->start_frame >>= 3;

        ep_interval = urb->interval;

        if (xhci_interval != ep_interval) {
                if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
                                        ep_interval == 1 ? "" : "s",
                                        xhci_interval == 1 ? "" : "s");
                urb->interval = xhci_interval;

        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);

static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                u32 field3, u32 field4, bool command_must_succeed)

        if (!command_must_succeed)

                xhci_err(xhci, "ERR: No room for command on command ring\n");
                if (command_must_succeed)
                        xhci_err(xhci, "ERR: Reserved TRB counting for "
                                        "unfailable commands failed.\n");

        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
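/*
 * Every command TRB is queued with the ring's current cycle state OR'd
 * into its control word (field4), so the controller only sees the TRB as
 * valid once the producer cycle bit matches.  Commands that must not fail
 * may dip into the ring's reserved TRBs, hence the check above.
 */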
        return queue_command(xhci, 0, 0, 0,

        return queue_command(xhci, field1, field2, field3, field4, false);

        return queue_command(xhci, 0, 0, 0,

                u32 slot_id, bool command_must_succeed)

                        command_must_succeed);

                u32 slot_id, bool command_must_succeed)

                        command_must_succeed);

                unsigned int ep_index, int suspend)

        return queue_command(xhci, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type | trb_suspend, false);

static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id,

                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",

                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");

        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                        trb_slot_id | trb_ep_index | type, false);

                unsigned int ep_index)

        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,