xhci-ring.c
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 /*
24  * Ring initialization rules:
25  * 1. Each segment is initialized to zero, except for link TRBs.
26  * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
27  * Consumer Cycle State (CCS), depending on ring function.
28  * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
29  *
30  * Ring behavior rules:
31  * 1. A ring is empty if enqueue == dequeue. This means there will always be at
32  * least one free TRB in the ring. This is useful if you want to turn that
33  * into a link TRB and expand the ring.
34  * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
35  * link TRB, then load the pointer with the address in the link TRB. If the
36  * link TRB had its toggle bit set, you may need to update the ring cycle
37  * state (see cycle bit rules). You may have to do this multiple times
38  * until you reach a non-link TRB.
39  * 3. A ring is full if enqueue++ (for the definition of increment above)
40  * equals the dequeue pointer.
41  *
42  * Cycle bit rules:
43  * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
44  * in a link TRB, it must toggle the ring cycle state.
45  * 2. When a producer increments an enqueue pointer and encounters a toggle bit
46  * in a link TRB, it must toggle the ring cycle state.
47  *
48  * Producer rules:
49  * 1. Check if ring is full before you enqueue.
50  * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
51  * Update enqueue pointer between each write (which may update the ring
52  * cycle state).
53  * 3. Notify consumer. If SW is producer, it rings the doorbell for command
54  * and endpoint rings. If HC is the producer for the event ring,
55  * it generates an interrupt according to interrupt modulation rules.
56  *
57  * Consumer rules:
58  * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
59  * the TRB is owned by the consumer.
60  * 2. Update dequeue pointer (which may update the ring cycle state) and
61  * continue processing TRBs until you reach a TRB which is not owned by you.
62  * 3. Notify the producer. SW is the consumer for the event ring, and it
63  * updates event ring dequeue pointer. HC is the consumer for the command and
64  * endpoint rings; it generates events on the event ring for these.
65  */
66 
67 #include <linux/scatterlist.h>
68 #include <linux/slab.h>
69 #include "xhci.h"
70 
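/*
 * Editor's note: a minimal sketch (not part of the driver) of the two tests
 * implied by the ring and cycle bit rules in the header comment above. The
 * helper names are hypothetical; the fields and macros (enqueue, dequeue,
 * cycle_state, TRB_CYCLE) are the real ones from xhci.h.
 */
static inline bool example_ring_is_empty(struct xhci_ring *ring)
{
	/* Ring behavior rule 1: empty when enqueue == dequeue. */
	return ring->enqueue == ring->dequeue;
}

static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* Consumer rule 1: the TRB is ours if its cycle bit matches our
	 * ring cycle state.
	 */
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
			ring->cycle_state;
}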
71 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
72  struct xhci_virt_device *virt_dev,
73  struct xhci_event_cmd *event);
74 
75 /*
76  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
77  * address of the TRB.
78  */
79 dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
80  union xhci_trb *trb)
81 {
82  unsigned long segment_offset;
83 
84  if (!seg || !trb || trb < seg->trbs)
85  return 0;
86  /* offset in TRBs */
87  segment_offset = trb - seg->trbs;
88  if (segment_offset > TRBS_PER_SEGMENT)
89  return 0;
90  return seg->dma + (segment_offset * sizeof(*trb));
91 }
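/*
 * Editor's note: a usage sketch for the helper above (hypothetical function,
 * not in the driver). The Nth TRB of a segment lives at
 * seg->dma + N * sizeof(union xhci_trb), which is exactly the pointer
 * arithmetic xhci_trb_virt_to_dma() performs.
 */
static inline dma_addr_t example_dma_of_nth_trb(struct xhci_segment *seg,
		unsigned int n)
{
	if (n >= TRBS_PER_SEGMENT)
		return 0;
	return xhci_trb_virt_to_dma(seg, &seg->trbs[n]);
}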
92 
93 /* Does this link TRB point to the first segment in a ring,
94  * or was the previous TRB the last TRB on the last segment in the ERST?
95  */
96 static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
97  struct xhci_segment *seg, union xhci_trb *trb)
98 {
99  if (ring == xhci->event_ring)
100  return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
101  (seg->next == xhci->event_ring->first_seg);
102  else
103  return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
104 }
105 
106 /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
107  * segment? I.e. would the updated event TRB pointer step off the end of the
108  * event seg?
109  */
110 static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
111  struct xhci_segment *seg, union xhci_trb *trb)
112 {
113  if (ring == xhci->event_ring)
114  return trb == &seg->trbs[TRBS_PER_SEGMENT];
115  else
116  return TRB_TYPE_LINK_LE32(trb->link.control);
117 }
118 
119 static int enqueue_is_link_trb(struct xhci_ring *ring)
120 {
121  struct xhci_link_trb *link = &ring->enqueue->link;
122  return TRB_TYPE_LINK_LE32(link->control);
123 }
124 
125 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
126  * TRB is in a new segment. This does not skip over link TRBs, and it does not
127  * affect the ring dequeue or enqueue pointers.
128  */
129 static void next_trb(struct xhci_hcd *xhci,
130  struct xhci_ring *ring,
131  struct xhci_segment **seg,
132  union xhci_trb **trb)
133 {
134  if (last_trb(xhci, ring, *seg, *trb)) {
135  *seg = (*seg)->next;
136  *trb = ((*seg)->trbs);
137  } else {
138  (*trb)++;
139  }
140 }
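/*
 * Editor's note: an illustrative sketch (hypothetical helper) of the
 * traversal pattern next_trb() enables; td_to_noop() below walks a TD the
 * same way. Link TRBs are included in the count because next_trb() does
 * not skip them.
 */
static inline unsigned int example_count_trbs_in_td(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;
	unsigned int count = 1;

	while (trb != td->last_trb) {
		next_trb(xhci, ring, &seg, &trb);
		count++;
	}
	return count;
}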
141 
142 /*
143  * See Cycle bit rules. SW is the consumer for the event ring only.
144  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
145  */
146 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
147 {
148  unsigned long long addr;
149 
150  ring->deq_updates++;
151 
152  /*
153  * If this is not event ring, and the dequeue pointer
154  * is not on a link TRB, there is one more usable TRB
155  */
156  if (ring->type != TYPE_EVENT &&
157  !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
158  ring->num_trbs_free++;
159 
160  do {
161  /*
162  * Update the dequeue pointer further if that was a link TRB or
163  * we're at the end of an event ring segment (which doesn't have
164  * link TRBS)
165  */
166  if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
167  if (ring->type == TYPE_EVENT &&
168  last_trb_on_last_seg(xhci, ring,
169  ring->deq_seg, ring->dequeue)) {
170  ring->cycle_state = (ring->cycle_state ? 0 : 1);
171  }
172  ring->deq_seg = ring->deq_seg->next;
173  ring->dequeue = ring->deq_seg->trbs;
174  } else {
175  ring->dequeue++;
176  }
177  } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
178 
179  addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
180 }
181 
182 /*
183  * See Cycle bit rules. SW is the consumer for the event ring only.
184  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
185  *
186  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
187  * chain bit is set), then set the chain bit in all the following link TRBs.
188  * If we've enqueued the last TRB in a TD, make sure the following link TRBs
189  * have their chain bit cleared (so that each Link TRB is a separate TD).
190  *
191  * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
192  * set, but other sections talk about dealing with the chain bit set. This was
193  * fixed in the 0.96 specification errata, but we have to assume that all 0.95
194  * xHCI hardware can't handle the chain bit being cleared on a link TRB.
195  *
196  * @more_trbs_coming: Will you enqueue more TRBs before calling
197  * prepare_transfer()?
198  */
199 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
200  bool more_trbs_coming)
201 {
202  u32 chain;
203  union xhci_trb *next;
204  unsigned long long addr;
205 
206  chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
207  /* If this is not event ring, there is one less usable TRB */
208  if (ring->type != TYPE_EVENT &&
209  !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
210  ring->num_trbs_free--;
211  next = ++(ring->enqueue);
212 
213  ring->enq_updates++;
214  /* Update the enqueue pointer further if that was a link TRB or we're at
215  * the end of an event ring segment (which doesn't have link TRBS)
216  */
217  while (last_trb(xhci, ring, ring->enq_seg, next)) {
218  if (ring->type != TYPE_EVENT) {
219  /*
220  * If the caller doesn't plan on enqueueing more
221  * TDs before ringing the doorbell, then we
222  * don't want to give the link TRB to the
223  * hardware just yet. We'll give the link TRB
224  * back in prepare_ring() just before we enqueue
225  * the TD at the top of the ring.
226  */
227  if (!chain && !more_trbs_coming)
228  break;
229 
230  /* If we're not dealing with 0.95 hardware or
231  * isoc rings on AMD 0.96 host,
232  * carry over the chain bit of the previous TRB
233  * (which may mean the chain bit is cleared).
234  */
235  if (!(ring->type == TYPE_ISOC &&
236  (xhci->quirks & XHCI_AMD_0x96_HOST))
237  && !xhci_link_trb_quirk(xhci)) {
238  next->link.control &=
239  cpu_to_le32(~TRB_CHAIN);
240  next->link.control |=
241  cpu_to_le32(chain);
242  }
243  /* Give this link TRB to the hardware */
244  wmb();
245  next->link.control ^= cpu_to_le32(TRB_CYCLE);
246 
247  /* Toggle the cycle bit after the last ring segment. */
248  if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
249  ring->cycle_state = (ring->cycle_state ? 0 : 1);
250  }
251  }
252  ring->enq_seg = ring->enq_seg->next;
253  ring->enqueue = ring->enq_seg->trbs;
254  next = ring->enqueue;
255  }
256  addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
257 }
258 
259 /*
260  * Check to see if there's room to enqueue num_trbs on the ring and make sure
261  * enqueue pointer will not advance into dequeue segment. See rules above.
262  */
263 static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
264  unsigned int num_trbs)
265 {
266  int num_trbs_in_deq_seg;
267 
268  if (ring->num_trbs_free < num_trbs)
269  return 0;
270 
271  if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
272  num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
273  if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
274  return 0;
275  }
276 
277  return 1;
278 }
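/*
 * Editor's note: a minimal sketch of producer rule 1 ("check if the ring is
 * full before you enqueue"), assuming a hypothetical caller. The real
 * callers go through prepare_ring(), which can also grow the ring when
 * room_on_ring() fails.
 */
static inline int example_check_room(struct xhci_hcd *xhci,
		struct xhci_ring *ring, unsigned int num_trbs)
{
	if (!room_on_ring(xhci, ring, num_trbs))
		return -ENOMEM;	/* caller must not enqueue yet */
	return 0;
}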
279 
280 /* Ring the host controller doorbell after placing a command on the ring */
281 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
282 {
283  if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
284  return;
285 
286  xhci_dbg(xhci, "// Ding dong!\n");
287  xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
288  /* Flush PCI posted writes */
289  xhci_readl(xhci, &xhci->dba->doorbell[0]);
290 }
291 
292 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
293 {
294  u64 temp_64;
295  int ret;
296 
297  xhci_dbg(xhci, "Abort command ring\n");
298 
299  if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
300  xhci_dbg(xhci, "The command ring isn't running, "
301  "Have the command ring been stopped?\n");
302  return 0;
303  }
304 
305  temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
306  if (!(temp_64 & CMD_RING_RUNNING)) {
307  xhci_dbg(xhci, "Command ring had been stopped\n");
308  return 0;
309  }
310  xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
311  xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
312  &xhci->op_regs->cmd_ring);
313 
314  /* Section 4.6.1.2 of xHCI 1.0 spec says software should
315  * time the completion of all xHCI commands, including
316  * the Command Abort operation. If software doesn't see
317  * CRR negated in a timely manner (e.g. longer than 5
318  * seconds), then it should assume that there are
319  * larger problems with the xHC and assert HCRST.
320  */
321  ret = handshake(xhci, &xhci->op_regs->cmd_ring,
322  CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
323  if (ret < 0) {
324  xhci_err(xhci, "Stopped the command ring failed, "
325  "maybe the host is dead\n");
326  xhci->xhc_state |= XHCI_STATE_DYING;
327  xhci_quiesce(xhci);
328  xhci_halt(xhci);
329  return -ESHUTDOWN;
330  }
331 
332  return 0;
333 }
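/*
 * Editor's note: a simplified stand-in for the handshake() call above
 * (assumes <linux/delay.h> for udelay()): poll the register until
 * (value & mask) == done or the timeout in microseconds expires. The
 * driver's real handshake() additionally treats an all-ones read as
 * "controller removed" and returns -ENODEV.
 */
static inline int example_poll_handshake(struct xhci_hcd *xhci,
		__le32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if ((result & mask) == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}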
334 
335 static int xhci_queue_cd(struct xhci_hcd *xhci,
336  struct xhci_command *command,
337  union xhci_trb *cmd_trb)
338 {
339  struct xhci_cd *cd;
340  cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
341  if (!cd)
342  return -ENOMEM;
343  INIT_LIST_HEAD(&cd->cancel_cmd_list);
344 
345  cd->command = command;
346  cd->cmd_trb = cmd_trb;
347  list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
348 
349  return 0;
350 }
351 
352 /*
353  * Cancel a command that has been issued.
354  *
355  * Some commands may hang while waiting for acknowledgement from the
356  * USB device. That is outside of the xHC's ability to control and
357  * will leave the command ring blocked. When this occurs, software
358  * should intervene to recover the command ring.
359  * See Section 4.6.1.1 and 4.6.1.2
360  */
361 int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
362  union xhci_trb *cmd_trb)
363 {
364  int retval = 0;
365  unsigned long flags;
366 
367  spin_lock_irqsave(&xhci->lock, flags);
368 
369  if (xhci->xhc_state & XHCI_STATE_DYING) {
370  xhci_warn(xhci, "Abort the command ring,"
371  " but the xHCI is dead.\n");
372  retval = -ESHUTDOWN;
373  goto fail;
374  }
375 
376  /* queue the command descriptor to cancel_cmd_list */
377  retval = xhci_queue_cd(xhci, command, cmd_trb);
378  if (retval) {
379  xhci_warn(xhci, "Queuing command descriptor failed.\n");
380  goto fail;
381  }
382 
383  /* abort command ring */
384  retval = xhci_abort_cmd_ring(xhci);
385  if (retval) {
386  xhci_err(xhci, "Abort command ring failed\n");
387  if (unlikely(retval == -ESHUTDOWN)) {
388  spin_unlock_irqrestore(&xhci->lock, flags);
389  usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
390  xhci_dbg(xhci, "xHCI host controller is dead.\n");
391  return retval;
392  }
393  }
394 
395 fail:
396  spin_unlock_irqrestore(&xhci->lock, flags);
397  return retval;
398 }
399 
400 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
401  unsigned int slot_id,
402  unsigned int ep_index,
403  unsigned int stream_id)
404 {
405  __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
406  struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
407  unsigned int ep_state = ep->ep_state;
408 
409  /* Don't ring the doorbell for this endpoint if there are pending
410  * cancellations because we don't want to interrupt processing.
411  * We don't want to restart any stream rings if there's a set dequeue
412  * pointer command pending because the device can choose to start any
413  * stream once the endpoint is on the HW schedule.
414  * FIXME - check all the stream rings for pending cancellations.
415  */
416  if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
417  (ep_state & EP_HALTED))
418  return;
419  xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
420  /* The CPU has better things to do at this point than wait for a
421  * write-posting flush. It'll get there soon enough.
422  */
423 }
424 
425 /* Ring the doorbell for any rings with pending URBs */
426 static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
427  unsigned int slot_id,
428  unsigned int ep_index)
429 {
430  unsigned int stream_id;
431  struct xhci_virt_ep *ep;
432 
433  ep = &xhci->devs[slot_id]->eps[ep_index];
434 
435  /* A ring has pending URBs if its TD list is not empty */
436  if (!(ep->ep_state & EP_HAS_STREAMS)) {
437  if (!(list_empty(&ep->ring->td_list)))
438  xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
439  return;
440  }
441 
442  for (stream_id = 1; stream_id < ep->stream_info->num_streams;
443  stream_id++) {
444  struct xhci_stream_info *stream_info = ep->stream_info;
445  if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
446  xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
447  stream_id);
448  }
449 }
450 
451 /*
452  * Find the segment that trb is in. Start searching in start_seg.
453  * If we must move past a segment that has a link TRB with a toggle cycle state
454  * bit set, then we will toggle the value pointed at by cycle_state.
455  */
456 static struct xhci_segment *find_trb_seg(
457  struct xhci_segment *start_seg,
458  union xhci_trb *trb, int *cycle_state)
459 {
460  struct xhci_segment *cur_seg = start_seg;
461  struct xhci_generic_trb *generic_trb;
462 
463  while (cur_seg->trbs > trb ||
464  &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
465  generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
466  if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
467  *cycle_state ^= 0x1;
468  cur_seg = cur_seg->next;
469  if (cur_seg == start_seg)
470  /* Looped over the entire list. Oops! */
471  return NULL;
472  }
473  return cur_seg;
474 }
475 
476 
477 static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
478  unsigned int slot_id, unsigned int ep_index,
479  unsigned int stream_id)
480 {
481  struct xhci_virt_ep *ep;
482 
483  ep = &xhci->devs[slot_id]->eps[ep_index];
484  /* Common case: no streams */
485  if (!(ep->ep_state & EP_HAS_STREAMS))
486  return ep->ring;
487 
488  if (stream_id == 0) {
489  xhci_warn(xhci,
490  "WARN: Slot ID %u, ep index %u has streams, "
491  "but URB has no stream ID.\n",
492  slot_id, ep_index);
493  return NULL;
494  }
495 
496  if (stream_id < ep->stream_info->num_streams)
497  return ep->stream_info->stream_rings[stream_id];
498 
499  xhci_warn(xhci,
500  "WARN: Slot ID %u, ep index %u has "
501  "stream IDs 1 to %u allocated, "
502  "but stream ID %u is requested.\n",
503  slot_id, ep_index,
504  ep->stream_info->num_streams - 1,
505  stream_id);
506  return NULL;
507 }
508 
509 /* Get the right ring for the given URB.
510  * If the endpoint supports streams, boundary check the URB's stream ID.
511  * If the endpoint doesn't support streams, return the singular endpoint ring.
512  */
513 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
514  struct urb *urb)
515 {
516  return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
517  xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
518 }
519 
520 /*
521  * Move the xHC's endpoint ring dequeue pointer past cur_td.
522  * Record the new state of the xHC's endpoint ring dequeue segment,
523  * dequeue pointer, and new consumer cycle state in state.
524  * Update our internal representation of the ring's dequeue pointer.
525  *
526  * We do this in three jumps:
527  * - First we update our new ring state to be the same as when the xHC stopped.
528  * - Then we traverse the ring to find the segment that contains
529  * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
530  * any link TRBs with the toggle cycle bit set.
531  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
532  * if we've moved it past a link TRB with the toggle cycle bit set.
533  *
534  * Some of the uses of xhci_generic_trb are grotty, but if they're done
535  * with correct __le32 accesses they should work fine. Only users of this are
536  * in here.
537  */
538 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
539  unsigned int slot_id, unsigned int ep_index,
540  unsigned int stream_id, struct xhci_td *cur_td,
541  struct xhci_dequeue_state *state)
542 {
543  struct xhci_virt_device *dev = xhci->devs[slot_id];
544  struct xhci_ring *ep_ring;
545  struct xhci_generic_trb *trb;
546  struct xhci_ep_ctx *ep_ctx;
547  dma_addr_t addr;
548 
549  ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
550  ep_index, stream_id);
551  if (!ep_ring) {
552  xhci_warn(xhci, "WARN can't find new dequeue state "
553  "for invalid stream ID %u.\n",
554  stream_id);
555  return;
556  }
557  state->new_cycle_state = 0;
558  xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
559  state->new_deq_seg = find_trb_seg(cur_td->start_seg,
560  dev->eps[ep_index].stopped_trb,
561  &state->new_cycle_state);
562  if (!state->new_deq_seg) {
563  WARN_ON(1);
564  return;
565  }
566 
567  /* Dig out the cycle state saved by the xHC during the stop ep cmd */
568  xhci_dbg(xhci, "Finding endpoint context\n");
569  ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
570  state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
571 
572  state->new_deq_ptr = cur_td->last_trb;
573  xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
574  state->new_deq_seg = find_trb_seg(state->new_deq_seg,
575  state->new_deq_ptr,
576  &state->new_cycle_state);
577  if (!state->new_deq_seg) {
578  WARN_ON(1);
579  return;
580  }
581 
582  trb = &state->new_deq_ptr->generic;
583  if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
584  (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
585  state->new_cycle_state ^= 0x1;
586  next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
587 
588  /*
589  * If there is only one segment in a ring, find_trb_seg()'s while loop
590  * will not run, and it will return before it has a chance to see if it
591  * needs to toggle the cycle bit. It can't tell if the stalled transfer
592  * ended just before the link TRB on a one-segment ring, or if the TD
593  * wrapped around the top of the ring, because it doesn't have the TD in
594  * question. Look for the one-segment case where stalled TRB's address
595  * is greater than the new dequeue pointer address.
596  */
597  if (ep_ring->first_seg == ep_ring->first_seg->next &&
598  state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
599  state->new_cycle_state ^= 0x1;
600  xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
601 
602  /* Don't update the ring cycle state for the producer (us). */
603  xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
604  state->new_deq_seg);
605  addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
606  xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
607  (unsigned long long) addr);
608 }
609 
610 /* flip_cycle means flip the cycle bit of all but the first and last TRB.
611  * (The last TRB actually points to the ring enqueue pointer, which is not part
612  * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
613  */
614 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
615  struct xhci_td *cur_td, bool flip_cycle)
616 {
617  struct xhci_segment *cur_seg;
618  union xhci_trb *cur_trb;
619 
620  for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
621  true;
622  next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
623  if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
624  /* Unchain any chained Link TRBs, but
625  * leave the pointers intact.
626  */
627  cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
628  /* Flip the cycle bit (link TRBs can't be the first
629  * or last TRB).
630  */
631  if (flip_cycle)
632  cur_trb->generic.field[3] ^=
634  xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
635  xhci_dbg(xhci, "Address = %p (0x%llx dma); "
636  "in seg %p (0x%llx dma)\n",
637  cur_trb,
638  (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
639  cur_seg,
640  (unsigned long long)cur_seg->dma);
641  } else {
642  cur_trb->generic.field[0] = 0;
643  cur_trb->generic.field[1] = 0;
644  cur_trb->generic.field[2] = 0;
645  /* Preserve only the cycle bit of this TRB */
646  cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
647  /* Flip the cycle bit except on the first or last TRB */
648  if (flip_cycle && cur_trb != cur_td->first_trb &&
649  cur_trb != cur_td->last_trb)
650  cur_trb->generic.field[3] ^=
651  cpu_to_le32(TRB_CYCLE);
652  cur_trb->generic.field[3] |= cpu_to_le32(
653  TRB_TYPE(TRB_TR_NOOP));
654  xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
655  (unsigned long long)
656  xhci_trb_virt_to_dma(cur_seg, cur_trb));
657  }
658  if (cur_trb == cur_td->last_trb)
659  break;
660  }
661 }
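/*
 * Editor's note: the per-TRB transformation td_to_noop() applies to non-link
 * TRBs, pulled out here as a hypothetical helper for clarity. Only the cycle
 * bit of field[3] is preserved; the type field is rewritten to Transfer No Op.
 */
static inline void example_trb_to_noop(union xhci_trb *trb)
{
	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
}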
662 
663 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
664  unsigned int ep_index, unsigned int stream_id,
665  struct xhci_segment *deq_seg,
666  union xhci_trb *deq_ptr, u32 cycle_state);
667 
668 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
669  unsigned int slot_id, unsigned int ep_index,
670  unsigned int stream_id,
671  struct xhci_dequeue_state *deq_state)
672 {
673  struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
674 
675  xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
676  "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
677  deq_state->new_deq_seg,
678  (unsigned long long)deq_state->new_deq_seg->dma,
679  deq_state->new_deq_ptr,
680  (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
681  deq_state->new_cycle_state);
682  queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
683  deq_state->new_deq_seg,
684  deq_state->new_deq_ptr,
685  (u32) deq_state->new_cycle_state);
686  /* Stop the TD queueing code from ringing the doorbell until
687  * this command completes. The HC won't set the dequeue pointer
688  * if the ring is running, and ringing the doorbell starts the
689  * ring running.
690  */
691  ep->ep_state |= SET_DEQ_PENDING;
692 }
693 
694 static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
695  struct xhci_virt_ep *ep)
696 {
697  ep->ep_state &= ~EP_HALT_PENDING;
698  /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
699  * timer is running on another CPU, we don't decrement stop_cmds_pending
700  * (since we didn't successfully stop the watchdog timer).
701  */
702  if (del_timer(&ep->stop_cmd_timer))
703  ep->stop_cmds_pending--;
704 }
705 
706 /* Must be called with xhci->lock held in interrupt context */
707 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
708  struct xhci_td *cur_td, int status, char *adjective)
709 {
710  struct usb_hcd *hcd;
711  struct urb *urb;
712  struct urb_priv *urb_priv;
713 
714  urb = cur_td->urb;
715  urb_priv = urb->hcpriv;
716  urb_priv->td_cnt++;
717  hcd = bus_to_hcd(urb->dev->bus);
718 
719  /* Only giveback urb when this is the last td in urb */
720  if (urb_priv->td_cnt == urb_priv->length) {
721  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
722  xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
723  if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
724  if (xhci->quirks & XHCI_AMD_PLL_FIX)
725  usb_amd_quirk_pll_enable();
726  }
727  }
728  usb_hcd_unlink_urb_from_ep(hcd, urb);
729 
730  spin_unlock(&xhci->lock);
731  usb_hcd_giveback_urb(hcd, urb, status);
732  xhci_urb_free_priv(xhci, urb_priv);
733  spin_lock(&xhci->lock);
734  }
735 }
736 
737 /*
738  * When we get a command completion for a Stop Endpoint Command, we need to
739  * unlink any cancelled TDs from the ring. There are two ways to do that:
740  *
741  * 1. If the HW was in the middle of processing the TD that needs to be
742  * cancelled, then we must move the ring's dequeue pointer past the last TRB
743  * in the TD with a Set Dequeue Pointer Command.
744  * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
745  * bit cleared) so that the HW will skip over them.
746  */
747 static void handle_stopped_endpoint(struct xhci_hcd *xhci,
748  union xhci_trb *trb, struct xhci_event_cmd *event)
749 {
750  unsigned int slot_id;
751  unsigned int ep_index;
752  struct xhci_virt_device *virt_dev;
753  struct xhci_ring *ep_ring;
754  struct xhci_virt_ep *ep;
755  struct list_head *entry;
756  struct xhci_td *cur_td = NULL;
757  struct xhci_td *last_unlinked_td;
758 
759  struct xhci_dequeue_state deq_state;
760 
761  if (unlikely(TRB_TO_SUSPEND_PORT(
762  le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
763  slot_id = TRB_TO_SLOT_ID(
764  le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
765  virt_dev = xhci->devs[slot_id];
766  if (virt_dev)
767  handle_cmd_in_cmd_wait_list(xhci, virt_dev,
768  event);
769  else
770  xhci_warn(xhci, "Stop endpoint command "
771  "completion for disabled slot %u\n",
772  slot_id);
773  return;
774  }
775 
776  memset(&deq_state, 0, sizeof(deq_state));
777  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
778  ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
779  ep = &xhci->devs[slot_id]->eps[ep_index];
780 
781  if (list_empty(&ep->cancelled_td_list)) {
782  xhci_stop_watchdog_timer_in_irq(xhci, ep);
783  ep->stopped_td = NULL;
784  ep->stopped_trb = NULL;
785  ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
786  return;
787  }
788 
789  /* Fix up the ep ring first, so HW stops executing cancelled TDs.
790  * We have the xHCI lock, so nothing can modify this list until we drop
791  * it. We're also in the event handler, so we can't get re-interrupted
792  * if another Stop Endpoint command completes
793  */
794  list_for_each(entry, &ep->cancelled_td_list) {
795  cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
796  xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
797  (unsigned long long)xhci_trb_virt_to_dma(
798  cur_td->start_seg, cur_td->first_trb));
799  ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
800  if (!ep_ring) {
801  /* This shouldn't happen unless a driver is mucking
802  * with the stream ID after submission. This will
803  * leave the TD on the hardware ring, and the hardware
804  * will try to execute it, and may access a buffer
805  * that has already been freed. In the best case, the
806  * hardware will execute it, and the event handler will
807  * ignore the completion event for that TD, since it was
808  * removed from the td_list for that endpoint. In
809  * short, don't muck with the stream ID after
810  * submission.
811  */
812  xhci_warn(xhci, "WARN Cancelled URB %p "
813  "has invalid stream ID %u.\n",
814  cur_td->urb,
815  cur_td->urb->stream_id);
816  goto remove_finished_td;
817  }
818  /*
819  * If we stopped on the TD we need to cancel, then we have to
820  * move the xHC endpoint ring dequeue pointer past this TD.
821  */
822  if (cur_td == ep->stopped_td)
823  xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
824  cur_td->urb->stream_id,
825  cur_td, &deq_state);
826  else
827  td_to_noop(xhci, ep_ring, cur_td, false);
828 remove_finished_td:
829  /*
830  * The event handler won't see a completion for this TD anymore,
831  * so remove it from the endpoint ring's TD list. Keep it in
832  * the cancelled TD list for URB completion later.
833  */
834  list_del_init(&cur_td->td_list);
835  }
836  last_unlinked_td = cur_td;
837  xhci_stop_watchdog_timer_in_irq(xhci, ep);
838 
839  /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
840  if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
841  xhci_queue_new_dequeue_state(xhci,
842  slot_id, ep_index,
843  ep->stopped_td->urb->stream_id,
844  &deq_state);
845  xhci_ring_cmd_db(xhci);
846  } else {
847  /* Otherwise ring the doorbell(s) to restart queued transfers */
848  ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
849  }
850  ep->stopped_td = NULL;
851  ep->stopped_trb = NULL;
852 
853  /*
854  * Drop the lock and complete the URBs in the cancelled TD list.
855  * New TDs to be cancelled might be added to the end of the list before
856  * we can complete all the URBs for the TDs we already unlinked.
857  * So stop when we've completed the URB for the last TD we unlinked.
858  */
859  do {
860  cur_td = list_entry(ep->cancelled_td_list.next,
861  struct xhci_td, cancelled_td_list);
862  list_del_init(&cur_td->cancelled_td_list);
863 
864  /* Clean up the cancelled URB */
865  /* Doesn't matter what we pass for status, since the core will
866  * just overwrite it (because the URB has been unlinked).
867  */
868  xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
869 
870  /* Stop processing the cancelled list if the watchdog timer is
871  * running.
872  */
873  if (xhci->xhc_state & XHCI_STATE_DYING)
874  return;
875  } while (cur_td != last_unlinked_td);
876 
877  /* Return to the event handler with xhci->lock re-acquired */
878 }
879 
880 /* Watchdog timer function for when a stop endpoint command fails to complete.
881  * In this case, we assume the host controller is broken or dying or dead. The
882  * host may still be completing some other events, so we have to be careful to
883  * let the event ring handler and the URB dequeueing/enqueueing functions know
884  * through xhci->state.
885  *
886  * The timer may also fire if the host takes a very long time to respond to the
887  * command, and the stop endpoint command completion handler cannot delete the
888  * timer before the timer function is called. Another endpoint cancellation may
889  * sneak in before the timer function can grab the lock, and that may queue
890  * another stop endpoint command and add the timer back. So we cannot use a
891  * simple flag to say whether there is a pending stop endpoint command for a
892  * particular endpoint.
893  *
894  * Instead we use a combination of that flag and a counter for the number of
895  * pending stop endpoint commands. If the timer is the tail end of the last
896  * stop endpoint command, and the endpoint's command is still pending, we assume
897  * the host is dying.
898  */
899 void xhci_stop_endpoint_command_watchdog(unsigned long arg)
900 {
901  struct xhci_hcd *xhci;
902  struct xhci_virt_ep *ep;
903  struct xhci_virt_ep *temp_ep;
904  struct xhci_ring *ring;
905  struct xhci_td *cur_td;
906  int ret, i, j;
907  unsigned long flags;
908 
909  ep = (struct xhci_virt_ep *) arg;
910  xhci = ep->xhci;
911 
912  spin_lock_irqsave(&xhci->lock, flags);
913 
914  ep->stop_cmds_pending--;
915  if (xhci->xhc_state & XHCI_STATE_DYING) {
916  xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
917  "xHCI as DYING, exiting.\n");
918  spin_unlock_irqrestore(&xhci->lock, flags);
919  return;
920  }
921  if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
922  xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
923  "exiting.\n");
924  spin_unlock_irqrestore(&xhci->lock, flags);
925  return;
926  }
927 
928  xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
929  xhci_warn(xhci, "Assuming host is dying, halting host.\n");
930  /* Oops, HC is dead or dying or at least not responding to the stop
931  * endpoint command.
932  */
933  xhci->xhc_state |= XHCI_STATE_DYING;
934  /* Disable interrupts from the host controller and start halting it */
935  xhci_quiesce(xhci);
936  spin_unlock_irqrestore(&xhci->lock, flags);
937 
938  ret = xhci_halt(xhci);
939 
940  spin_lock_irqsave(&xhci->lock, flags);
941  if (ret < 0) {
942  /* This is bad; the host is not responding to commands and it's
943  * not allowing itself to be halted. At least interrupts are
944  * disabled. If we call usb_hc_died(), it will attempt to
945  * disconnect all device drivers under this host. Those
946  * disconnect() methods will wait for all URBs to be unlinked,
947  * so we must complete them.
948  */
949  xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
950  xhci_warn(xhci, "Completing active URBs anyway.\n");
951  /* We could turn all TDs on the rings to no-ops. This won't
952  * help if the host has cached part of the ring, and is slow if
953  * we want to preserve the cycle bit. Skip it and hope the host
954  * doesn't touch the memory.
955  */
956  }
957  for (i = 0; i < MAX_HC_SLOTS; i++) {
958  if (!xhci->devs[i])
959  continue;
960  for (j = 0; j < 31; j++) {
961  temp_ep = &xhci->devs[i]->eps[j];
962  ring = temp_ep->ring;
963  if (!ring)
964  continue;
965  xhci_dbg(xhci, "Killing URBs for slot ID %u, "
966  "ep index %u\n", i, j);
967  while (!list_empty(&ring->td_list)) {
968  cur_td = list_first_entry(&ring->td_list,
969  struct xhci_td,
970  td_list);
971  list_del_init(&cur_td->td_list);
972  if (!list_empty(&cur_td->cancelled_td_list))
973  list_del_init(&cur_td->cancelled_td_list);
974  xhci_giveback_urb_in_irq(xhci, cur_td,
975  -ESHUTDOWN, "killed");
976  }
977  while (!list_empty(&temp_ep->cancelled_td_list)) {
978  cur_td = list_first_entry(
979  &temp_ep->cancelled_td_list,
980  struct xhci_td,
981  cancelled_td_list);
982  list_del_init(&cur_td->cancelled_td_list);
983  xhci_giveback_urb_in_irq(xhci, cur_td,
984  -ESHUTDOWN, "killed");
985  }
986  }
987  }
988  spin_unlock_irqrestore(&xhci->lock, flags);
989  xhci_dbg(xhci, "Calling usb_hc_died()\n");
990  usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
991  xhci_dbg(xhci, "xHCI host controller is dead.\n");
992 }
993 
994 
995 static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
996  struct xhci_virt_device *dev,
997  struct xhci_ring *ep_ring,
998  unsigned int ep_index)
999 {
1000  union xhci_trb *dequeue_temp;
1001  int num_trbs_free_temp;
1002  bool revert = false;
1003 
1004  num_trbs_free_temp = ep_ring->num_trbs_free;
1005  dequeue_temp = ep_ring->dequeue;
1006 
1007  /* If we get two back-to-back stalls, and the first stalled transfer
1008  * ends just before a link TRB, the dequeue pointer will be left on
1009  * the link TRB by the code in the while loop. So we have to update
1010  * the dequeue pointer one segment further, or we'll jump off
1011  * the segment into la-la-land.
1012  */
1013  if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
1014  ep_ring->deq_seg = ep_ring->deq_seg->next;
1015  ep_ring->dequeue = ep_ring->deq_seg->trbs;
1016  }
1017 
1018  while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1019  /* We have more usable TRBs */
1020  ep_ring->num_trbs_free++;
1021  ep_ring->dequeue++;
1022  if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
1023  ep_ring->dequeue)) {
1024  if (ep_ring->dequeue ==
1025  dev->eps[ep_index].queued_deq_ptr)
1026  break;
1027  ep_ring->deq_seg = ep_ring->deq_seg->next;
1028  ep_ring->dequeue = ep_ring->deq_seg->trbs;
1029  }
1030  if (ep_ring->dequeue == dequeue_temp) {
1031  revert = true;
1032  break;
1033  }
1034  }
1035 
1036  if (revert) {
1037  xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1038  ep_ring->num_trbs_free = num_trbs_free_temp;
1039  }
1040 }
1041 
1042 /*
1043  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1044  * we need to clear the set deq pending flag in the endpoint ring state, so that
1045  * the TD queueing code can ring the doorbell again. We also need to ring the
1046  * endpoint doorbell to restart the ring, but only if there aren't more
1047  * cancellations pending.
1048  */
1049 static void handle_set_deq_completion(struct xhci_hcd *xhci,
1050  struct xhci_event_cmd *event,
1051  union xhci_trb *trb)
1052 {
1053  unsigned int slot_id;
1054  unsigned int ep_index;
1055  unsigned int stream_id;
1056  struct xhci_ring *ep_ring;
1057  struct xhci_virt_device *dev;
1058  struct xhci_ep_ctx *ep_ctx;
1059  struct xhci_slot_ctx *slot_ctx;
1060 
1061  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1062  ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1063  stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1064  dev = xhci->devs[slot_id];
1065 
1066  ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
1067  if (!ep_ring) {
1068  xhci_warn(xhci, "WARN Set TR deq ptr command for "
1069  "freed stream ID %u\n",
1070  stream_id);
1071  /* XXX: Harmless??? */
1072  dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1073  return;
1074  }
1075 
1076  ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
1077  slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
1078 
1079  if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
1080  unsigned int ep_state;
1081  unsigned int slot_state;
1082 
1083  switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
1084  case COMP_TRB_ERR:
1085  xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
1086  "of stream ID configuration\n");
1087  break;
1088  case COMP_CTX_STATE:
1089  xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
1090  "to incorrect slot or ep state.\n");
1091  ep_state = le32_to_cpu(ep_ctx->ep_info);
1092  ep_state &= EP_STATE_MASK;
1093  slot_state = le32_to_cpu(slot_ctx->dev_state);
1094  slot_state = GET_SLOT_STATE(slot_state);
1095  xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
1096  slot_state, ep_state);
1097  break;
1098  case COMP_EBADSLT:
1099  xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
1100  "slot %u was not enabled.\n", slot_id);
1101  break;
1102  default:
1103  xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
1104  "completion code of %u.\n",
1105  GET_COMP_CODE(le32_to_cpu(event->status)));
1106  break;
1107  }
1108  /* OK what do we do now? The endpoint state is hosed, and we
1109  * should never get to this point if the synchronization between
1110  * queueing, and endpoint state are correct. This might happen
1111  * if the device gets disconnected after we've finished
1112  * cancelling URBs, which might not be an error...
1113  */
1114  } else {
1115  xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
1116  le64_to_cpu(ep_ctx->deq));
1117  if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
1118  dev->eps[ep_index].queued_deq_ptr) ==
1119  (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
1120  /* Update the ring's dequeue segment and dequeue pointer
1121  * to reflect the new position.
1122  */
1123  update_ring_for_set_deq_completion(xhci, dev,
1124  ep_ring, ep_index);
1125  } else {
1126  xhci_warn(xhci, "Mismatch between completed Set TR Deq "
1127  "Ptr command & xHCI internal state.\n");
1128  xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1129  dev->eps[ep_index].queued_deq_seg,
1130  dev->eps[ep_index].queued_deq_ptr);
1131  }
1132  }
1133 
1134  dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
1135  dev->eps[ep_index].queued_deq_seg = NULL;
1136  dev->eps[ep_index].queued_deq_ptr = NULL;
1137  /* Restart any rings with pending URBs */
1138  ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1139 }
1140 
1141 static void handle_reset_ep_completion(struct xhci_hcd *xhci,
1142  struct xhci_event_cmd *event,
1143  union xhci_trb *trb)
1144 {
1145  int slot_id;
1146  unsigned int ep_index;
1147 
1148  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
1149  ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1150  /* This command will only fail if the endpoint wasn't halted,
1151  * but we don't care.
1152  */
1153  xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
1154  GET_COMP_CODE(le32_to_cpu(event->status)));
1155 
1156  /* HW with the reset endpoint quirk needs to have a configure endpoint
1157  * command complete before the endpoint can be used. Queue that here
1158  * because the HW can't handle two commands being queued in a row.
1159  */
1160  if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
1161  xhci_dbg(xhci, "Queueing configure endpoint command\n");
1162  xhci_queue_configure_endpoint(xhci,
1163  xhci->devs[slot_id]->in_ctx->dma, slot_id,
1164  false);
1165  xhci_ring_cmd_db(xhci);
1166  } else {
1167  /* Clear our internal halted state and restart the ring(s) */
1168  xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1169  ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1170  }
1171 }
1172 
1173 /* Complete the command and delete it from the device's command queue.
1174  */
1175 static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1176  struct xhci_command *command, u32 status)
1177 {
1178  command->status = status;
1179  list_del(&command->cmd_list);
1180  if (command->completion)
1181  complete(command->completion);
1182  else
1183  xhci_free_command(xhci, command);
1184 }
1185 
1186 
1187 /* Check to see if a command in the device's command queue matches this one.
1188  * Signal the completion or free the command, and return 1. Return 0 if the
1189  * completed command isn't at the head of the command list.
1190  */
1191 static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
1192  struct xhci_virt_device *virt_dev,
1193  struct xhci_event_cmd *event)
1194 {
1195  struct xhci_command *command;
1196 
1197  if (list_empty(&virt_dev->cmd_list))
1198  return 0;
1199 
1200  command = list_entry(virt_dev->cmd_list.next,
1201  struct xhci_command, cmd_list);
1202  if (xhci->cmd_ring->dequeue != command->command_trb)
1203  return 0;
1204 
1205  xhci_complete_cmd_in_cmd_wait_list(xhci, command,
1206  GET_COMP_CODE(le32_to_cpu(event->status)));
1207  return 1;
1208 }
1209 
1210 /*
1211  * Find the command trb that needs to be cancelled and modify it to a
1212  * No Op command. If the command is on the device's command wait
1213  * list, finish it and free it.
1214  *
1215  * If we can't find the command trb, we assume it has already been
1216  * executed.
1217  */
1218 static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
1219 {
1220  struct xhci_segment *cur_seg;
1221  union xhci_trb *cmd_trb;
1222  u32 cycle_state;
1223 
1224  if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
1225  return;
1226 
1227  /* find the current segment of command ring */
1228  cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
1229  xhci->cmd_ring->dequeue, &cycle_state);
1230 
1231  if (!cur_seg) {
1232  xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
1233  xhci->cmd_ring->dequeue,
1234  (unsigned long long)
1235  xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1236  xhci->cmd_ring->dequeue));
1237  xhci_debug_ring(xhci, xhci->cmd_ring);
1238  xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
1239  return;
1240  }
1241 
1242  /* find the command trb matched by cd from command ring */
1243  for (cmd_trb = xhci->cmd_ring->dequeue;
1244  cmd_trb != xhci->cmd_ring->enqueue;
1245  next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
1246  /* If the trb is link trb, continue */
1247  if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
1248  continue;
1249 
1250  if (cur_cd->cmd_trb == cmd_trb) {
1251 
1252  /* If the command in device's command list, we should
1253  * finish it and free the command structure.
1254  */
1255  if (cur_cd->command)
1256  xhci_complete_cmd_in_cmd_wait_list(xhci,
1257  cur_cd->command, COMP_CMD_STOP);
1258 
1259  /* get cycle state from the origin command trb */
1260  cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
1261  & TRB_CYCLE;
1262 
1263  /* modify the command trb to NO OP command */
1264  cmd_trb->generic.field[0] = 0;
1265  cmd_trb->generic.field[1] = 0;
1266  cmd_trb->generic.field[2] = 0;
1267  cmd_trb->generic.field[3] = cpu_to_le32(
1268  TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1269  break;
1270  }
1271  }
1272 }
1273 
1274 static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
1275 {
1276  struct xhci_cd *cur_cd, *next_cd;
1277 
1278  if (list_empty(&xhci->cancel_cmd_list))
1279  return;
1280 
1281  list_for_each_entry_safe(cur_cd, next_cd,
1282  &xhci->cancel_cmd_list, cancel_cmd_list) {
1283  xhci_cmd_to_noop(xhci, cur_cd);
1284  list_del(&cur_cd->cancel_cmd_list);
1285  kfree(cur_cd);
1286  }
1287 }
1288 
1289 /*
1290  * Traverse the cancel_cmd_list. If the command descriptor matching
1291  * cmd_trb is found, the function frees it and returns 1; otherwise it
1292  * returns 0.
1293  */
1294 static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
1295  union xhci_trb *cmd_trb)
1296 {
1297  struct xhci_cd *cur_cd, *next_cd;
1298 
1299  if (list_empty(&xhci->cancel_cmd_list))
1300  return 0;
1301 
1302  list_for_each_entry_safe(cur_cd, next_cd,
1303  &xhci->cancel_cmd_list, cancel_cmd_list) {
1304  if (cur_cd->cmd_trb == cmd_trb) {
1305  if (cur_cd->command)
1306  xhci_complete_cmd_in_cmd_wait_list(xhci,
1307  cur_cd->command, COMP_CMD_STOP);
1308  list_del(&cur_cd->cancel_cmd_list);
1309  kfree(cur_cd);
1310  return 1;
1311  }
1312  }
1313 
1314  return 0;
1315 }
1316 
1317 /*
1318  * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
1319  * trb pointed to by the command ring dequeue pointer is the trb we want
1320  * to cancel. If the cmd_trb_comp_code is COMP_CMD_STOP, we also
1321  * traverse the cancel_cmd_list to turn all of the commands that have a
1322  * command descriptor into NO-OP trbs.
1323  */
1324 static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1325  int cmd_trb_comp_code)
1326 {
1327  int cur_trb_is_good = 0;
1328 
1329  /* Search for the cmd trb pointed to by the command ring dequeue
1330  * pointer in the command descriptor list. If it is found, free it.
1331  */
1332  cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
1333  xhci->cmd_ring->dequeue);
1334 
1335  if (cmd_trb_comp_code == COMP_CMD_ABORT)
1336  xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1337  else if (cmd_trb_comp_code == COMP_CMD_STOP) {
1338  /* traversing the cancel_cmd_list and canceling
1339  * the command according to command descriptor
1340  */
1341  xhci_cancel_cmd_in_cd_list(xhci);
1342 
1343  xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1344  /*
1345  * ring command ring doorbell again to restart the
1346  * command ring
1347  */
1348  if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
1349  xhci_ring_cmd_db(xhci);
1350  }
1351  return cur_trb_is_good;
1352 }
1353 
1354 static void handle_cmd_completion(struct xhci_hcd *xhci,
1355  struct xhci_event_cmd *event)
1356 {
1357  int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1358  u64 cmd_dma;
1359  dma_addr_t cmd_dequeue_dma;
1360  struct xhci_input_control_ctx *ctrl_ctx;
1361  struct xhci_virt_device *virt_dev;
1362  unsigned int ep_index;
1363  struct xhci_ring *ep_ring;
1364  unsigned int ep_state;
1365 
1366  cmd_dma = le64_to_cpu(event->cmd_trb);
1367  cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1368  xhci->cmd_ring->dequeue);
1369  /* Is the command ring deq ptr out of sync with the deq seg ptr? */
1370  if (cmd_dequeue_dma == 0) {
1371  xhci->error_bitmask |= 1 << 4;
1372  return;
1373  }
1374  /* Does the DMA address match our internal dequeue pointer address? */
1375  if (cmd_dma != (u64) cmd_dequeue_dma) {
1376  xhci->error_bitmask |= 1 << 5;
1377  return;
1378  }
1379 
1380  if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
1381  (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
1382  /* If the return value is 0, we think the trb pointed to by the
1383  * command ring dequeue pointer is a good trb. A good
1384  * trb means we don't want to cancel the trb, but it has
1385  * been stopped by the host. So we should handle it normally.
1386  * Otherwise, driver should invoke inc_deq() and return.
1387  */
1388  if (handle_stopped_cmd_ring(xhci,
1389  GET_COMP_CODE(le32_to_cpu(event->status)))) {
1390  inc_deq(xhci, xhci->cmd_ring);
1391  return;
1392  }
1393  }
1394 
1395  switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
1396  & TRB_TYPE_BITMASK) {
1397  case TRB_TYPE(TRB_ENABLE_SLOT):
1398  if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
1399  xhci->slot_id = slot_id;
1400  else
1401  xhci->slot_id = 0;
1402  complete(&xhci->addr_dev);
1403  break;
1404  case TRB_TYPE(TRB_DISABLE_SLOT):
1405  if (xhci->devs[slot_id]) {
1406  if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1407  /* Delete default control endpoint resources */
1408  xhci_free_device_endpoint_resources(xhci,
1409  xhci->devs[slot_id], true);
1410  xhci_free_virt_device(xhci, slot_id);
1411  }
1412  break;
1413  case TRB_TYPE(TRB_CONFIG_EP):
1414  virt_dev = xhci->devs[slot_id];
1415  if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1416  break;
1417  /*
1418  * Configure endpoint commands can come from the USB core
1419  * configuration or alt setting changes, or because the HW
1420  * needed an extra configure endpoint command after a reset
1421  * endpoint command or streams were being configured.
1422  * If the command was for a halted endpoint, the xHCI driver
1423  * is not waiting on the configure endpoint command.
1424  */
1425  ctrl_ctx = xhci_get_input_control_ctx(xhci,
1426  virt_dev->in_ctx);
1427  /* Input ctx add_flags are the endpoint index plus one */
1428  ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
1429  /* A usb_set_interface() call directly after clearing a halted
1430  * condition may race on this quirky hardware. Not worth
1431  * worrying about, since this is prototype hardware. Not sure
1432  * if this will work for streams, but streams support was
1433  * untested on this prototype.
1434  */
1435  if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1436  ep_index != (unsigned int) -1 &&
1437  le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
1438  le32_to_cpu(ctrl_ctx->drop_flags)) {
1439  ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
1440  ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1441  if (!(ep_state & EP_HALTED))
1442  goto bandwidth_change;
1443  xhci_dbg(xhci, "Completed config ep cmd - "
1444  "last ep index = %d, state = %d\n",
1445  ep_index, ep_state);
1446  /* Clear internal halted state and restart ring(s) */
1447  xhci->devs[slot_id]->eps[ep_index].ep_state &=
1448  ~EP_HALTED;
1449  ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1450  break;
1451  }
1452 bandwidth_change:
1453  xhci_dbg(xhci, "Completed config ep cmd\n");
1454  xhci->devs[slot_id]->cmd_status =
1455  GET_COMP_CODE(le32_to_cpu(event->status));
1456  complete(&xhci->devs[slot_id]->cmd_completion);
1457  break;
1458  case TRB_TYPE(TRB_EVAL_CONTEXT):
1459  virt_dev = xhci->devs[slot_id];
1460  if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
1461  break;
1462  xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1463  complete(&xhci->devs[slot_id]->cmd_completion);
1464  break;
1465  case TRB_TYPE(TRB_ADDR_DEV):
1466  xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
1467  complete(&xhci->addr_dev);
1468  break;
1469  case TRB_TYPE(TRB_STOP_RING):
1470  handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
1471  break;
1472  case TRB_TYPE(TRB_SET_DEQ):
1473  handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
1474  break;
1475  case TRB_TYPE(TRB_CMD_NOOP):
1476  break;
1477  case TRB_TYPE(TRB_RESET_EP):
1478  handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
1479  break;
1480  case TRB_TYPE(TRB_RESET_DEV):
1481  xhci_dbg(xhci, "Completed reset device command.\n");
1482  slot_id = TRB_TO_SLOT_ID(
1483  le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
1484  virt_dev = xhci->devs[slot_id];
1485  if (virt_dev)
1486  handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
1487  else
1488  xhci_warn(xhci, "Reset device command completion "
1489  "for disabled slot %u\n", slot_id);
1490  break;
1491  case TRB_TYPE(TRB_NEC_GET_FW):
1492  if (!(xhci->quirks & XHCI_NEC_HOST)) {
1493  xhci->error_bitmask |= 1 << 6;
1494  break;
1495  }
1496  xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
1497  NEC_FW_MAJOR(le32_to_cpu(event->status)),
1498  NEC_FW_MINOR(le32_to_cpu(event->status)));
1499  break;
1500  default:
1501  /* Skip over unknown commands on the event ring */
1502  xhci->error_bitmask |= 1 << 6;
1503  break;
1504  }
1505  inc_deq(xhci, xhci->cmd_ring);
1506 }
1507 
1508 static void handle_vendor_event(struct xhci_hcd *xhci,
1509  union xhci_trb *event)
1510 {
1511  u32 trb_type;
1512 
1513  trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
1514  xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1515  if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1516  handle_cmd_completion(xhci, &event->event_cmd);
1517 }
1518 
1519 /* @port_id: the one-based port ID from the hardware (indexed from array of all
1520  * port registers -- USB 3.0 and USB 2.0).
1521  *
1522  * Returns a zero-based port number, which is suitable for indexing into each of
1523  * the split roothubs' port arrays and bus state arrays.
1524  * Add one to it in order to call xhci_find_slot_id_by_port.
1525  */
1526 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1527  struct xhci_hcd *xhci, u32 port_id)
1528 {
1529  unsigned int i;
1530  unsigned int num_similar_speed_ports = 0;
1531 
1532  /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
1533  * and usb2_ports are 0-based indexes. Count the number of similar
1534  * speed ports, up to 1 port before this port.
1535  */
1536  for (i = 0; i < (port_id - 1); i++) {
1537  u8 port_speed = xhci->port_array[i];
1538 
1539  /*
1540  * Skip ports that don't have known speeds, or have duplicate
1541  * Extended Capabilities port speed entries.
1542  */
1543  if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1544  continue;
1545 
1546  /*
1547  * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
1548  * 1.1 ports are under the USB 2.0 hub. If the port speed
1549  * matches the device speed, it's a similar speed port.
1550  */
1551  if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
1552  num_similar_speed_ports++;
1553  }
1554  return num_similar_speed_ports;
1555 }
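/*
 * Editor's note: a concrete, hypothetical example of the counting above.
 * If port_array[] holds speeds { 0x02, 0x03, 0x02, 0x03 } and the hardware
 * reports port_id == 4 (the second USB 3.0 port), then for a USB 3.0 roothub
 * only the 0x03 entry at index 1 is counted, so the function returns 1: the
 * zero-based index of this port within the USB 3.0 roothub's port array.
 */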
1556 
1557 static void handle_device_notification(struct xhci_hcd *xhci,
1558  union xhci_trb *event)
1559 {
1560  u32 slot_id;
1561  struct usb_device *udev;
1562 
1563  slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
1564  if (!xhci->devs[slot_id]) {
1565  xhci_warn(xhci, "Device Notification event for "
1566  "unused slot %u\n", slot_id);
1567  return;
1568  }
1569 
1570  xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1571  slot_id);
1572  udev = xhci->devs[slot_id]->udev;
1573  if (udev && udev->parent)
1574  usb_wakeup_notification(udev->parent, udev->portnum);
1575 }
1576 
1577 static void handle_port_status(struct xhci_hcd *xhci,
1578  union xhci_trb *event)
1579 {
1580  struct usb_hcd *hcd;
1581  u32 port_id;
1582  u32 temp, temp1;
1583  int max_ports;
1584  int slot_id;
1585  unsigned int faked_port_index;
1586  u8 major_revision;
1587  struct xhci_bus_state *bus_state;
1588  __le32 __iomem **port_array;
1589  bool bogus_port_status = false;
1590 
1591  /* Port status change events always have a successful completion code */
1592  if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
1593  xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
1594  xhci->error_bitmask |= 1 << 8;
1595  }
1596  port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1597  xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
1598 
1599  max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1600  if ((port_id <= 0) || (port_id > max_ports)) {
1601  xhci_warn(xhci, "Invalid port id %d\n", port_id);
1602  bogus_port_status = true;
1603  goto cleanup;
1604  }
1605 
1606  /* Figure out which usb_hcd this port is attached to:
1607  * is it a USB 3.0 port or a USB 2.0/1.1 port?
1608  */
1609  major_revision = xhci->port_array[port_id - 1];
1610  if (major_revision == 0) {
1611  xhci_warn(xhci, "Event for port %u not in "
1612  "Extended Capabilities, ignoring.\n",
1613  port_id);
1614  bogus_port_status = true;
1615  goto cleanup;
1616  }
1617  if (major_revision == DUPLICATE_ENTRY) {
1618  xhci_warn(xhci, "Event for port %u duplicated in "
1619  "Extended Capabilities, ignoring.\n",
1620  port_id);
1621  bogus_port_status = true;
1622  goto cleanup;
1623  }
1624 
1625  /*
1626  * Hardware port IDs reported by a Port Status Change Event include USB
1627  * 3.0 and USB 2.0 ports. We want to check if the port has reported a
1628  * resume event, but we first need to translate the hardware port ID
1629  * into the index into the ports on the correct split roothub, and the
1630  * correct bus_state structure.
1631  */
1632  /* Find the right roothub. */
1633  hcd = xhci_to_hcd(xhci);
1634  if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
1635  hcd = xhci->shared_hcd;
1636  bus_state = &xhci->bus_state[hcd_index(hcd)];
1637  if (hcd->speed == HCD_USB3)
1638  port_array = xhci->usb3_ports;
1639  else
1640  port_array = xhci->usb2_ports;
1641  /* Find the faked port hub number */
1642  faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
1643  port_id);
1644 
1645  temp = xhci_readl(xhci, port_array[faked_port_index]);
1646  if (hcd->state == HC_STATE_SUSPENDED) {
1647  xhci_dbg(xhci, "resume root hub\n");
1648  usb_hcd_resume_root_hub(hcd);
1649  }
1650 
1651  if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1652  xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1653 
1654  temp1 = xhci_readl(xhci, &xhci->op_regs->command);
1655  if (!(temp1 & CMD_RUN)) {
1656  xhci_warn(xhci, "xHC is not running.\n");
1657  goto cleanup;
1658  }
1659 
1660  if (DEV_SUPERSPEED(temp)) {
1661  xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1662  /* Set a flag to say the port signaled remote wakeup,
1663  * so we can tell the difference between the end of a
1664  * device-initiated and a host-initiated resume.
1665  */
1666  bus_state->port_remote_wakeup |= 1 << faked_port_index;
1667  xhci_test_and_clear_bit(xhci, port_array,
1668  faked_port_index, PORT_PLC);
1669  xhci_set_link_state(xhci, port_array, faked_port_index,
1670  XDEV_U0);
1671  /* Need to wait until the next link state change
1672  * indicates the device is actually in U0.
1673  */
1674  bogus_port_status = true;
1675  goto cleanup;
1676  } else {
1677  xhci_dbg(xhci, "resume HS port %d\n", port_id);
1678  bus_state->resume_done[faked_port_index] = jiffies +
1679  msecs_to_jiffies(20);
1680  set_bit(faked_port_index, &bus_state->resuming_ports);
1681  mod_timer(&hcd->rh_timer,
1682  bus_state->resume_done[faked_port_index]);
1683  /* Do the rest in GetPortStatus */
1684  }
1685  }
1686 
1687  if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
1688  DEV_SUPERSPEED(temp)) {
1689  xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1690  /* We've just brought the device into U0 through either the
1691  * Resume state after a device remote wakeup, or through the
1692  * U3Exit state after a host-initiated resume. If it's a device
1693  * initiated remote wake, don't pass up the link state change,
1694  * so the roothub behavior is consistent with external
1695  * USB 3.0 hub behavior.
1696  */
1697  slot_id = xhci_find_slot_id_by_port(hcd, xhci,
1698  faked_port_index + 1);
1699  if (slot_id && xhci->devs[slot_id])
1700  xhci_ring_device(xhci, slot_id);
1701  if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
1702  bus_state->port_remote_wakeup &=
1703  ~(1 << faked_port_index);
1704  xhci_test_and_clear_bit(xhci, port_array,
1705  faked_port_index, PORT_PLC);
1706  usb_wakeup_notification(hcd->self.root_hub,
1707  faked_port_index + 1);
1708  bogus_port_status = true;
1709  goto cleanup;
1710  }
1711  }
1712 
1713  if (hcd->speed != HCD_USB3)
1714  xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
1715  PORT_PLC);
1716 
1717 cleanup:
1718  /* Update event ring dequeue pointer before dropping the lock */
1719  inc_deq(xhci, xhci->event_ring);
1720 
1721  /* Don't make the USB core poll the roothub if we got a bad port status
1722  * change event. Besides, at that point we can't tell which roothub
1723  * (USB 2.0 or USB 3.0) to kick.
1724  */
1725  if (bogus_port_status)
1726  return;
1727 
1728  spin_unlock(&xhci->lock);
1729  /* Pass this up to the core */
1730  usb_hcd_poll_rh_status(hcd);
1731  spin_lock(&xhci->lock);
1732 }
1733 
1734 /*
1735  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
1736  * at end_trb, which may be in another segment. If the suspect DMA address is a
1737  * TRB in this TD, this function returns that TRB's segment. Otherwise it
1738  * returns NULL.
1739  */
1740 struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1741  union xhci_trb *start_trb,
1742  union xhci_trb *end_trb,
1743  dma_addr_t suspect_dma)
1744 {
1745  dma_addr_t start_dma;
1746  dma_addr_t end_seg_dma;
1747  dma_addr_t end_trb_dma;
1748  struct xhci_segment *cur_seg;
1749 
1750  start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
1751  cur_seg = start_seg;
1752 
1753  do {
1754  if (start_dma == 0)
1755  return NULL;
1756  /* We may get an event for a Link TRB in the middle of a TD */
1757  end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
1758  &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
1759  /* If the end TRB isn't in this segment, this is set to 0 */
1760  end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
1761 
1762  if (end_trb_dma > 0) {
1763  /* The end TRB is in this segment, so suspect should be here */
1764  if (start_dma <= end_trb_dma) {
1765  if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
1766  return cur_seg;
1767  } else {
1768  /* Case for one segment with
1769  * a TD wrapped around to the top
1770  */
1771  if ((suspect_dma >= start_dma &&
1772  suspect_dma <= end_seg_dma) ||
1773  (suspect_dma >= cur_seg->dma &&
1774  suspect_dma <= end_trb_dma))
1775  return cur_seg;
1776  }
1777  return NULL;
1778  } else {
1779  /* Might still be somewhere in this segment */
1780  if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
1781  return cur_seg;
1782  }
1783  cur_seg = cur_seg->next;
1784  start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
1785  } while (cur_seg != start_seg);
1786 
1787  return NULL;
1788 }
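/*
 * An illustration of the wrap-around case handled above, using hypothetical
 * DMA values and a 64-TRB segment (16 bytes per TRB):
 *
 *   segment:  cur_seg->dma = 0x1000, end_seg_dma = 0x13f0 (last TRB)
 *   TD:       start_dma    = 0x1300, end_trb_dma = 0x1080
 *
 * Since start_dma > end_trb_dma, the TD wraps over the link TRB back to the
 * top of the same segment. A suspect_dma of 0x1340 (tail of the segment) or
 * 0x1020 (head of the segment) is part of the TD; 0x1100 is not, because it
 * lies between end_trb_dma and start_dma.
 */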
1789 
1790 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1791  unsigned int slot_id, unsigned int ep_index,
1792  unsigned int stream_id,
1793  struct xhci_td *td, union xhci_trb *event_trb)
1794 {
1795  struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1796  ep->ep_state |= EP_HALTED;
1797  ep->stopped_td = td;
1798  ep->stopped_trb = event_trb;
1799  ep->stopped_stream = stream_id;
1800 
1801  xhci_queue_reset_ep(xhci, slot_id, ep_index);
1802  xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1803 
1804  ep->stopped_td = NULL;
1805  ep->stopped_trb = NULL;
1806  ep->stopped_stream = 0;
1807 
1808  xhci_ring_cmd_db(xhci);
1809 }
1810 
1811 /* Check if an error has halted the endpoint ring. The class driver will
1812  * cleanup the halt for a non-default control endpoint if we indicate a stall.
1813  * However, a babble and other errors also halt the endpoint ring, and the class
1814  * driver won't clear the halt in that case, so we need to issue a Set Transfer
1815  * Ring Dequeue Pointer command manually.
1816  */
1817 static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1818  struct xhci_ep_ctx *ep_ctx,
1819  unsigned int trb_comp_code)
1820 {
1821  /* TRB completion codes that may require a manual halt cleanup */
1822  if (trb_comp_code == COMP_TX_ERR ||
1823  trb_comp_code == COMP_BABBLE ||
1824  trb_comp_code == COMP_SPLIT_ERR)
1825  /* The 0.95 spec says a babbling control endpoint
1826  * is not halted. The 0.96 spec says it is. Some HW
1827  * claims to be 0.95 compliant, but it halts the control
1828  * endpoint anyway. Check if a babble halted the
1829  * endpoint.
1830  */
1831  if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1832  cpu_to_le32(EP_STATE_HALTED))
1833  return 1;
1834 
1835  return 0;
1836 }
1837 
1838 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
1839 {
1840  if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1841  /* Vendor defined "informational" completion code,
1842  * treat as not-an-error.
1843  */
1844  xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1845  trb_comp_code);
1846  xhci_dbg(xhci, "Treating code as success.\n");
1847  return 1;
1848  }
1849  return 0;
1850 }
1851 
1852 /*
1853  * Finish the td processing, remove the td from td list;
1854  * Return 1 if the urb can be given back.
1855  */
1856 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1857  union xhci_trb *event_trb, struct xhci_transfer_event *event,
1858  struct xhci_virt_ep *ep, int *status, bool skip)
1859 {
1860  struct xhci_virt_device *xdev;
1861  struct xhci_ring *ep_ring;
1862  unsigned int slot_id;
1863  int ep_index;
1864  struct urb *urb = NULL;
1865  struct xhci_ep_ctx *ep_ctx;
1866  int ret = 0;
1867  struct urb_priv *urb_priv;
1868  u32 trb_comp_code;
1869 
1870  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1871  xdev = xhci->devs[slot_id];
1872  ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1873  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1874  ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1875  trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1876 
1877  if (skip)
1878  goto td_cleanup;
1879 
1880  if (trb_comp_code == COMP_STOP_INVAL ||
1881  trb_comp_code == COMP_STOP) {
1882  /* The Endpoint Stop Command completion will take care of any
1883  * stopped TDs. A stopped TD may be restarted, so don't update
1884  * the ring dequeue pointer or take this TD off any lists yet.
1885  */
1886  ep->stopped_td = td;
1887  ep->stopped_trb = event_trb;
1888  return 0;
1889  } else {
1890  if (trb_comp_code == COMP_STALL) {
1891  /* The transfer is completed from the driver's
1892  * perspective, but we need to issue a set dequeue
1893  * command for this stalled endpoint to move the dequeue
1894  * pointer past the TD. We can't do that here because
1895  * the halt condition must be cleared first. Let the
1896  * USB class driver clear the stall later.
1897  */
1898  ep->stopped_td = td;
1899  ep->stopped_trb = event_trb;
1900  ep->stopped_stream = ep_ring->stream_id;
1901  } else if (xhci_requires_manual_halt_cleanup(xhci,
1902  ep_ctx, trb_comp_code)) {
1903  /* Other types of errors halt the endpoint, but the
1904  * class driver doesn't call usb_reset_endpoint() unless
1905  * the error is -EPIPE. Clear the halted status in the
1906  * xHCI hardware manually.
1907  */
1908  xhci_cleanup_halted_endpoint(xhci,
1909  slot_id, ep_index, ep_ring->stream_id,
1910  td, event_trb);
1911  } else {
1912  /* Update ring dequeue pointer */
1913  while (ep_ring->dequeue != td->last_trb)
1914  inc_deq(xhci, ep_ring);
1915  inc_deq(xhci, ep_ring);
1916  }
1917 
1918 td_cleanup:
1919  /* Clean up the endpoint's TD list */
1920  urb = td->urb;
1921  urb_priv = urb->hcpriv;
1922 
1923  /* Do one last check of the actual transfer length.
1924  * If the host controller said we transferred more data than
1925  * the buffer length, urb->actual_length will be a very big
1926  * number (since it's unsigned). Play it safe and say we didn't
1927  * transfer anything.
1928  */
1929  if (urb->actual_length > urb->transfer_buffer_length) {
1930  xhci_warn(xhci, "URB transfer length is wrong, "
1931  "xHC issue? req. len = %u, "
1932  "act. len = %u\n",
1933  urb->transfer_buffer_length,
1934  urb->actual_length);
1935  urb->actual_length = 0;
1936  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
1937  *status = -EREMOTEIO;
1938  else
1939  *status = 0;
1940  }
1941  list_del_init(&td->td_list);
1942  /* Was this TD slated to be cancelled but completed anyway? */
1943  if (!list_empty(&td->cancelled_td_list))
1944  list_del_init(&td->cancelled_td_list);
1945 
1946  urb_priv->td_cnt++;
1947  /* Giveback the urb when all the tds are completed */
1948  if (urb_priv->td_cnt == urb_priv->length) {
1949  ret = 1;
1950  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1951  xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1952  if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1953  == 0) {
1954  if (xhci->quirks & XHCI_AMD_PLL_FIX)
1955  usb_amd_quirk_pll_enable();
1956  }
1957  }
1958  }
1959  }
1960 
1961  return ret;
1962 }
1963 
1964 /*
1965  * Process control tds, update urb status and actual_length.
1966  */
1967 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1968  union xhci_trb *event_trb, struct xhci_transfer_event *event,
1969  struct xhci_virt_ep *ep, int *status)
1970 {
1971  struct xhci_virt_device *xdev;
1972  struct xhci_ring *ep_ring;
1973  unsigned int slot_id;
1974  int ep_index;
1975  struct xhci_ep_ctx *ep_ctx;
1976  u32 trb_comp_code;
1977 
1978  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1979  xdev = xhci->devs[slot_id];
1980  ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1981  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1982  ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
1983  trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1984 
1985  switch (trb_comp_code) {
1986  case COMP_SUCCESS:
1987  if (event_trb == ep_ring->dequeue) {
1988  xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
1989  "without IOC set??\n");
1990  *status = -ESHUTDOWN;
1991  } else if (event_trb != td->last_trb) {
1992  xhci_warn(xhci, "WARN: Success on ctrl data TRB "
1993  "without IOC set??\n");
1994  *status = -ESHUTDOWN;
1995  } else {
1996  *status = 0;
1997  }
1998  break;
1999  case COMP_SHORT_TX:
2000  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2001  *status = -EREMOTEIO;
2002  else
2003  *status = 0;
2004  break;
2005  case COMP_STOP_INVAL:
2006  case COMP_STOP:
2007  return finish_td(xhci, td, event_trb, event, ep, status, false);
2008  default:
2009  if (!xhci_requires_manual_halt_cleanup(xhci,
2010  ep_ctx, trb_comp_code))
2011  break;
2012  xhci_dbg(xhci, "TRB error code %u, "
2013  "halted endpoint index = %u\n",
2014  trb_comp_code, ep_index);
2015  /* else fall through */
2016  case COMP_STALL:
2017  /* Did we transfer part of the data (middle) phase? */
2018  if (event_trb != ep_ring->dequeue &&
2019  event_trb != td->last_trb)
2020  td->urb->actual_length =
2021  td->urb->transfer_buffer_length
2022  - TRB_LEN(le32_to_cpu(event->transfer_len));
2023  else
2024  td->urb->actual_length = 0;
2025 
2026  xhci_cleanup_halted_endpoint(xhci,
2027  slot_id, ep_index, 0, td, event_trb);
2028  return finish_td(xhci, td, event_trb, event, ep, status, true);
2029  }
2030  /*
2031  * Did we transfer any data, despite the errors that might have
2032  * happened? I.e. did we get past the setup stage?
2033  */
2034  if (event_trb != ep_ring->dequeue) {
2035  /* The event was for the status stage */
2036  if (event_trb == td->last_trb) {
2037  if (td->urb->actual_length != 0) {
2038  /* Don't overwrite a previously set error code
2039  */
2040  if ((*status == -EINPROGRESS || *status == 0) &&
2041  (td->urb->transfer_flags
2042  & URB_SHORT_NOT_OK))
2043  /* Did we already see a short data
2044  * stage? */
2045  *status = -EREMOTEIO;
2046  } else {
2047  td->urb->actual_length =
2048  td->urb->transfer_buffer_length;
2049  }
2050  } else {
2051  /* Maybe the event was for the data stage? */
2052  td->urb->actual_length =
2053  td->urb->transfer_buffer_length -
2054  TRB_LEN(le32_to_cpu(event->transfer_len));
2055  xhci_dbg(xhci, "Waiting for status "
2056  "stage event\n");
2057  return 0;
2058  }
2059  }
2060 
2061  return finish_td(xhci, td, event_trb, event, ep, status, false);
2062 }
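/*
 * A note on the stage detection used above (a summary of the logic, not new
 * behavior): the first TRB of a control TD is the Setup Stage TRB, so an
 * event whose event_trb equals ep_ring->dequeue is for the setup stage; an
 * event whose event_trb equals td->last_trb is for the Status Stage TRB;
 * anything in between is for a Data Stage TRB, in which case the handler
 * records the data stage length and waits for the status stage event before
 * giving the URB back.
 */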
2063 
2064 /*
2065  * Process isochronous tds, update urb packet status and actual_length.
2066  */
2067 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2068  union xhci_trb *event_trb, struct xhci_transfer_event *event,
2069  struct xhci_virt_ep *ep, int *status)
2070 {
2071  struct xhci_ring *ep_ring;
2072  struct urb_priv *urb_priv;
2073  int idx;
2074  int len = 0;
2075  union xhci_trb *cur_trb;
2076  struct xhci_segment *cur_seg;
2077  struct usb_iso_packet_descriptor *frame;
2078  u32 trb_comp_code;
2079  bool skip_td = false;
2080 
2081  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2082  trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2083  urb_priv = td->urb->hcpriv;
2084  idx = urb_priv->td_cnt;
2085  frame = &td->urb->iso_frame_desc[idx];
2086 
2087  /* handle completion code */
2088  switch (trb_comp_code) {
2089  case COMP_SUCCESS:
2090  if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
2091  frame->status = 0;
2092  break;
2093  }
2094  if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2095  trb_comp_code = COMP_SHORT_TX;
2096  case COMP_SHORT_TX:
2097  frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2098  -EREMOTEIO : 0;
2099  break;
2100  case COMP_BW_OVER:
2101  frame->status = -ECOMM;
2102  skip_td = true;
2103  break;
2104  case COMP_BUFF_OVER:
2105  case COMP_BABBLE:
2106  frame->status = -EOVERFLOW;
2107  skip_td = true;
2108  break;
2109  case COMP_DEV_ERR:
2110  case COMP_STALL:
2111  case COMP_TX_ERR:
2112  frame->status = -EPROTO;
2113  skip_td = true;
2114  break;
2115  case COMP_STOP:
2116  case COMP_STOP_INVAL:
2117  break;
2118  default:
2119  frame->status = -1;
2120  break;
2121  }
2122 
2123  if (trb_comp_code == COMP_SUCCESS || skip_td) {
2124  frame->actual_length = frame->length;
2125  td->urb->actual_length += frame->length;
2126  } else {
2127  for (cur_trb = ep_ring->dequeue,
2128  cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
2129  next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2130  if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2131  !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2132  len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2133  }
2134  len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2135  TRB_LEN(le32_to_cpu(event->transfer_len));
2136 
2137  if (trb_comp_code != COMP_STOP_INVAL) {
2138  frame->actual_length = len;
2139  td->urb->actual_length += len;
2140  }
2141  }
2142 
2143  return finish_td(xhci, td, event_trb, event, ep, status, false);
2144 }
2145 
2146 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2147  struct xhci_transfer_event *event,
2148  struct xhci_virt_ep *ep, int *status)
2149 {
2150  struct xhci_ring *ep_ring;
2151  struct urb_priv *urb_priv;
2152  struct usb_iso_packet_descriptor *frame;
2153  int idx;
2154 
2155  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2156  urb_priv = td->urb->hcpriv;
2157  idx = urb_priv->td_cnt;
2158  frame = &td->urb->iso_frame_desc[idx];
2159 
2160  /* The transfer is partly done. */
2161  frame->status = -EXDEV;
2162 
2163  /* calc actual length */
2164  frame->actual_length = 0;
2165 
2166  /* Update ring dequeue pointer */
2167  while (ep_ring->dequeue != td->last_trb)
2168  inc_deq(xhci, ep_ring);
2169  inc_deq(xhci, ep_ring);
2170 
2171  return finish_td(xhci, td, NULL, event, ep, status, true);
2172 }
2173 
2174 /*
2175  * Process bulk and interrupt tds, update urb status and actual_length.
2176  */
2177 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
2178  union xhci_trb *event_trb, struct xhci_transfer_event *event,
2179  struct xhci_virt_ep *ep, int *status)
2180 {
2181  struct xhci_ring *ep_ring;
2182  union xhci_trb *cur_trb;
2183  struct xhci_segment *cur_seg;
2184  u32 trb_comp_code;
2185 
2186  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2187  trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2188 
2189  switch (trb_comp_code) {
2190  case COMP_SUCCESS:
2191  /* Double check that the HW transferred everything. */
2192  if (event_trb != td->last_trb ||
2193  TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2194  xhci_warn(xhci, "WARN Successful completion "
2195  "on short TX\n");
2196  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2197  *status = -EREMOTEIO;
2198  else
2199  *status = 0;
2200  if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
2201  trb_comp_code = COMP_SHORT_TX;
2202  } else {
2203  *status = 0;
2204  }
2205  break;
2206  case COMP_SHORT_TX:
2207  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2208  *status = -EREMOTEIO;
2209  else
2210  *status = 0;
2211  break;
2212  default:
2213  /* Others already handled above */
2214  break;
2215  }
2216  if (trb_comp_code == COMP_SHORT_TX)
2217  xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
2218  "%d bytes untransferred\n",
2219  td->urb->ep->desc.bEndpointAddress,
2220  td->urb->transfer_buffer_length,
2221  TRB_LEN(le32_to_cpu(event->transfer_len)));
2222  /* Fast path - was this the last TRB in the TD for this URB? */
2223  if (event_trb == td->last_trb) {
2224  if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
2225  td->urb->actual_length =
2226  td->urb->transfer_buffer_length -
2227  TRB_LEN(le32_to_cpu(event->transfer_len));
2228  if (td->urb->transfer_buffer_length <
2229  td->urb->actual_length) {
2230  xhci_warn(xhci, "HC gave bad length "
2231  "of %d bytes left\n",
2232  TRB_LEN(le32_to_cpu(event->transfer_len)));
2233  td->urb->actual_length = 0;
2234  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2235  *status = -EREMOTEIO;
2236  else
2237  *status = 0;
2238  }
2239  /* Don't overwrite a previously set error code */
2240  if (*status == -EINPROGRESS) {
2241  if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
2242  *status = -EREMOTEIO;
2243  else
2244  *status = 0;
2245  }
2246  } else {
2247  td->urb->actual_length =
2248  td->urb->transfer_buffer_length;
2249  /* Ignore a short packet completion if the
2250  * untransferred length was zero.
2251  */
2252  if (*status == -EREMOTEIO)
2253  *status = 0;
2254  }
2255  } else {
2256  /* Slow path - walk the list, starting from the dequeue
2257  * pointer, to get the actual length transferred.
2258  */
2259  td->urb->actual_length = 0;
2260  for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
2261  cur_trb != event_trb;
2262  next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
2263  if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
2264  !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
2265  td->urb->actual_length +=
2266  TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
2267  }
2268  /* If the ring didn't stop on a Link or No-op TRB, add
2269  * in the actual bytes transferred from the Normal TRB
2270  */
2271  if (trb_comp_code != COMP_STOP_INVAL)
2272  td->urb->actual_length +=
2273  TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
2274  TRB_LEN(le32_to_cpu(event->transfer_len));
2275  }
2276 
2277  return finish_td(xhci, td, event_trb, event, ep, status, false);
2278 }
2279 
2280 /*
2281  * If this function returns an error condition, it means it got a Transfer
2282  * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2283  * At this point, the host controller is probably hosed and should be reset.
2284  */
2285 static int handle_tx_event(struct xhci_hcd *xhci,
2286  struct xhci_transfer_event *event)
2287  __releases(&xhci->lock)
2288  __acquires(&xhci->lock)
2289 {
2290  struct xhci_virt_device *xdev;
2291  struct xhci_virt_ep *ep;
2292  struct xhci_ring *ep_ring;
2293  unsigned int slot_id;
2294  int ep_index;
2295  struct xhci_td *td = NULL;
2296  dma_addr_t event_dma;
2297  struct xhci_segment *event_seg;
2298  union xhci_trb *event_trb;
2299  struct urb *urb = NULL;
2300  int status = -EINPROGRESS;
2301  struct urb_priv *urb_priv;
2302  struct xhci_ep_ctx *ep_ctx;
2303  struct list_head *tmp;
2304  u32 trb_comp_code;
2305  int ret = 0;
2306  int td_num = 0;
2307 
2308  slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2309  xdev = xhci->devs[slot_id];
2310  if (!xdev) {
2311  xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
2312  xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2313  (unsigned long long) xhci_trb_virt_to_dma(
2314  xhci->event_ring->deq_seg,
2315  xhci->event_ring->dequeue),
2316  lower_32_bits(le64_to_cpu(event->buffer)),
2317  upper_32_bits(le64_to_cpu(event->buffer)),
2318  le32_to_cpu(event->transfer_len),
2319  le32_to_cpu(event->flags));
2320  xhci_dbg(xhci, "Event ring:\n");
2321  xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2322  return -ENODEV;
2323  }
2324 
2325  /* Endpoint ID is 1 based, our index is zero based */
2326  ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2327  ep = &xdev->eps[ep_index];
2328  ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
2329  ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2330  if (!ep_ring ||
2331  (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
2332  EP_STATE_DISABLED) {
2333  xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
2334  "or incorrect stream ring\n");
2335  xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2336  (unsigned long long) xhci_trb_virt_to_dma(
2337  xhci->event_ring->deq_seg,
2338  xhci->event_ring->dequeue),
2339  lower_32_bits(le64_to_cpu(event->buffer)),
2340  upper_32_bits(le64_to_cpu(event->buffer)),
2341  le32_to_cpu(event->transfer_len),
2342  le32_to_cpu(event->flags));
2343  xhci_dbg(xhci, "Event ring:\n");
2344  xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
2345  return -ENODEV;
2346  }
2347 
2348  /* Count current td numbers if ep->skip is set */
2349  if (ep->skip) {
2350  list_for_each(tmp, &ep_ring->td_list)
2351  td_num++;
2352  }
2353 
2354  event_dma = le64_to_cpu(event->buffer);
2355  trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2356  /* Look for common error cases */
2357  switch (trb_comp_code) {
2358  /* Skip codes that require special handling depending on
2359  * transfer type
2360  */
2361  case COMP_SUCCESS:
2362  if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2363  break;
2364  if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2365  trb_comp_code = COMP_SHORT_TX;
2366  else
2367  xhci_warn_ratelimited(xhci,
2368  "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
2369  case COMP_SHORT_TX:
2370  break;
2371  case COMP_STOP:
2372  xhci_dbg(xhci, "Stopped on Transfer TRB\n");
2373  break;
2374  case COMP_STOP_INVAL:
2375  xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
2376  break;
2377  case COMP_STALL:
2378  xhci_dbg(xhci, "Stalled endpoint\n");
2379  ep->ep_state |= EP_HALTED;
2380  status = -EPIPE;
2381  break;
2382  case COMP_TRB_ERR:
2383  xhci_warn(xhci, "WARN: TRB error on endpoint\n");
2384  status = -EILSEQ;
2385  break;
2386  case COMP_SPLIT_ERR:
2387  case COMP_TX_ERR:
2388  xhci_dbg(xhci, "Transfer error on endpoint\n");
2389  status = -EPROTO;
2390  break;
2391  case COMP_BABBLE:
2392  xhci_dbg(xhci, "Babble error on endpoint\n");
2393  status = -EOVERFLOW;
2394  break;
2395  case COMP_DB_ERR:
2396  xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
2397  status = -ENOSR;
2398  break;
2399  case COMP_BW_OVER:
2400  xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
2401  break;
2402  case COMP_BUFF_OVER:
2403  xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
2404  break;
2405  case COMP_UNDERRUN:
2406  /*
2407  * When the Isoch ring is empty, the xHC will generate
2408  * a Ring Overrun Event for IN Isoch endpoint or Ring
2409  * Underrun Event for OUT Isoch endpoint.
2410  */
2411  xhci_dbg(xhci, "underrun event on endpoint\n");
2412  if (!list_empty(&ep_ring->td_list))
2413  xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2414  "still with TDs queued?\n",
2415  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2416  ep_index);
2417  goto cleanup;
2418  case COMP_OVERRUN:
2419  xhci_dbg(xhci, "overrun event on endpoint\n");
2420  if (!list_empty(&ep_ring->td_list))
2421  xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2422  "still with TDs queued?\n",
2423  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2424  ep_index);
2425  goto cleanup;
2426  case COMP_DEV_ERR:
2427  xhci_warn(xhci, "WARN: detected an incompatible device\n");
2428  status = -EPROTO;
2429  break;
2430  case COMP_MISSED_INT:
2431  /*
2432  * When a Missed Service Error is encountered, one or more isoc
2433  * TDs may have been missed by the xHC.
2434  * Set the skip flag of the endpoint; complete the missed TDs as
2435  * short transfers the next time the ep_ring is processed.
2436  */
2437  ep->skip = true;
2438  xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
2439  goto cleanup;
2440  default:
2441  if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2442  status = 0;
2443  break;
2444  }
2445  xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
2446  "busted\n");
2447  goto cleanup;
2448  }
2449 
2450  do {
2451  /* This TRB should be in the TD at the head of this ring's
2452  * TD list.
2453  */
2454  if (list_empty(&ep_ring->td_list)) {
2455  xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2456  "with no TDs queued?\n",
2457  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2458  ep_index);
2459  xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2460  (le32_to_cpu(event->flags) &
2461  TRB_TYPE_BITMASK) >> 10);
2462  xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2463  if (ep->skip) {
2464  ep->skip = false;
2465  xhci_dbg(xhci, "td_list is empty while skip "
2466  "flag set. Clear skip flag.\n");
2467  }
2468  ret = 0;
2469  goto cleanup;
2470  }
2471 
2472  /* We've skipped all the TDs on the ep ring when ep->skip is set */
2473  if (ep->skip && td_num == 0) {
2474  ep->skip = false;
2475  xhci_dbg(xhci, "All tds on the ep_ring skipped. "
2476  "Clear skip flag.\n");
2477  ret = 0;
2478  goto cleanup;
2479  }
2480 
2481  td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2482  if (ep->skip)
2483  td_num--;
2484 
2485  /* Is this a TRB in the currently executing TD? */
2486  event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2487  td->last_trb, event_dma);
2488 
2489  /*
2490  * Skip the Force Stopped Event. The event_trb(event_dma) of the FSE
2491  * is not in the current TD pointed to by ep_ring->dequeue, because
2492  * the hardware dequeue pointer is still at the previous TRB
2493  * of the current TD. The previous TRB may be a Link TRB or the
2494  * last TRB of the previous TD. The command completion handler
2495  * will take care of the rest.
2496  */
2497  if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
2498  ret = 0;
2499  goto cleanup;
2500  }
2501 
2502  if (!event_seg) {
2503  if (!ep->skip ||
2504  !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2505  /* Some host controllers give a spurious
2506  * successful event after a short transfer.
2507  * Ignore it.
2508  */
2509  if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2510  ep_ring->last_td_was_short) {
2511  ep_ring->last_td_was_short = false;
2512  ret = 0;
2513  goto cleanup;
2514  }
2515  /* HC is busted, give up! */
2516  xhci_err(xhci,
2517  "ERROR Transfer event TRB DMA ptr not "
2518  "part of current TD\n");
2519  return -ESHUTDOWN;
2520  }
2521 
2522  ret = skip_isoc_td(xhci, td, event, ep, &status);
2523  goto cleanup;
2524  }
2525  if (trb_comp_code == COMP_SHORT_TX)
2526  ep_ring->last_td_was_short = true;
2527  else
2528  ep_ring->last_td_was_short = false;
2529 
2530  if (ep->skip) {
2531  xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2532  ep->skip = false;
2533  }
2534 
2535  event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2536  sizeof(*event_trb)];
2537  /*
2538  * No-op TRB should not trigger interrupts.
2539  * If event_trb is a no-op TRB, it means the
2540  * corresponding TD has been cancelled. Just ignore
2541  * the TD.
2542  */
2543  if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
2544  xhci_dbg(xhci,
2545  "event_trb is a no-op TRB. Skip it\n");
2546  goto cleanup;
2547  }
2548 
2549  /* Now update the urb's actual_length and give back to
2550  * the core
2551  */
2552  if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2553  ret = process_ctrl_td(xhci, td, event_trb, event, ep,
2554  &status);
2555  else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2556  ret = process_isoc_td(xhci, td, event_trb, event, ep,
2557  &status);
2558  else
2559  ret = process_bulk_intr_td(xhci, td, event_trb, event,
2560  ep, &status);
2561 
2562 cleanup:
2563  /*
2564  * Do not update event ring dequeue pointer if ep->skip is set.
2565  * Will roll back to continue process missed tds.
2566  */
2567  if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
2568  inc_deq(xhci, xhci->event_ring);
2569  }
2570 
2571  if (ret) {
2572  urb = td->urb;
2573  urb_priv = urb->hcpriv;
2574  /* Leave the TD around for the reset endpoint function
2575  * to use (but only if it's not a control endpoint,
2576  * since we already queued the Set TR dequeue pointer
2577  * command for stalled control endpoints).
2578  */
2579  if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2580  (trb_comp_code != COMP_STALL &&
2581  trb_comp_code != COMP_BABBLE))
2582  xhci_urb_free_priv(xhci, urb_priv);
2583 
2584  usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2585  if ((urb->actual_length != urb->transfer_buffer_length &&
2586  (urb->transfer_flags &
2587  URB_SHORT_NOT_OK)) ||
2588  (status != 0 &&
2589  !usb_endpoint_xfer_isoc(&urb->ep->desc)))
2590  xhci_dbg(xhci, "Giveback URB %p, len = %d, "
2591  "expected = %d, status = %d\n",
2592  urb, urb->actual_length,
2593  urb->transfer_buffer_length,
2594  status);
2595  spin_unlock(&xhci->lock);
2596  /* EHCI, UHCI, and OHCI always unconditionally set the
2597  * urb->status of an isochronous endpoint to 0.
2598  */
2599  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
2600  status = 0;
2601  usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
2602  spin_lock(&xhci->lock);
2603  }
2604 
2605  /*
2606  * If ep->skip is set, it means there are missed TDs on the
2607  * endpoint ring that need to be taken care of.
2608  * Process them as short transfers until we reach the TD pointed
2609  * to by the event.
2610  */
2611  } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
2612 
2613  return 0;
2614 }
2615 
2616 /*
2617  * This function handles all OS-owned events on the event ring. It may drop
2618  * xhci->lock between event processing (e.g. to pass up port status changes).
2619  * Returns >0 for "possibly more events to process" (caller should call again),
2620  * otherwise 0 if done. In future, <0 returns should indicate error code.
2621  */
2622 static int xhci_handle_event(struct xhci_hcd *xhci)
2623 {
2624  union xhci_trb *event;
2625  int update_ptrs = 1;
2626  int ret;
2627 
2628  if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2629  xhci->error_bitmask |= 1 << 1;
2630  return 0;
2631  }
2632 
2633  event = xhci->event_ring->dequeue;
2634  /* Does the HC or OS own the TRB? */
2635  if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2636  xhci->event_ring->cycle_state) {
2637  xhci->error_bitmask |= 1 << 2;
2638  return 0;
2639  }
2640 
2641  /*
2642  * Barrier between reading the TRB_CYCLE (valid) flag above and any
2643  * speculative reads of the event's flags/data below.
2644  */
2645  rmb();
2646  /* FIXME: Handle more event types. */
2647  switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
2648  case TRB_TYPE(TRB_COMPLETION):
2649  handle_cmd_completion(xhci, &event->event_cmd);
2650  break;
2651  case TRB_TYPE(TRB_PORT_STATUS):
2652  handle_port_status(xhci, event);
2653  update_ptrs = 0;
2654  break;
2655  case TRB_TYPE(TRB_TRANSFER):
2656  ret = handle_tx_event(xhci, &event->trans_event);
2657  if (ret < 0)
2658  xhci->error_bitmask |= 1 << 9;
2659  else
2660  update_ptrs = 0;
2661  break;
2662  case TRB_TYPE(TRB_DEV_NOTE):
2663  handle_device_notification(xhci, event);
2664  break;
2665  default:
2666  if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
2667  TRB_TYPE(48))
2668  handle_vendor_event(xhci, event);
2669  else
2670  xhci->error_bitmask |= 1 << 3;
2671  }
2672  /* Any of the above functions may drop and re-acquire the lock, so check
2673  * to make sure a watchdog timer didn't mark the host as non-responsive.
2674  */
2675  if (xhci->xhc_state & XHCI_STATE_DYING) {
2676  xhci_dbg(xhci, "xHCI host dying, returning from "
2677  "event handler.\n");
2678  return 0;
2679  }
2680 
2681  if (update_ptrs)
2682  /* Update SW event ring dequeue pointer */
2683  inc_deq(xhci, xhci->event_ring);
2684 
2685  /* Are there more items on the event ring? Caller will call us again to
2686  * check.
2687  */
2688  return 1;
2689 }
2690 
2691 /*
2692  * xHCI spec says we can get an interrupt, and if the HC has an error condition,
2693  * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
2694  * indicators of an event TRB error, but we check the status *first* to be safe.
2695  */
2696 irqreturn_t xhci_irq(struct usb_hcd *hcd)
2697 {
2698  struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2699  u32 status;
2700  union xhci_trb *trb;
2701  u64 temp_64;
2702  union xhci_trb *event_ring_deq;
2703  dma_addr_t deq;
2704 
2705  spin_lock(&xhci->lock);
2706  trb = xhci->event_ring->dequeue;
2707  /* Check if the xHC generated the interrupt, or the irq is shared */
2708  status = xhci_readl(xhci, &xhci->op_regs->status);
2709  if (status == 0xffffffff)
2710  goto hw_died;
2711 
2712  if (!(status & STS_EINT)) {
2713  spin_unlock(&xhci->lock);
2714  return IRQ_NONE;
2715  }
2716  if (status & STS_FATAL) {
2717  xhci_warn(xhci, "WARNING: Host System Error\n");
2718  xhci_halt(xhci);
2719 hw_died:
2720  spin_unlock(&xhci->lock);
2721  return -ESHUTDOWN;
2722  }
2723 
2724  /*
2725  * Clear the op reg interrupt status first,
2726  * so we can receive interrupts from other MSI-X interrupters.
2727  * Write 1 to clear the interrupt status.
2728  */
2729  status |= STS_EINT;
2730  xhci_writel(xhci, status, &xhci->op_regs->status);
2731  /* FIXME when MSI-X is supported and there are multiple vectors */
2732  /* Clear the MSI-X event interrupt status */
2733 
2734  if (hcd->irq) {
2735  u32 irq_pending;
2736  /* Acknowledge the PCI interrupt */
2737  irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
2738  irq_pending |= IMAN_IP;
2739  xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
2740  }
2741 
2742  if (xhci->xhc_state & XHCI_STATE_DYING) {
2743  xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
2744  "Shouldn't IRQs be disabled?\n");
2745  /* Clear the event handler busy flag (RW1C);
2746  * the event ring should be empty.
2747  */
2748  temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2749  xhci_write_64(xhci, temp_64 | ERST_EHB,
2750  &xhci->ir_set->erst_dequeue);
2751  spin_unlock(&xhci->lock);
2752 
2753  return IRQ_HANDLED;
2754  }
2755 
2756  event_ring_deq = xhci->event_ring->dequeue;
2757  /* FIXME this should be a delayed service routine
2758  * that clears the EHB.
2759  */
2760  while (xhci_handle_event(xhci) > 0) {}
2761 
2762  temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2763  /* If necessary, update the HW's version of the event ring deq ptr. */
2764  if (event_ring_deq != xhci->event_ring->dequeue) {
2765  deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2766  xhci->event_ring->dequeue);
2767  if (deq == 0)
2768  xhci_warn(xhci, "WARN something wrong with SW event "
2769  "ring dequeue ptr.\n");
2770  /* Update HC event ring dequeue pointer */
2771  temp_64 &= ERST_PTR_MASK;
2772  temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
2773  }
2774 
2775  /* Clear the event handler busy flag (RW1C); event ring is empty. */
2776  temp_64 |= ERST_EHB;
2777  xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
2778 
2779  spin_unlock(&xhci->lock);
2780 
2781  return IRQ_HANDLED;
2782 }
2783 
2784 irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
2785 {
2786  return xhci_irq(hcd);
2787 }
2788 
2789 /**** Endpoint Ring Operations ****/
2790 
2791 /*
2792  * Generic function for queueing a TRB on a ring.
2793  * The caller must have checked to make sure there's room on the ring.
2794  *
2795  * @more_trbs_coming: Will you enqueue more TRBs before calling
2796  * prepare_transfer()?
2797  */
2798 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
2799  bool more_trbs_coming,
2800  u32 field1, u32 field2, u32 field3, u32 field4)
2801 {
2802  struct xhci_generic_trb *trb;
2803 
2804  trb = &ring->enqueue->generic;
2805  trb->field[0] = cpu_to_le32(field1);
2806  trb->field[1] = cpu_to_le32(field2);
2807  trb->field[2] = cpu_to_le32(field3);
2808  trb->field[3] = cpu_to_le32(field4);
2809  inc_enq(xhci, ring, more_trbs_coming);
2810 }
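/*
 * A hedged sketch of how queue_trb() is typically used; the field values
 * below are illustrative rather than copied from a specific caller:
 *
 *	queue_trb(xhci, ep_ring, more_trbs_coming,
 *			lower_32_bits(addr),		(TRB field 0)
 *			upper_32_bits(addr),		(TRB field 1)
 *			length_field,			(length | TD size | interrupter)
 *			field | TRB_TYPE(TRB_NORMAL));	(type, cycle, chain, IOC, ...)
 *
 * The cycle bit of the first TRB of a TD is written last (see
 * giveback_first_trb() below) so the hardware does not start consuming the
 * TD before all of its TRBs are in place.
 */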
2811 
2812 /*
2813  * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
2814  * FIXME allocate segments if the ring is full.
2815  */
2816 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
2817  u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
2818 {
2819  unsigned int num_trbs_needed;
2820 
2821  /* Make sure the endpoint has been added to xHC schedule */
2822  switch (ep_state) {
2823  case EP_STATE_DISABLED:
2824  /*
2825  * USB core changed config/interfaces without notifying us,
2826  * or hardware is reporting the wrong state.
2827  */
2828  xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
2829  return -ENOENT;
2830  case EP_STATE_ERROR:
2831  xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
2832  /* FIXME event handling code for error needs to clear it */
2833  /* XXX not sure if this should be -ENOENT or not */
2834  return -EINVAL;
2835  case EP_STATE_HALTED:
2836  xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
2837  case EP_STATE_STOPPED:
2838  case EP_STATE_RUNNING:
2839  break;
2840  default:
2841  xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
2842  /*
2843  * FIXME issue Configure Endpoint command to try to get the HC
2844  * back into a known state.
2845  */
2846  return -EINVAL;
2847  }
2848 
2849  while (1) {
2850  if (room_on_ring(xhci, ep_ring, num_trbs))
2851  break;
2852 
2853  if (ep_ring == xhci->cmd_ring) {
2854  xhci_err(xhci, "Expanding the command ring is not supported\n");
2855  return -ENOMEM;
2856  }
2857 
2858  xhci_dbg(xhci, "ERROR no room on ep ring, "
2859  "try ring expansion\n");
2860  num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
2861  if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
2862  mem_flags)) {
2863  xhci_err(xhci, "Ring expansion failed\n");
2864  return -ENOMEM;
2865  }
2866  }
2867 
2868  if (enqueue_is_link_trb(ep_ring)) {
2869  struct xhci_ring *ring = ep_ring;
2870  union xhci_trb *next;
2871 
2872  next = ring->enqueue;
2873 
2874  while (last_trb(xhci, ring, ring->enq_seg, next)) {
2875  /* If we're not dealing with 0.95 hardware or isoc rings
2876  * on AMD 0.96 host, clear the chain bit.
2877  */
2878  if (!xhci_link_trb_quirk(xhci) &&
2879  !(ring->type == TYPE_ISOC &&
2880  (xhci->quirks & XHCI_AMD_0x96_HOST)))
2881  next->link.control &= cpu_to_le32(~TRB_CHAIN);
2882  else
2883  next->link.control |= cpu_to_le32(TRB_CHAIN);
2884 
2885  wmb();
2886  next->link.control ^= cpu_to_le32(TRB_CYCLE);
2887 
2888  /* Toggle the cycle bit after the last ring segment. */
2889  if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
2890  ring->cycle_state = (ring->cycle_state ? 0 : 1);
2891  }
2892  ring->enq_seg = ring->enq_seg->next;
2893  ring->enqueue = ring->enq_seg->trbs;
2894  next = ring->enqueue;
2895  }
2896  }
2897 
2898  return 0;
2899 }
2900 
2901 static int prepare_transfer(struct xhci_hcd *xhci,
2902  struct xhci_virt_device *xdev,
2903  unsigned int ep_index,
2904  unsigned int stream_id,
2905  unsigned int num_trbs,
2906  struct urb *urb,
2907  unsigned int td_index,
2908  gfp_t mem_flags)
2909 {
2910  int ret;
2911  struct urb_priv *urb_priv;
2912  struct xhci_td *td;
2913  struct xhci_ring *ep_ring;
2914  struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
2915 
2916  ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
2917  if (!ep_ring) {
2918  xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
2919  stream_id);
2920  return -EINVAL;
2921  }
2922 
2923  ret = prepare_ring(xhci, ep_ring,
2924  le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
2925  num_trbs, mem_flags);
2926  if (ret)
2927  return ret;
2928 
2929  urb_priv = urb->hcpriv;
2930  td = urb_priv->td[td_index];
2931 
2932  INIT_LIST_HEAD(&td->td_list);
2933  INIT_LIST_HEAD(&td->cancelled_td_list);
2934 
2935  if (td_index == 0) {
2936  ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
2937  if (unlikely(ret))
2938  return ret;
2939  }
2940 
2941  td->urb = urb;
2942  /* Add this TD to the tail of the endpoint ring's TD list */
2943  list_add_tail(&td->td_list, &ep_ring->td_list);
2944  td->start_seg = ep_ring->enq_seg;
2945  td->first_trb = ep_ring->enqueue;
2946 
2947  urb_priv->td[td_index] = td;
2948 
2949  return 0;
2950 }
2951 
2952 static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2953 {
2954  int num_sgs, num_trbs, running_total, temp, i;
2955  struct scatterlist *sg;
2956 
2957  sg = NULL;
2958  num_sgs = urb->num_mapped_sgs;
2959  temp = urb->transfer_buffer_length;
2960 
2961  num_trbs = 0;
2962  for_each_sg(urb->sg, sg, num_sgs, i) {
2963  unsigned int len = sg_dma_len(sg);
2964 
2965  /* Scatter gather list entries may cross 64KB boundaries */
2966  running_total = TRB_MAX_BUFF_SIZE -
2967  (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2968  running_total &= TRB_MAX_BUFF_SIZE - 1;
2969  if (running_total != 0)
2970  num_trbs++;
2971 
2972  /* How many more 64KB chunks to transfer, how many more TRBs? */
2973  while (running_total < sg_dma_len(sg) && running_total < temp) {
2974  num_trbs++;
2975  running_total += TRB_MAX_BUFF_SIZE;
2976  }
2977  len = min_t(int, len, temp);
2978  temp -= len;
2979  if (temp == 0)
2980  break;
2981  }
2982  return num_trbs;
2983 }
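/*
 * A worked example of the TRB count above, with a hypothetical sg entry and
 * TRB_MAX_BUFF_SIZE = 64KB:
 *
 *   sg_dma_address(sg) = 0x1f000, sg_dma_len(sg) = 100000 bytes
 *
 *   bytes up to the first 64KB boundary: 0x10000 - 0xf000 = 4096
 *       -> running_total = 4096 (non-zero), so one TRB
 *   while loop: 4096 -> 69632 -> 135168, one more TRB per 64KB step
 *       -> two more TRBs
 *
 * Three TRBs cover 4096 + 65536 + 65536 bytes of address space, enough for
 * the 100000-byte entry.
 */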
2984 
2985 static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2986 {
2987  if (num_trbs != 0)
2988  dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2989  "TRBs, %d left\n", __func__,
2990  urb->ep->desc.bEndpointAddress, num_trbs);
2991  if (running_total != urb->transfer_buffer_length)
2992  dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2993  "queued %#x (%d), asked for %#x (%d)\n",
2994  __func__,
2995  urb->ep->desc.bEndpointAddress,
2996  running_total, running_total,
2997  urb->transfer_buffer_length,
2998  urb->transfer_buffer_length);
2999 }
3000 
3001 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3002  unsigned int ep_index, unsigned int stream_id, int start_cycle,
3003  struct xhci_generic_trb *start_trb)
3004 {
3005  /*
3006  * Pass all the TRBs to the hardware at once and make sure this write
3007  * isn't reordered.
3008  */
3009  wmb();
3010  if (start_cycle)
3011  start_trb->field[3] |= cpu_to_le32(start_cycle);
3012  else
3013  start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3014  xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3015 }
3016 
3017 /*
3018  * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3019  * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3020  * (comprised of sg list entries) can take several service intervals to
3021  * transmit.
3022  */
3023 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3024  struct urb *urb, int slot_id, unsigned int ep_index)
3025 {
3026  struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
3027  xhci->devs[slot_id]->out_ctx, ep_index);
3028  int xhci_interval;
3029  int ep_interval;
3030 
3031  xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3032  ep_interval = urb->interval;
3033  /* Convert to microframes */
3034  if (urb->dev->speed == USB_SPEED_LOW ||
3035  urb->dev->speed == USB_SPEED_FULL)
3036  ep_interval *= 8;
3037  /* FIXME change this to a warning and a suggestion to use the new API
3038  * to set the polling interval (once the API is added).
3039  */
3040  if (xhci_interval != ep_interval) {
3041  if (printk_ratelimit())
3042  dev_dbg(&urb->dev->dev, "Driver uses different interval"
3043  " (%d microframe%s) than xHCI "
3044  "(%d microframe%s)\n",
3045  ep_interval,
3046  ep_interval == 1 ? "" : "s",
3047  xhci_interval,
3048  xhci_interval == 1 ? "" : "s");
3049  urb->interval = xhci_interval;
3050  /* Convert back to frames for LS/FS devices */
3051  if (urb->dev->speed == USB_SPEED_LOW ||
3052  urb->dev->speed == USB_SPEED_FULL)
3053  urb->interval /= 8;
3054  }
3055  return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3056 }
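/*
 * An example of the interval check above, with hypothetical values: a
 * full-speed interrupt endpoint with urb->interval = 8 frames converts to
 * 64 microframes; if the endpoint context was also programmed for 64
 * microframes, the URB is queued unchanged. If the two differ, the xHC will
 * poll at its own programmed interval, so urb->interval is overwritten (and
 * converted back to frames for LS/FS devices) to reflect what will actually
 * happen on the bus.
 */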
3057 
3058 /*
3059  * The TD size is the number of bytes remaining in the TD (including this TRB),
3060  * right shifted by 10.
3061  * It must fit in bits 21:17, so it can't be bigger than 31.
3062  */
3063 static u32 xhci_td_remainder(unsigned int remainder)
3064 {
3065  u32 max = (1 << (21 - 17 + 1)) - 1;
3066 
3067  if ((remainder >> 10) >= max)
3068  return max << 17;
3069  else
3070  return (remainder >> 10) << 17;
3071 }
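/*
 * Two worked examples of the encoding above: with 8000 bytes left in the TD,
 * 8000 >> 10 = 7, so the function returns 7 << 17; with 64KB left,
 * 65536 >> 10 = 64, which exceeds the 5-bit maximum of 31, so the function
 * returns 31 << 17.
 */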
3072 
3073 /*
3074  * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
3075  * the TD (*not* including this TRB).
3076  *
3077  * Total TD packet count = total_packet_count =
3078  * roundup(TD size in bytes / wMaxPacketSize)
3079  *
3080  * Packets transferred up to and including this TRB = packets_transferred =
3081  * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3082  *
3083  * TD size = total_packet_count - packets_transferred
3084  *
3085  * It must fit in bits 21:17, so it can't be bigger than 31.
3086  */
3087 
3088 static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
3089  unsigned int total_packet_count, struct urb *urb)
3090 {
3091  int packets_transferred;
3092 
3093  /* One TRB with a zero-length data packet. */
3094  if (running_total == 0 && trb_buff_len == 0)
3095  return 0;
3096 
3097  /* None of the TRB queueing functions count the current TRB in
3098  * running_total.
3099  */
3100  packets_transferred = (running_total + trb_buff_len) /
3101  usb_endpoint_maxp(&urb->ep->desc);
3102 
3103  return xhci_td_remainder(total_packet_count - packets_transferred);
3104 }
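/*
 * An example of the formula documented above, with hypothetical numbers: a
 * 30000-byte TD with a 512-byte wMaxPacketSize has a total packet count of
 * 59. If 2048 bytes were queued in earlier TRBs and the current TRB carries
 * 1024 bytes, packets_transferred = 3072 / 512 = 6, leaving a TD size of
 * 59 - 6 = 53 packets, which is clamped to 31 when written into bits 21:17.
 */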
3105 
3106 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3107  struct urb *urb, int slot_id, unsigned int ep_index)
3108 {
3109  struct xhci_ring *ep_ring;
3110  unsigned int num_trbs;
3111  struct urb_priv *urb_priv;
3112  struct xhci_td *td;
3113  struct scatterlist *sg;
3114  int num_sgs;
3115  int trb_buff_len, this_sg_len, running_total;
3116  unsigned int total_packet_count;
3117  bool first_trb;
3118  u64 addr;
3119  bool more_trbs_coming;
3120 
3121  struct xhci_generic_trb *start_trb;
3122  int start_cycle;
3123 
3124  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3125  if (!ep_ring)
3126  return -EINVAL;
3127 
3128  num_trbs = count_sg_trbs_needed(xhci, urb);
3129  num_sgs = urb->num_mapped_sgs;
3130  total_packet_count = roundup(urb->transfer_buffer_length,
3131  usb_endpoint_maxp(&urb->ep->desc));
3132 
3133  trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
3134  ep_index, urb->stream_id,
3135  num_trbs, urb, 0, mem_flags);
3136  if (trb_buff_len < 0)
3137  return trb_buff_len;
3138 
3139  urb_priv = urb->hcpriv;
3140  td = urb_priv->td[0];
3141 
3142  /*
3143  * Don't give the first TRB to the hardware (by toggling the cycle bit)
3144  * until we've finished creating all the other TRBs. The ring's cycle
3145  * state may change as we enqueue the other TRBs, so save it too.
3146  */
3147  start_trb = &ep_ring->enqueue->generic;
3148  start_cycle = ep_ring->cycle_state;
3149 
3150  running_total = 0;
3151  /*
3152  * How much data is in the first TRB?
3153  *
3154  * There are three forces at work for TRB buffer pointers and lengths:
3155  * 1. We don't want to walk off the end of this sg-list entry buffer.
3156  * 2. The transfer length that the driver requested may be smaller than
3157  * the amount of memory allocated for this scatter-gather list.
3158  * 3. TRB buffers can't cross 64KB boundaries (worked example after this function).
3159  */
3160  sg = urb->sg;
3161  addr = (u64) sg_dma_address(sg);
3162  this_sg_len = sg_dma_len(sg);
3163  trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
3164  trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3165  if (trb_buff_len > urb->transfer_buffer_length)
3166  trb_buff_len = urb->transfer_buffer_length;
3167 
3168  first_trb = true;
3169  /* Queue the first TRB, even if it's zero-length */
3170  do {
3171  u32 field = 0;
3172  u32 length_field = 0;
3173  u32 remainder = 0;
3174 
3175  /* Don't change the cycle bit of the first TRB until later */
3176  if (first_trb) {
3177  first_trb = false;
3178  if (start_cycle == 0)
3179  field |= 0x1;
3180  } else
3181  field |= ep_ring->cycle_state;
3182 
3183  /* Chain all the TRBs together; clear the chain bit in the last
3184  * TRB to indicate it's the last TRB in the chain.
3185  */
3186  if (num_trbs > 1) {
3187  field |= TRB_CHAIN;
3188  } else {
3189  /* FIXME - add check for ZERO_PACKET flag before this */
3190  td->last_trb = ep_ring->enqueue;
3191  field |= TRB_IOC;
3192  }
3193 
3194  /* Only set interrupt on short packet for IN endpoints */
3195  if (usb_urb_dir_in(urb))
3196  field |= TRB_ISP;
3197 
3198  if (TRB_MAX_BUFF_SIZE -
3199  (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
3200  xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
3201  xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
3202  (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
3203  (unsigned int) addr + trb_buff_len);
3204  }
3205 
3206  /* Set the TRB length, TD size, and interrupter fields. */
3207  if (xhci->hci_version < 0x100) {
3208  remainder = xhci_td_remainder(
3209  urb->transfer_buffer_length -
3210  running_total);
3211  } else {
3212  remainder = xhci_v1_0_td_remainder(running_total,
3213  trb_buff_len, total_packet_count, urb);
3214  }
3215  length_field = TRB_LEN(trb_buff_len) |
3216  remainder |
3217  TRB_INTR_TARGET(0);
3218 
3219  if (num_trbs > 1)
3220  more_trbs_coming = true;
3221  else
3222  more_trbs_coming = false;
3223  queue_trb(xhci, ep_ring, more_trbs_coming,
3224  lower_32_bits(addr),
3225  upper_32_bits(addr),
3226  length_field,
3227  field | TRB_TYPE(TRB_NORMAL));
3228  --num_trbs;
3229  running_total += trb_buff_len;
3230 
3231  /* Calculate length for next transfer --
3232  * Are we done queueing all the TRBs for this sg entry?
3233  */
3234  this_sg_len -= trb_buff_len;
3235  if (this_sg_len == 0) {
3236  --num_sgs;
3237  if (num_sgs == 0)
3238  break;
3239  sg = sg_next(sg);
3240  addr = (u64) sg_dma_address(sg);
3241  this_sg_len = sg_dma_len(sg);
3242  } else {
3243  addr += trb_buff_len;
3244  }
3245 
3246  trb_buff_len = TRB_MAX_BUFF_SIZE -
3247  (addr & (TRB_MAX_BUFF_SIZE - 1));
3248  trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
3249  if (running_total + trb_buff_len > urb->transfer_buffer_length)
3250  trb_buff_len =
3251  urb->transfer_buffer_length - running_total;
3252  } while (running_total < urb->transfer_buffer_length);
3253 
3254  check_trb_math(urb, num_trbs, running_total);
3255  giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3256  start_cycle, start_trb);
3257  return 0;
3258 }
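/*
 * A worked example of the first-TRB sizing in queue_bulk_sg_tx() above (the
 * "three forces" comment), using hypothetical values: the first sg entry
 * maps to DMA address 0x2f800 with sg_dma_len = 10000 and
 * urb->transfer_buffer_length = 6000.
 *
 *   force 3 (64KB boundary):    65536 - 0xf800 = 2048 bytes at most
 *   force 1 (sg entry length):  min(2048, 10000) = 2048
 *   force 2 (requested length): 2048 <= 6000, so no further clamp
 *
 * The first TRB therefore carries 2048 bytes; the loop recomputes
 * trb_buff_len the same way for each following TRB.
 */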
3259 
3260 /* This is very similar to what ehci-q.c qtd_fill() does */
3261 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3262  struct urb *urb, int slot_id, unsigned int ep_index)
3263 {
3264  struct xhci_ring *ep_ring;
3265  struct urb_priv *urb_priv;
3266  struct xhci_td *td;
3267  int num_trbs;
3268  struct xhci_generic_trb *start_trb;
3269  bool first_trb;
3270  bool more_trbs_coming;
3271  int start_cycle;
3272  u32 field, length_field;
3273 
3274  int running_total, trb_buff_len, ret;
3275  unsigned int total_packet_count;
3276  u64 addr;
3277 
3278  if (urb->num_sgs)
3279  return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
3280 
3281  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3282  if (!ep_ring)
3283  return -EINVAL;
3284 
3285  num_trbs = 0;
3286  /* How much data is (potentially) left before the 64KB boundary? */
3287  running_total = TRB_MAX_BUFF_SIZE -
3288  (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3289  running_total &= TRB_MAX_BUFF_SIZE - 1;
3290 
3291  /* If there's some data on this 64KB chunk, or we have to send a
3292  * zero-length transfer, we need at least one TRB
3293  */
3294  if (running_total != 0 || urb->transfer_buffer_length == 0)
3295  num_trbs++;
3296  /* How many more 64KB chunks to transfer, how many more TRBs? */
3297  while (running_total < urb->transfer_buffer_length) {
3298  num_trbs++;
3299  running_total += TRB_MAX_BUFF_SIZE;
3300  }
3301  /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
3302 
3303  ret = prepare_transfer(xhci, xhci->devs[slot_id],
3304  ep_index, urb->stream_id,
3305  num_trbs, urb, 0, mem_flags);
3306  if (ret < 0)
3307  return ret;
3308 
3309  urb_priv = urb->hcpriv;
3310  td = urb_priv->td[0];
3311 
3312  /*
3313  * Don't give the first TRB to the hardware (by toggling the cycle bit)
3314  * until we've finished creating all the other TRBs. The ring's cycle
3315  * state may change as we enqueue the other TRBs, so save it too.
3316  */
3317  start_trb = &ep_ring->enqueue->generic;
3318  start_cycle = ep_ring->cycle_state;
3319 
3320  running_total = 0;
3321  total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
3322  usb_endpoint_maxp(&urb->ep->desc));
3323  /* How much data is in the first TRB? */
3324  addr = (u64) urb->transfer_dma;
3325  trb_buff_len = TRB_MAX_BUFF_SIZE -
3326  (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
3327  if (trb_buff_len > urb->transfer_buffer_length)
3328  trb_buff_len = urb->transfer_buffer_length;
3329 
3330  first_trb = true;
3331 
3332  /* Queue the first TRB, even if it's zero-length */
3333  do {
3334  u32 remainder = 0;
3335  field = 0;
3336 
3337  /* Don't change the cycle bit of the first TRB until later */
3338  if (first_trb) {
3339  first_trb = false;
3340  if (start_cycle == 0)
3341  field |= 0x1;
3342  } else
3343  field |= ep_ring->cycle_state;
3344 
3345  /* Chain all the TRBs together; clear the chain bit in the last
3346  * TRB to indicate it's the last TRB in the chain.
3347  */
3348  if (num_trbs > 1) {
3349  field |= TRB_CHAIN;
3350  } else {
3351  /* FIXME - add check for ZERO_PACKET flag before this */
3352  td->last_trb = ep_ring->enqueue;
3353  field |= TRB_IOC;
3354  }
3355 
3356  /* Only set interrupt on short packet for IN endpoints */
3357  if (usb_urb_dir_in(urb))
3358  field |= TRB_ISP;
3359 
3360  /* Set the TRB length, TD size, and interrupter fields. */
3361  if (xhci->hci_version < 0x100) {
3362  remainder = xhci_td_remainder(
3363  urb->transfer_buffer_length -
3364  running_total);
3365  } else {
3366  remainder = xhci_v1_0_td_remainder(running_total,
3367  trb_buff_len, total_packet_count, urb);
3368  }
3369  length_field = TRB_LEN(trb_buff_len) |
3370  remainder |
3371  TRB_INTR_TARGET(0);
3372 
3373  if (num_trbs > 1)
3374  more_trbs_coming = true;
3375  else
3376  more_trbs_coming = false;
3377  queue_trb(xhci, ep_ring, more_trbs_coming,
3378  lower_32_bits(addr),
3379  upper_32_bits(addr),
3380  length_field,
3381  field | TRB_TYPE(TRB_NORMAL));
3382  --num_trbs;
3383  running_total += trb_buff_len;
3384 
3385  /* Calculate length for next transfer */
3386  addr += trb_buff_len;
3387  trb_buff_len = urb->transfer_buffer_length - running_total;
3388  if (trb_buff_len > TRB_MAX_BUFF_SIZE)
3389  trb_buff_len = TRB_MAX_BUFF_SIZE;
3390  } while (running_total < urb->transfer_buffer_length);
3391 
3392  check_trb_math(urb, num_trbs, running_total);
3393  giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3394  start_cycle, start_trb);
3395  return 0;
3396 }
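
/*
 * Worked example (hypothetical numbers) for the TRB-counting logic in
 * xhci_queue_bulk_tx() above: transfer_dma = 0x1000f000 and
 * transfer_buffer_length = 0x3000.  The room left before the next 64KB
 * boundary is 0x10000 - 0xf000 = 0x1000, so:
 *
 *    running_total = 0x1000;      num_trbs = 1  (data before the boundary)
 *    running_total += 0x10000;    num_trbs = 2  (covers the rest)
 *
 * The URB is queued as two Normal TRBs of 0x1000 and 0x2000 bytes.
 */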
3397 
3398 /* Caller must have locked xhci->lock */
3399 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3400  struct urb *urb, int slot_id, unsigned int ep_index)
3401 {
3402  struct xhci_ring *ep_ring;
3403  int num_trbs;
3404  int ret;
3405  struct usb_ctrlrequest *setup;
3406  struct xhci_generic_trb *start_trb;
3407  int start_cycle;
3408  u32 field, length_field;
3409  struct urb_priv *urb_priv;
3410  struct xhci_td *td;
3411 
3412  ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3413  if (!ep_ring)
3414  return -EINVAL;
3415 
3416  /*
3417  * Need to copy setup packet into setup TRB, so we can't use the setup
3418  * DMA address.
3419  */
3420  if (!urb->setup_packet)
3421  return -EINVAL;
3422 
3423  /* 1 TRB for setup, 1 for status */
3424  num_trbs = 2;
3425  /*
3426  * Don't need to check if we need additional event data and normal TRBs,
3427  * since data in control transfers will never get bigger than 16MB
3428  * XXX: can we get a buffer that crosses 64KB boundaries?
3429  */
3430  if (urb->transfer_buffer_length > 0)
3431  num_trbs++;
3432  ret = prepare_transfer(xhci, xhci->devs[slot_id],
3433  ep_index, urb->stream_id,
3434  num_trbs, urb, 0, mem_flags);
3435  if (ret < 0)
3436  return ret;
3437 
3438  urb_priv = urb->hcpriv;
3439  td = urb_priv->td[0];
3440 
3441  /*
3442  * Don't give the first TRB to the hardware (by toggling the cycle bit)
3443  * until we've finished creating all the other TRBs. The ring's cycle
3444  * state may change as we enqueue the other TRBs, so save it too.
3445  */
3446  start_trb = &ep_ring->enqueue->generic;
3447  start_cycle = ep_ring->cycle_state;
3448 
3449  /* Queue setup TRB - see section 6.4.1.2.1 */
3450  /* FIXME better way to translate setup_packet into two u32 fields? */
3451  setup = (struct usb_ctrlrequest *) urb->setup_packet;
3452  field = 0;
3453  field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3454  if (start_cycle == 0)
3455  field |= 0x1;
3456 
3457  /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
3458  if (xhci->hci_version == 0x100) {
3459  if (urb->transfer_buffer_length > 0) {
3460  if (setup->bRequestType & USB_DIR_IN)
3461  field |= TRB_TX_TYPE(TRB_DATA_IN);
3462  else
3463  field |= TRB_TX_TYPE(TRB_DATA_OUT);
3464  }
3465  }
3466 
3467  queue_trb(xhci, ep_ring, true,
3468  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3469  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3470  TRB_LEN(8) | TRB_INTR_TARGET(0),
3471  /* Immediate data in pointer */
3472  field);
3473 
3474  /* If there's data, queue data TRBs */
3475  /* Only set interrupt on short packet for IN endpoints */
3476  if (usb_urb_dir_in(urb))
3477  field = TRB_ISP | TRB_TYPE(TRB_DATA);
3478  else
3479  field = TRB_TYPE(TRB_DATA);
3480 
3481  length_field = TRB_LEN(urb->transfer_buffer_length) |
3482  xhci_td_remainder(urb->transfer_buffer_length) |
3483  TRB_INTR_TARGET(0);
3484  if (urb->transfer_buffer_length > 0) {
3485  if (setup->bRequestType & USB_DIR_IN)
3486  field |= TRB_DIR_IN;
3487  queue_trb(xhci, ep_ring, true,
3488  lower_32_bits(urb->transfer_dma),
3489  upper_32_bits(urb->transfer_dma),
3490  length_field,
3491  field | ep_ring->cycle_state);
3492  }
3493 
3494  /* Save the DMA address of the last TRB in the TD */
3495  td->last_trb = ep_ring->enqueue;
3496 
3497  /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3498  /* If the device sent data, the status stage is an OUT transfer */
3499  if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3500  field = 0;
3501  else
3502  field = TRB_DIR_IN;
3503  queue_trb(xhci, ep_ring, false,
3504  0,
3505  0,
3506  TRB_INTR_TARGET(0),
3507  /* Event on completion */
3508  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3509 
3510  giveback_first_trb(xhci, slot_id, ep_index, 0,
3511  start_cycle, start_trb);
3512  return 0;
3513 }
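
/*
 * Worked example (hypothetical request) of how the setup TRB above packs the
 * 8-byte SETUP packet as immediate data (TRB_IDT): a standard GET_DESCRIPTOR
 * for the device descriptor, bRequestType = 0x80, bRequest = 0x06,
 * wValue = 0x0100, wIndex = 0x0000, wLength = 0x0012, becomes
 *
 *    field1 = 0x80 | (0x06 << 8) | (0x0100 << 16) = 0x01000680
 *    field2 = 0x0000 | (0x0012 << 16)             = 0x00120000
 *
 * i.e. the little-endian layout of struct usb_ctrlrequest, so the controller
 * reads the request from the TRB itself rather than from a separate buffer.
 */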
3514 
3515 static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
3516  struct urb *urb, int i)
3517 {
3518  int num_trbs = 0;
3519  u64 addr, td_len;
3520 
3521  addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3522  td_len = urb->iso_frame_desc[i].length;
3523 
3524  num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3525  TRB_MAX_BUFF_SIZE);
3526  if (num_trbs == 0)
3527  num_trbs++;
3528 
3529  return num_trbs;
3530 }
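
/*
 * Worked example (hypothetical numbers) for count_isoc_trbs_needed() above:
 * an isoc frame of td_len = 10000 bytes whose DMA address sits 0xf000 bytes
 * into a 64KB region needs
 *
 *    DIV_ROUND_UP(10000 + 0xf000, 0x10000) = DIV_ROUND_UP(71440, 65536) = 2
 *
 * TRBs: 4096 bytes up to the boundary, then the remaining 5904 bytes.
 */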
3531 
3532 /*
3533  * The transfer burst count field of the isochronous TRB defines the number of
3534  * bursts that are required to move all packets in this TD. Only SuperSpeed
3535  * devices can burst up to bMaxBurst number of packets per service interval.
3536  * This field is zero based, meaning a value of zero in the field means one
3537  * burst. Basically, for everything but SuperSpeed devices, this field will be
3538  * zero. Only xHCI 1.0 host controllers support this field.
3539  */
3540 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3541  struct usb_device *udev,
3542  struct urb *urb, unsigned int total_packet_count)
3543 {
3544  unsigned int max_burst;
3545 
3546  if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
3547  return 0;
3548 
3549  max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3550  return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3551 }
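
/*
 * Worked example (hypothetical endpoint) for the TBC value above: 11 total
 * packets on a SuperSpeed endpoint with bMaxBurst = 3 move in bursts of up
 * to 4 packets, so 3 bursts are needed and the zero-based field is 2.
 */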
3552 
3553 /*
3554  * Returns the number of packets in the last "burst" of packets. This field is
3555  * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
3556  * the last burst packet count is equal to the total number of packets in the
3557  * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
3558  * must contain (bMaxBurst + 1) number of packets, but the last burst can
3559  * contain 1 to (bMaxBurst + 1) packets.
3560  */
3561 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3562  struct usb_device *udev,
3563  struct urb *urb, unsigned int total_packet_count)
3564 {
3565  unsigned int max_burst;
3566  unsigned int residue;
3567 
3568  if (xhci->hci_version < 0x100)
3569  return 0;
3570 
3571  switch (udev->speed) {
3572  case USB_SPEED_SUPER:
3573  /* bMaxBurst is zero based: 0 means 1 packet per burst */
3574  max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3575  residue = total_packet_count % (max_burst + 1);
3576  /* If residue is zero, the last burst contains (max_burst + 1)
3577  * number of packets, but the TLBPC field is zero-based.
3578  */
3579  if (residue == 0)
3580  return max_burst;
3581  return residue - 1;
3582  default:
3583  if (total_packet_count == 0)
3584  return 0;
3585  return total_packet_count - 1;
3586  }
3587 }
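
/*
 * Worked example (hypothetical endpoint) for the TLBPC value above: with
 * bMaxBurst = 3, 11 packets split as 4 + 4 + 3, residue = 11 % 4 = 3, and
 * the zero-based field is 3 - 1 = 2.  With 12 packets the residue is 0, the
 * last burst is full, and the field is max_burst = 3.
 */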
3588 
3589 /* This is for isoc transfer */
3590 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3591  struct urb *urb, int slot_id, unsigned int ep_index)
3592 {
3593  struct xhci_ring *ep_ring;
3594  struct urb_priv *urb_priv;
3595  struct xhci_td *td;
3596  int num_tds, trbs_per_td;
3597  struct xhci_generic_trb *start_trb;
3598  bool first_trb;
3599  int start_cycle;
3600  u32 field, length_field;
3601  int running_total, trb_buff_len, td_len, td_remain_len, ret;
3602  u64 start_addr, addr;
3603  int i, j;
3604  bool more_trbs_coming;
3605 
3606  ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3607 
3608  num_tds = urb->number_of_packets;
3609  if (num_tds < 1) {
3610  xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3611  return -EINVAL;
3612  }
3613 
3614  start_addr = (u64) urb->transfer_dma;
3615  start_trb = &ep_ring->enqueue->generic;
3616  start_cycle = ep_ring->cycle_state;
3617 
3618  urb_priv = urb->hcpriv;
3619  /* Queue the first TRB, even if it's zero-length */
3620  for (i = 0; i < num_tds; i++) {
3621  unsigned int total_packet_count;
3622  unsigned int burst_count;
3623  unsigned int residue;
3624 
3625  first_trb = true;
3626  running_total = 0;
3627  addr = start_addr + urb->iso_frame_desc[i].offset;
3628  td_len = urb->iso_frame_desc[i].length;
3629  td_remain_len = td_len;
3630  total_packet_count = DIV_ROUND_UP(td_len,
3631  usb_endpoint_maxp(&urb->ep->desc));
3632  /* A zero-length transfer still involves at least one packet. */
3633  if (total_packet_count == 0)
3634  total_packet_count++;
3635  burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
3636  total_packet_count);
3637  residue = xhci_get_last_burst_packet_count(xhci,
3638  urb->dev, urb, total_packet_count);
3639 
3640  trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
3641 
3642  ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
3643  urb->stream_id, trbs_per_td, urb, i, mem_flags);
3644  if (ret < 0) {
3645  if (i == 0)
3646  return ret;
3647  goto cleanup;
3648  }
3649 
3650  td = urb_priv->td[i];
3651  for (j = 0; j < trbs_per_td; j++) {
3652  u32 remainder = 0;
3653  field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
3654 
3655  if (first_trb) {
3656  /* Queue the isoc TRB */
3657  field |= TRB_TYPE(TRB_ISOC);
3658  /* Assume URB_ISO_ASAP is set */
3659  field |= TRB_SIA;
3660  if (i == 0) {
3661  if (start_cycle == 0)
3662  field |= 0x1;
3663  } else
3664  field |= ep_ring->cycle_state;
3665  first_trb = false;
3666  } else {
3667  /* Queue other normal TRBs */
3668  field |= TRB_TYPE(TRB_NORMAL);
3669  field |= ep_ring->cycle_state;
3670  }
3671 
3672  /* Only set interrupt on short packet for IN EPs */
3673  if (usb_urb_dir_in(urb))
3674  field |= TRB_ISP;
3675 
3676  /* Chain all the TRBs together; clear the chain bit in
3677  * the last TRB to indicate it's the last TRB in the
3678  * chain.
3679  */
3680  if (j < trbs_per_td - 1) {
3681  field |= TRB_CHAIN;
3682  more_trbs_coming = true;
3683  } else {
3684  td->last_trb = ep_ring->enqueue;
3685  field |= TRB_IOC;
3686  if (xhci->hci_version == 0x100 &&
3687  !(xhci->quirks &
3688  XHCI_AVOID_BEI)) {
3689  /* Set BEI bit except for the last td */
3690  if (i < num_tds - 1)
3691  field |= TRB_BEI;
3692  }
3693  more_trbs_coming = false;
3694  }
3695 
3696  /* Calculate TRB length */
3697  trb_buff_len = TRB_MAX_BUFF_SIZE -
3698  (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
3699  if (trb_buff_len > td_remain_len)
3700  trb_buff_len = td_remain_len;
3701 
3702  /* Set the TRB length, TD size, & interrupter fields. */
3703  if (xhci->hci_version < 0x100) {
3704  remainder = xhci_td_remainder(
3705  td_len - running_total);
3706  } else {
3707  remainder = xhci_v1_0_td_remainder(
3708  running_total, trb_buff_len,
3709  total_packet_count, urb);
3710  }
3711  length_field = TRB_LEN(trb_buff_len) |
3712  remainder |
3713  TRB_INTR_TARGET(0);
3714 
3715  queue_trb(xhci, ep_ring, more_trbs_coming,
3716  lower_32_bits(addr),
3717  upper_32_bits(addr),
3718  length_field,
3719  field);
3720  running_total += trb_buff_len;
3721 
3722  addr += trb_buff_len;
3723  td_remain_len -= trb_buff_len;
3724  }
3725 
3726  /* Check TD length */
3727  if (running_total != td_len) {
3728  xhci_err(xhci, "ISOC TD length mismatch\n");
3729  ret = -EINVAL;
3730  goto cleanup;
3731  }
3732  }
3733 
3734  if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3735  if (xhci->quirks & XHCI_AMD_PLL_FIX)
3736  usb_amd_quirk_pll_disable();
3737  }
3738  xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3739 
3740  giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3741  start_cycle, start_trb);
3742  return 0;
3743 cleanup:
3744  /* Clean up a partially enqueued isoc transfer. */
3745 
3746  for (i--; i >= 0; i--)
3747  list_del_init(&urb_priv->td[i]->td_list);
3748 
3749  /* Use the first TD as a temporary variable to turn the TDs we've queued
3750  * into No-ops with a software-owned cycle bit. That way the hardware
3751  * won't accidentally start executing bogus TDs when we partially
3752  * overwrite them. td->first_trb and td->start_seg are already set.
3753  */
3754  urb_priv->td[0]->last_trb = ep_ring->enqueue;
3755  /* Every TRB except the first & last will have its cycle bit flipped. */
3756  td_to_noop(xhci, ep_ring, urb_priv->td[0], true);
3757 
3758  /* Reset the ring enqueue back to the first TRB and its cycle bit. */
3759  ep_ring->enqueue = urb_priv->td[0]->first_trb;
3760  ep_ring->enq_seg = urb_priv->td[0]->start_seg;
3761  ep_ring->cycle_state = start_cycle;
3762  ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
3763  usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
3764  return ret;
3765 }
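
/*
 * Minimal sketch of the idea behind the cleanup path above, for illustration
 * only: a queued TRB is neutralized by rewriting its type to a transfer
 * No-op.  The real td_to_noop() (defined earlier in this file) additionally
 * walks every TRB of the TD and manages the cycle bits so the hardware never
 * sees a half-written TD as its own.  The helper name is hypothetical;
 * TRB_TYPE_BITMASK, TRB_TR_NOOP and TRB_TYPE() come from xhci.h.
 */
static void example_trb_to_noop(union xhci_trb *trb)
{
	u32 field = le32_to_cpu(trb->generic.field[3]);

	field &= ~TRB_TYPE_BITMASK;      /* drop the old TRB type */
	field |= TRB_TYPE(TRB_TR_NOOP);  /* mark it as a transfer No-op */
	trb->generic.field[3] = cpu_to_le32(field);
}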
3766 
3767 /*
3768  * Check transfer ring to guarantee there is enough room for the urb.
3769  * Update ISO URB start_frame and interval.
3770  * Update the interval as xhci_queue_intr_tx does. For now, just use the
3771  * xHCI frame_index to update urb->start_frame.
3772  * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
3773  */
3774 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
3775  struct urb *urb, int slot_id, unsigned int ep_index)
3776 {
3777  struct xhci_virt_device *xdev;
3778  struct xhci_ring *ep_ring;
3779  struct xhci_ep_ctx *ep_ctx;
3780  int start_frame;
3781  int xhci_interval;
3782  int ep_interval;
3783  int num_tds, num_trbs, i;
3784  int ret;
3785 
3786  xdev = xhci->devs[slot_id];
3787  ep_ring = xdev->eps[ep_index].ring;
3788  ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3789 
3790  num_trbs = 0;
3791  num_tds = urb->number_of_packets;
3792  for (i = 0; i < num_tds; i++)
3793  num_trbs += count_isoc_trbs_needed(xhci, urb, i);
3794 
3795  /* Check the ring to guarantee there is enough room for the whole URB.
3796  * Do not insert any TD of the URB into the ring if the check fails.
3797  */
3798  ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
3799  num_trbs, mem_flags);
3800  if (ret)
3801  return ret;
3802 
3803  start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
3804  start_frame &= 0x3fff;
3805 
3806  urb->start_frame = start_frame;
3807  if (urb->dev->speed == USB_SPEED_LOW ||
3808  urb->dev->speed == USB_SPEED_FULL)
3809  urb->start_frame >>= 3;
3810 
3811  xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3812  ep_interval = urb->interval;
3813  /* Convert to microframes */
3814  if (urb->dev->speed == USB_SPEED_LOW ||
3815  urb->dev->speed == USB_SPEED_FULL)
3816  ep_interval *= 8;
3817  /* FIXME change this to a warning and a suggestion to use the new API
3818  * to set the polling interval (once the API is added).
3819  */
3820  if (xhci_interval != ep_interval) {
3821  if (printk_ratelimit())
3822  dev_dbg(&urb->dev->dev, "Driver uses different interval"
3823  " (%d microframe%s) than xHCI "
3824  "(%d microframe%s)\n",
3825  ep_interval,
3826  ep_interval == 1 ? "" : "s",
3827  xhci_interval,
3828  xhci_interval == 1 ? "" : "s");
3829  urb->interval = xhci_interval;
3830  /* Convert back to frames for LS/FS devices */
3831  if (urb->dev->speed == USB_SPEED_LOW ||
3832  urb->dev->speed == USB_SPEED_FULL)
3833  urb->interval /= 8;
3834  }
3835  ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
3836 
3837  return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
3838 }
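
/*
 * Worked example (hypothetical device) of the interval fix-up above: a
 * full-speed isoc endpoint whose endpoint context decodes to 8 microframes
 * (EP_INTERVAL_TO_UFRAMES() == 8), submitted with urb->interval = 4 frames.
 * ep_interval becomes 4 * 8 = 32 microframes, the mismatch is logged,
 * urb->interval is overwritten with 8 microframes and, since the device is
 * full speed, converted back to 8 / 8 = 1 frame.
 */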
3839 
3840 /**** Command Ring Operations ****/
3841 
3842 /* Generic function for queueing a command TRB on the command ring.
3843  * Check to make sure there's room on the command ring for one command TRB.
3844  * Also check that there's room reserved for commands that must not fail.
3845  * If this is a command that must not fail, meaning command_must_succeed = TRUE,
3846  * then only check for the number of reserved spots.
3847  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
3848  * because the command event handler may want to resubmit a failed command.
3849  */
3850 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
3851  u32 field3, u32 field4, bool command_must_succeed)
3852 {
3853  int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3854  int ret;
3855 
3856  if (!command_must_succeed)
3857  reserved_trbs++;
3858 
3859  ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
3860  reserved_trbs, GFP_ATOMIC);
3861  if (ret < 0) {
3862  xhci_err(xhci, "ERR: No room for command on command ring\n");
3863  if (command_must_succeed)
3864  xhci_err(xhci, "ERR: Reserved TRB counting for "
3865  "unfailable commands failed.\n");
3866  return ret;
3867  }
3868  queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
3869  field4 | xhci->cmd_ring->cycle_state);
3870  return 0;
3871 }
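
/*
 * Illustration (hypothetical numbers) of the reservation check above: with
 * cmd_ring_reserved_trbs = 2, an ordinary command asks prepare_ring() for
 * room for 2 + 1 = 3 TRBs, so the last two free slots stay reserved for
 * commands that must not fail; a command_must_succeed command asks for only
 * those 2 slots and is allowed to consume one of them.
 */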
3872 
3873 /* Queue a slot enable or disable request on the command ring */
3874 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
3875 {
3876  return queue_command(xhci, 0, 0, 0,
3877  TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
3878 }
3879 
3880 /* Queue an address device command TRB */
3881 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3882  u32 slot_id)
3883 {
3884  return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3885  upper_32_bits(in_ctx_ptr), 0,
3886  TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
3887  false);
3888 }
3889 
3890 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
3891  u32 field1, u32 field2, u32 field3, u32 field4)
3892 {
3893  return queue_command(xhci, field1, field2, field3, field4, false);
3894 }
3895 
3896 /* Queue a reset device command TRB */
3897 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
3898 {
3899  return queue_command(xhci, 0, 0, 0,
3900  TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
3901  false);
3902 }
3903 
3904 /* Queue a configure endpoint command TRB */
3905 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3906  u32 slot_id, bool command_must_succeed)
3907 {
3908  return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3909  upper_32_bits(in_ctx_ptr), 0,
3910  TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
3911  command_must_succeed);
3912 }
3913 
3914 /* Queue an evaluate context command TRB */
3915 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
3916  u32 slot_id, bool command_must_succeed)
3917 {
3918  return queue_command(xhci, lower_32_bits(in_ctx_ptr),
3919  upper_32_bits(in_ctx_ptr), 0,
3920  TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
3921  command_must_succeed);
3922 }
3923 
3924 /*
3925  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
3926  * activity on an endpoint that is about to be suspended.
3927  */
3928 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
3929  unsigned int ep_index, int suspend)
3930 {
3931  u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3932  u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3933  u32 type = TRB_TYPE(TRB_STOP_RING);
3934  u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
3935 
3936  return queue_command(xhci, 0, 0, 0,
3937  trb_slot_id | trb_ep_index | type | trb_suspend, false);
3938 }
3939 
3940 /* Set Transfer Ring Dequeue Pointer command.
3941  * This should not be used for endpoints that have streams enabled.
3942  */
3943 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
3944  unsigned int ep_index, unsigned int stream_id,
3945  struct xhci_segment *deq_seg,
3946  union xhci_trb *deq_ptr, u32 cycle_state)
3947 {
3948  dma_addr_t addr;
3949  u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3950  u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3951  u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
3952  u32 type = TRB_TYPE(TRB_SET_DEQ);
3953  struct xhci_virt_ep *ep;
3954 
3955  addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
3956  if (addr == 0) {
3957  xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3958  xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
3959  deq_seg, deq_ptr);
3960  return 0;
3961  }
3962  ep = &xhci->devs[slot_id]->eps[ep_index];
3963  if ((ep->ep_state & SET_DEQ_PENDING)) {
3964  xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
3965  xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
3966  return 0;
3967  }
3968  ep->queued_deq_seg = deq_seg;
3969  ep->queued_deq_ptr = deq_ptr;
3970  return queue_command(xhci, lower_32_bits(addr) | cycle_state,
3971  upper_32_bits(addr), trb_stream_id,
3972  trb_slot_id | trb_ep_index | type, false);
3973 }
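
/*
 * Note on the address encoding above: TRBs are 16-byte aligned, so the low
 * four bits of the dequeue pointer are always zero and bit 0 of field1 can
 * carry the Dequeue Cycle State.  For example (hypothetical values),
 * addr = 0x3e8d4010 with cycle_state = 1 is queued as field1 = 0x3e8d4011.
 */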
3974 
3975 int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
3976  unsigned int ep_index)
3977 {
3978  u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
3979  u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
3980  u32 type = TRB_TYPE(TRB_RESET_EP);
3981 
3982  return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
3983  false);
3984 }