Linux Kernel 3.7.1
ohci-q.c
1 /*
2  * OHCI HCD (Host Controller Driver) for USB.
3  *
4  * (C) Copyright 1999 Roman Weissgaerber <[email protected]>
5  * (C) Copyright 2000-2002 David Brownell <[email protected]>
6  *
7  * This file is licenced under the GPL.
8  */
9 
10 #include <linux/irq.h>
11 #include <linux/slab.h>
12 
13 static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
14 {
15  int last = urb_priv->length - 1;
16 
17  if (last >= 0) {
18  int i;
19  struct td *td;
20 
21  for (i = 0; i <= last; i++) {
22  td = urb_priv->td [i];
23  if (td)
24  td_free (hc, td);
25  }
26  }
27 
28  list_del (&urb_priv->pending);
29  kfree (urb_priv);
30 }
31 
32 /*-------------------------------------------------------------------------*/
33 
34 /*
35  * URB goes back to driver, and isn't reissued.
36  * It's completely gone from HC data structures.
37  * PRECONDITION: ohci lock held, irqs blocked.
38  */
39 static void
40 finish_urb(struct ohci_hcd *ohci, struct urb *urb, int status)
41 __releases(ohci->lock)
42 __acquires(ohci->lock)
43 {
44  // ASSERT (urb->hcpriv != 0);
45 
46  urb_free_priv (ohci, urb->hcpriv);
47  if (likely(status == -EINPROGRESS))
48  status = 0;
49 
50  switch (usb_pipetype (urb->pipe)) {
51  case PIPE_ISOCHRONOUS:
52  ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
53  if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
 54  if (quirk_amdiso(ohci))
 55  usb_amd_quirk_pll_enable();
56  if (quirk_amdprefetch(ohci))
57  sb800_prefetch(ohci, 0);
58  }
59  break;
60  case PIPE_INTERRUPT:
61  ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
62  break;
63  }
64 
65 #ifdef OHCI_VERBOSE_DEBUG
66  urb_print(urb, "RET", usb_pipeout (urb->pipe), status);
67 #endif
68 
69  /* urb->complete() can reenter this HCD */
70  usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
71  spin_unlock (&ohci->lock);
72  usb_hcd_giveback_urb(ohci_to_hcd(ohci), urb, status);
73  spin_lock (&ohci->lock);
74 
75  /* stop periodic dma if it's not needed */
76  if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
77  && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0) {
78  ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
79  ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
80  }
81 }
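/*
 * Editorial sketch (not part of the original file): finish_urb() must be
 * entered with ohci->lock held and IRQs blocked, and it drops that lock
 * only around usb_hcd_giveback_urb(), because urb->complete() may resubmit
 * URBs and reenter this HCD.  A hypothetical caller (the wrapper name is
 * invented for illustration) would look roughly like this:
 */
#if 0
static void example_giveback_one(struct ohci_hcd *ohci, struct urb *urb)
{
	unsigned long flags;

	spin_lock_irqsave(&ohci->lock, flags);
	/* ... determine that every TD of this URB has been retired ... */
	finish_urb(ohci, urb, 0);	/* briefly unlocks around the callback */
	spin_unlock_irqrestore(&ohci->lock, flags);
}
#endif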
82 
83 
84 /*-------------------------------------------------------------------------*
85  * ED handling functions
86  *-------------------------------------------------------------------------*/
87 
88 /* search for the right schedule branch to use for a periodic ed.
89  * does some load balancing; returns the branch, or negative errno.
90  */
91 static int balance (struct ohci_hcd *ohci, int interval, int load)
92 {
93  int i, branch = -ENOSPC;
94 
95  /* iso periods can be huge; iso tds specify frame numbers */
96  if (interval > NUM_INTS)
97  interval = NUM_INTS;
98 
99  /* search for the least loaded schedule branch of that period
100  * that has enough bandwidth left unreserved.
101  */
102  for (i = 0; i < interval ; i++) {
103  if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
104  int j;
105 
106  /* usb 1.1 says 90% of one frame */
107  for (j = i; j < NUM_INTS; j += interval) {
108  if ((ohci->load [j] + load) > 900)
109  break;
110  }
111  if (j < NUM_INTS)
112  continue;
113  branch = i;
114  }
115  }
116  return branch;
117 }
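/*
 * Editorial note (not part of the original file): the 900 above is 90% of
 * a 1 ms USB 1.1 frame, in microseconds, matching how ed_get() stores
 * ed->load (usb_calc_bus_time() divided by 1000).  For example, an ED with
 * interval 8 and load 100 is accepted on branch i only if slots i, i+8,
 * i+16 and i+24 each stay at or below 900 us after adding the 100 us; the
 * least-loaded branch that fits is returned.
 */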
118 
119 /*-------------------------------------------------------------------------*/
120 
121 /* both iso and interrupt requests have periods; this routine puts them
122  * into the schedule tree in the appropriate place. most iso devices use
123  * 1msec periods, but that's not required.
124  */
125 static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
126 {
127  unsigned i;
128 
129  ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
130  (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
131  ed, ed->branch, ed->load, ed->interval);
132 
133  for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
134  struct ed **prev = &ohci->periodic [i];
135  __hc32 *prev_p = &ohci->hcca->int_table [i];
136  struct ed *here = *prev;
137 
138  /* sorting each branch by period (slow before fast)
139  * lets us share the faster parts of the tree.
140  * (plus maybe: put interrupt eds before iso)
141  */
142  while (here && ed != here) {
143  if (ed->interval > here->interval)
144  break;
145  prev = &here->ed_next;
146  prev_p = &here->hwNextED;
147  here = *prev;
148  }
149  if (ed != here) {
150  ed->ed_next = here;
151  if (here)
152  ed->hwNextED = *prev_p;
153  wmb ();
154  *prev = ed;
155  *prev_p = cpu_to_hc32(ohci, ed->dma);
156  wmb();
157  }
158  ohci->load [i] += ed->load;
159  }
160  ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval;
161 }
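/*
 * Editorial note (not part of the original file): for a hypothetical ED
 * with ed->branch = 1 and ed->interval = 4, the loop above links it into
 * the chains rooted at hcca->int_table slots 1, 5, 9, ... 29, so the HC
 * encounters it once every 4 frames.  Keeping each chain sorted
 * slow-before-fast pushes small-interval EDs toward the shared tails,
 * which is the tree of figure 3-5 in the OHCI spec.
 */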
162 
163 /* link an ed into one of the HC chains */
164 
165 static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
166 {
167  int branch;
168 
169  ed->state = ED_OPER;
170  ed->ed_prev = NULL;
171  ed->ed_next = NULL;
172  ed->hwNextED = 0;
173  if (quirk_zfmicro(ohci)
174  && (ed->type == PIPE_INTERRUPT)
175  && !(ohci->eds_scheduled++))
176  mod_timer(&ohci->unlink_watchdog, round_jiffies(jiffies + HZ));
177  wmb ();
178 
179  /* we care about rm_list when setting CLE/BLE in case the HC was at
180  * work on some TD when CLE/BLE was turned off, and isn't quiesced
181  * yet. finish_unlinks() restarts as needed, some upcoming INTR_SF.
182  *
183  * control and bulk EDs are doubly linked (ed_next, ed_prev), but
184  * periodic ones are singly linked (ed_next). that's because the
185  * periodic schedule encodes a tree like figure 3-5 in the ohci
186  * spec: each qh can have several "previous" nodes, and the tree
187  * doesn't have unused/idle descriptors.
188  */
189  switch (ed->type) {
190  case PIPE_CONTROL:
191  if (ohci->ed_controltail == NULL) {
192  WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
193  ohci_writel (ohci, ed->dma,
194  &ohci->regs->ed_controlhead);
195  } else {
196  ohci->ed_controltail->ed_next = ed;
197  ohci->ed_controltail->hwNextED = cpu_to_hc32 (ohci,
198  ed->dma);
199  }
200  ed->ed_prev = ohci->ed_controltail;
201  if (!ohci->ed_controltail && !ohci->ed_rm_list) {
202  wmb();
203  ohci->hc_control |= OHCI_CTRL_CLE;
204  ohci_writel (ohci, 0, &ohci->regs->ed_controlcurrent);
205  ohci_writel (ohci, ohci->hc_control,
206  &ohci->regs->control);
207  }
208  ohci->ed_controltail = ed;
209  break;
210 
211  case PIPE_BULK:
212  if (ohci->ed_bulktail == NULL) {
213  WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
214  ohci_writel (ohci, ed->dma, &ohci->regs->ed_bulkhead);
215  } else {
216  ohci->ed_bulktail->ed_next = ed;
217  ohci->ed_bulktail->hwNextED = cpu_to_hc32 (ohci,
218  ed->dma);
219  }
220  ed->ed_prev = ohci->ed_bulktail;
221  if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
222  wmb();
223  ohci->hc_control |= OHCI_CTRL_BLE;
224  ohci_writel (ohci, 0, &ohci->regs->ed_bulkcurrent);
225  ohci_writel (ohci, ohci->hc_control,
226  &ohci->regs->control);
227  }
228  ohci->ed_bulktail = ed;
229  break;
230 
231  // case PIPE_INTERRUPT:
232  // case PIPE_ISOCHRONOUS:
233  default:
234  branch = balance (ohci, ed->interval, ed->load);
235  if (branch < 0) {
236  ohci_dbg (ohci,
237  "ERR %d, interval %d msecs, load %d\n",
238  branch, ed->interval, ed->load);
239  // FIXME if there are TDs queued, fail them!
240  return branch;
241  }
242  ed->branch = branch;
243  periodic_link (ohci, ed);
244  }
245 
246  /* the HC may not see the schedule updates yet, but if it does
247  * then they'll be properly ordered.
248  */
249  return 0;
250 }
251 
252 /*-------------------------------------------------------------------------*/
253 
254 /* scan the periodic table to find and unlink this ED */
255 static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
256 {
257  int i;
258 
259  for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
260  struct ed *temp;
261  struct ed **prev = &ohci->periodic [i];
262  __hc32 *prev_p = &ohci->hcca->int_table [i];
263 
264  while (*prev && (temp = *prev) != ed) {
265  prev_p = &temp->hwNextED;
266  prev = &temp->ed_next;
267  }
268  if (*prev) {
269  *prev_p = ed->hwNextED;
270  *prev = ed->ed_next;
271  }
272  ohci->load [i] -= ed->load;
273  }
274  ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval;
275 
276  ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
277  (ed->hwINFO & cpu_to_hc32 (ohci, ED_ISO)) ? "iso " : "",
278  ed, ed->branch, ed->load, ed->interval);
279 }
280 
281 /* unlink an ed from one of the HC chains.
282  * just the link to the ed is unlinked.
283  * the link from the ed still points to another operational ed or 0
284  * so the HC can eventually finish the processing of the unlinked ed
285  * (assuming it already started that, which needn't be true).
286  *
287  * ED_UNLINK is a transient state: the HC may still see this ED, but soon
288  * it won't. ED_SKIP means the HC will finish its current transaction,
289  * but won't start anything new. The TD queue may still grow; device
290  * drivers don't know about this HCD-internal state.
291  *
292  * When the HC can't see the ED, something changes ED_UNLINK to one of:
293  *
294  * - ED_OPER: when there's any request queued, the ED gets rescheduled
295  * immediately. HC should be working on them.
296  *
297  * - ED_IDLE: when there's no TD queue. there's no reason for the HC
298  * to care about this ED; safe to disable the endpoint.
299  *
300  * When finish_unlinks() runs later, after SOF interrupt, it will often
301  * complete one or more URB unlinks before making that state change.
302  */
303 static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
304 {
305  ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
306  wmb ();
307  ed->state = ED_UNLINK;
308 
309  /* To deschedule something from the control or bulk list, just
310  * clear CLE/BLE and wait. There's no safe way to scrub out list
311  * head/current registers until later, and "later" isn't very
312  * tightly specified. Figure 6-5 and Section 6.4.2.2 show how
313  * the HC is reading the ED queues (while we modify them).
314  *
315  * For now, ed_schedule() is "later". It might be good paranoia
316  * to scrub those registers in finish_unlinks(), in case of bugs
317  * that make the HC try to use them.
318  */
319  switch (ed->type) {
320  case PIPE_CONTROL:
321  /* remove ED from the HC's list: */
322  if (ed->ed_prev == NULL) {
323  if (!ed->hwNextED) {
324  ohci->hc_control &= ~OHCI_CTRL_CLE;
325  ohci_writel (ohci, ohci->hc_control,
326  &ohci->regs->control);
327  // an ohci_readl() later syncs CLE with the HC
328  } else
329  ohci_writel (ohci,
330  hc32_to_cpup (ohci, &ed->hwNextED),
331  &ohci->regs->ed_controlhead);
332  } else {
333  ed->ed_prev->ed_next = ed->ed_next;
334  ed->ed_prev->hwNextED = ed->hwNextED;
335  }
336  /* remove ED from the HCD's list: */
337  if (ohci->ed_controltail == ed) {
338  ohci->ed_controltail = ed->ed_prev;
339  if (ohci->ed_controltail)
340  ohci->ed_controltail->ed_next = NULL;
341  } else if (ed->ed_next) {
342  ed->ed_next->ed_prev = ed->ed_prev;
343  }
344  break;
345 
346  case PIPE_BULK:
347  /* remove ED from the HC's list: */
348  if (ed->ed_prev == NULL) {
349  if (!ed->hwNextED) {
350  ohci->hc_control &= ~OHCI_CTRL_BLE;
351  ohci_writel (ohci, ohci->hc_control,
352  &ohci->regs->control);
353  // an ohci_readl() later syncs BLE with the HC
354  } else
355  ohci_writel (ohci,
356  hc32_to_cpup (ohci, &ed->hwNextED),
357  &ohci->regs->ed_bulkhead);
358  } else {
359  ed->ed_prev->ed_next = ed->ed_next;
360  ed->ed_prev->hwNextED = ed->hwNextED;
361  }
362  /* remove ED from the HCD's list: */
363  if (ohci->ed_bulktail == ed) {
364  ohci->ed_bulktail = ed->ed_prev;
365  if (ohci->ed_bulktail)
366  ohci->ed_bulktail->ed_next = NULL;
367  } else if (ed->ed_next) {
368  ed->ed_next->ed_prev = ed->ed_prev;
369  }
370  break;
371 
372  // case PIPE_INTERRUPT:
373  // case PIPE_ISOCHRONOUS:
374  default:
375  periodic_unlink (ohci, ed);
376  break;
377  }
378 }
379 
380 
381 /*-------------------------------------------------------------------------*/
382 
383 /* get and maybe (re)init an endpoint. init _should_ be done only as part
384  * of enumeration, usb_set_configuration() or usb_set_interface().
385  */
386 static struct ed *ed_get (
387  struct ohci_hcd *ohci,
388  struct usb_host_endpoint *ep,
389  struct usb_device *udev,
390  unsigned int pipe,
391  int interval
392 ) {
393  struct ed *ed;
394  unsigned long flags;
395 
396  spin_lock_irqsave (&ohci->lock, flags);
397 
398  if (!(ed = ep->hcpriv)) {
399  struct td *td;
400  int is_out;
401  u32 info;
402 
403  ed = ed_alloc (ohci, GFP_ATOMIC);
404  if (!ed) {
405  /* out of memory */
406  goto done;
407  }
408 
409  /* dummy td; end of td list for ed */
410  td = td_alloc (ohci, GFP_ATOMIC);
411  if (!td) {
412  /* out of memory */
413  ed_free (ohci, ed);
414  ed = NULL;
415  goto done;
416  }
417  ed->dummy = td;
418  ed->hwTailP = cpu_to_hc32 (ohci, td->td_dma);
419  ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
420  ed->state = ED_IDLE;
421 
422  is_out = !(ep->desc.bEndpointAddress & USB_DIR_IN);
423 
424  /* FIXME usbcore changes dev->devnum before SET_ADDRESS
425  * succeeds ... otherwise we wouldn't need "pipe".
426  */
427  info = usb_pipedevice (pipe);
428  ed->type = usb_pipetype(pipe);
429 
430  info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << 7;
431  info |= usb_endpoint_maxp(&ep->desc) << 16;
432  if (udev->speed == USB_SPEED_LOW)
433  info |= ED_LOWSPEED;
434  /* only control transfers store pids in tds */
435  if (ed->type != PIPE_CONTROL) {
436  info |= is_out ? ED_OUT : ED_IN;
437  if (ed->type != PIPE_BULK) {
438  /* periodic transfers... */
439  if (ed->type == PIPE_ISOCHRONOUS)
440  info |= ED_ISO;
441  else if (interval > 32) /* iso can be bigger */
442  interval = 32;
443  ed->interval = interval;
444  ed->load = usb_calc_bus_time (
445  udev->speed, !is_out,
446  ed->type == PIPE_ISOCHRONOUS,
447  usb_endpoint_maxp(&ep->desc))
448  / 1000;
449  }
450  }
451  ed->hwINFO = cpu_to_hc32(ohci, info);
452 
453  ep->hcpriv = ed;
454  }
455 
456 done:
457  spin_unlock_irqrestore (&ohci->lock, flags);
458  return ed;
459 }
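/*
 * Editorial sketch (not part of the original file): what the hwINFO dword
 * built above contains for a hypothetical full-speed interrupt-IN endpoint
 * (device address 3, bEndpointAddress 0x81, wMaxPacketSize 64).  The helper
 * name and the values are invented for illustration.
 */
#if 0
static u32 example_ed_info(void)
{
	u32 info = 3;				/* function address, bits 0-6   */

	info |= (0x81 & ~USB_DIR_IN) << 7;	/* endpoint number 1, bits 7-10 */
	info |= 64 << 16;			/* max packet size, bits 16-26  */
	info |= ED_IN;				/* explicit direction, non-control EP */
	return info;				/* 0x00400083 | ED_IN */
}
#endif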
460 
461 /*-------------------------------------------------------------------------*/
462 
463 /* request unlinking of an endpoint from an operational HC.
464  * put the ep on the rm_list
465  * real work is done at the next start frame (SF) hardware interrupt
466  * caller guarantees HCD is running, so hardware access is safe,
467  * and that ed->state is ED_OPER
468  */
469 static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
470 {
471  ed->hwINFO |= cpu_to_hc32 (ohci, ED_DEQUEUE);
472  ed_deschedule (ohci, ed);
473 
474  /* rm_list is just singly linked, for simplicity */
475  ed->ed_next = ohci->ed_rm_list;
476  ed->ed_prev = NULL;
477  ohci->ed_rm_list = ed;
478 
479  /* enable SOF interrupt */
480  ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrstatus);
481  ohci_writel (ohci, OHCI_INTR_SF, &ohci->regs->intrenable);
482  // flush those writes, and get latest HCCA contents
483  (void) ohci_readl (ohci, &ohci->regs->control);
484 
485  /* SF interrupt might get delayed; record the frame counter value that
486  * indicates when the HC isn't looking at it, so concurrent unlinks
487  * behave. frame_no wraps every 2^16 msec, and changes right before
488  * SF is triggered.
489  */
490  ed->tick = ohci_frame_no(ohci) + 1;
491 
492 }
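/*
 * Editorial sketch (not part of the original file): finish_unlinks() tests
 * ed->tick with the wrap-aware tick_before() macro from ohci.h, so an ED
 * queued here is only reclaimed after at least one more frame has elapsed.
 * A minimal check, with an invented helper name, looks like:
 */
#if 0
static bool example_safe_to_reclaim(struct ohci_hcd *ohci, struct ed *ed)
{
	u16 now = ohci_frame_no(ohci);

	return !tick_before(now, ed->tick);	/* wrap-aware 16-bit compare */
}
#endif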
493 
494 /*-------------------------------------------------------------------------*
495  * TD handling functions
496  *-------------------------------------------------------------------------*/
497 
498 /* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
499 
500 static void
501 td_fill (struct ohci_hcd *ohci, u32 info,
502  dma_addr_t data, int len,
503  struct urb *urb, int index)
504 {
505  struct td *td, *td_pt;
506  struct urb_priv *urb_priv = urb->hcpriv;
507  int is_iso = info & TD_ISO;
508  int hash;
509 
510  // ASSERT (index < urb_priv->length);
511 
512  /* aim for only one interrupt per urb. mostly applies to control
513  * and iso; other urbs rarely need more than one TD per urb.
514  * this way, only final tds (or ones with an error) cause IRQs.
515  * at least immediately; use DI=6 in case any control request is
516  * tempted to die part way through. (and to force the hc to flush
517  * its donelist soonish, even on unlink paths.)
518  *
519  * NOTE: could delay interrupts even for the last TD, and get fewer
520  * interrupts ... increasing per-urb latency by sharing interrupts.
521  * Drivers that queue bulk urbs may request that behavior.
522  */
523  if (index != (urb_priv->length - 1)
524  || (urb->transfer_flags & URB_NO_INTERRUPT))
525  info |= TD_DI_SET (6);
526 
527  /* use this td as the next dummy */
528  td_pt = urb_priv->td [index];
529 
530  /* fill the old dummy TD */
531  td = urb_priv->td [index] = urb_priv->ed->dummy;
532  urb_priv->ed->dummy = td_pt;
533 
534  td->ed = urb_priv->ed;
535  td->next_dl_td = NULL;
536  td->index = index;
537  td->urb = urb;
538  td->data_dma = data;
539  if (!len)
540  data = 0;
541 
542  td->hwINFO = cpu_to_hc32 (ohci, info);
543  if (is_iso) {
544  td->hwCBP = cpu_to_hc32 (ohci, data & 0xFFFFF000);
545  *ohci_hwPSWp(ohci, td, 0) = cpu_to_hc16 (ohci,
546  (data & 0x0FFF) | 0xE000);
547  td->ed->last_iso = info & 0xffff;
548  } else {
549  td->hwCBP = cpu_to_hc32 (ohci, data);
550  }
551  if (data)
552  td->hwBE = cpu_to_hc32 (ohci, data + len - 1);
553  else
554  td->hwBE = 0;
555  td->hwNextTD = cpu_to_hc32 (ohci, td_pt->td_dma);
556 
557  /* append to queue */
558  list_add_tail (&td->td_list, &td->ed->td_list);
559 
560  /* hash it for later reverse mapping */
561  hash = TD_HASH_FUNC (td->td_dma);
562  td->td_hash = ohci->td_hash [hash];
563  ohci->td_hash [hash] = td;
564 
565  /* HC might read the TD (or cachelines) right away ... */
566  wmb ();
567  td->ed->hwTailP = td->hwNextTD;
568 }
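/*
 * Editorial note (not part of the original file): the dummy-TD swap above is
 * what keeps the HC from racing the driver.  ed->hwTailP always points at an
 * empty dummy TD; td_fill() writes the real transfer into the old dummy,
 * turns this URB's freshly allocated TD into the new dummy, and only then
 * advances hwTailP, so the controller never fetches a half-built descriptor.
 */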
569 
570 /*-------------------------------------------------------------------------*/
571 
572 /* Prepare all TDs of a transfer, and queue them onto the ED.
573  * Caller guarantees HC is active.
574  * Usually the ED is already on the schedule, so TDs might be
575  * processed as soon as they're queued.
576  */
577 static void td_submit_urb (
578  struct ohci_hcd *ohci,
579  struct urb *urb
580 ) {
581  struct urb_priv *urb_priv = urb->hcpriv;
582  dma_addr_t data;
583  int data_len = urb->transfer_buffer_length;
584  int cnt = 0;
585  u32 info = 0;
586  int is_out = usb_pipeout (urb->pipe);
587  int periodic = 0;
588 
589  /* OHCI handles the bulk/interrupt data toggles itself. We just
590  * use the device toggle bits for resetting, and rely on the fact
591  * that resetting toggle is meaningless if the endpoint is active.
592  */
593  if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
594  usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
595  is_out, 1);
596  urb_priv->ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_C);
597  }
598 
599  urb_priv->td_cnt = 0;
600  list_add (&urb_priv->pending, &ohci->pending);
601 
602  if (data_len)
603  data = urb->transfer_dma;
604  else
605  data = 0;
606 
607  /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
608  * using TD_CC_GET, as well as by seeing them on the done list.
609  * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
610  */
611  switch (urb_priv->ed->type) {
612 
613  /* Bulk and interrupt are identical except for where in the schedule
614  * their EDs live.
615  */
616  case PIPE_INTERRUPT:
617  /* ... and periodic urbs have extra accounting */
618  periodic = ohci_to_hcd(ohci)->self.bandwidth_int_reqs++ == 0
619  && ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0;
620  /* FALLTHROUGH */
621  case PIPE_BULK:
622  info = is_out
623  ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
624  : TD_T_TOGGLE | TD_CC | TD_DP_IN;
625  /* TDs _could_ transfer up to 8K each */
626  while (data_len > 4096) {
627  td_fill (ohci, info, data, 4096, urb, cnt);
628  data += 4096;
629  data_len -= 4096;
630  cnt++;
631  }
632  /* maybe avoid ED halt on final TD short read */
633  if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
634  info |= TD_R;
635  td_fill (ohci, info, data, data_len, urb, cnt);
636  cnt++;
637  if ((urb->transfer_flags & URB_ZERO_PACKET)
638  && cnt < urb_priv->length) {
639  td_fill (ohci, info, 0, 0, urb, cnt);
640  cnt++;
641  }
642  /* maybe kickstart bulk list */
643  if (urb_priv->ed->type == PIPE_BULK) {
644  wmb ();
645  ohci_writel (ohci, OHCI_BLF, &ohci->regs->cmdstatus);
646  }
647  break;
648 
649  /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
650  * any DATA phase works normally, and the STATUS ack is special.
651  */
652  case PIPE_CONTROL:
653  info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
654  td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
655  if (data_len > 0) {
656  info = TD_CC | TD_R | TD_T_DATA1;
657  info |= is_out ? TD_DP_OUT : TD_DP_IN;
658  /* NOTE: mishandles transfers >8K, some >4K */
659  td_fill (ohci, info, data, data_len, urb, cnt++);
660  }
661  info = (is_out || data_len == 0)
662  ? TD_CC | TD_DP_IN | TD_T_DATA1
663  : TD_CC | TD_DP_OUT | TD_T_DATA1;
664  td_fill (ohci, info, data, 0, urb, cnt++);
665  /* maybe kickstart control list */
666  wmb ();
667  ohci_writel (ohci, OHCI_CLF, &ohci->regs->cmdstatus);
668  break;
669 
670  /* ISO has no retransmit, so no toggle; and it uses special TDs.
671  * Each TD could handle multiple consecutive frames (interval 1);
672  * we could often reduce the number of TDs here.
673  */
674  case PIPE_ISOCHRONOUS:
675  for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
676  int frame = urb->start_frame;
677 
678  // FIXME scheduling should handle frame counter
679  // roll-around ... exotic case (and OHCI has
680  // a 2^16 iso range, vs other HCs max of 2^10)
681  frame += cnt * urb->interval;
682  frame &= 0xffff;
683  td_fill (ohci, TD_CC | TD_ISO | frame,
684  data + urb->iso_frame_desc [cnt].offset,
685  urb->iso_frame_desc [cnt].length, urb, cnt);
686  }
687  if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
688  if (quirk_amdiso(ohci))
689  usb_amd_quirk_pll_disable();
690  if (quirk_amdprefetch(ohci))
691  sb800_prefetch(ohci, 1);
692  }
693  periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
694  && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
695  break;
696  }
697 
698  /* start periodic dma if needed */
699  if (periodic) {
700  wmb ();
701  ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
702  ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
703  }
704 
705  // ASSERT (urb_priv->length == cnt);
706 }
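/*
 * Editorial sketch (not part of the original file): how many TDs the bulk
 * path above produces.  A hypothetical 10000-byte bulk transfer is sliced
 * into 4096 + 4096 + 1808 bytes, i.e. three TDs, plus one extra zero-length
 * TD only when URB_ZERO_PACKET was requested and urb_enqueue() reserved a
 * slot for it.  The helper below merely mirrors that slicing.
 */
#if 0
static int example_bulk_td_count(int data_len, bool want_zero_packet)
{
	int cnt = 0;

	while (data_len > 4096) {	/* full 4 KB TDs (hardware could do 8 KB) */
		data_len -= 4096;
		cnt++;
	}
	cnt++;				/* final, possibly short, TD */
	if (want_zero_packet)
		cnt++;			/* trailing zero-length packet */
	return cnt;
}
#endif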
707 
708 /*-------------------------------------------------------------------------*
709  * Done List handling functions
710  *-------------------------------------------------------------------------*/
711 
712 /* calculate transfer length/status and update the urb */
713 static int td_done(struct ohci_hcd *ohci, struct urb *urb, struct td *td)
714 {
715  u32 tdINFO = hc32_to_cpup (ohci, &td->hwINFO);
716  int cc = 0;
717  int status = -EINPROGRESS;
718 
719  list_del (&td->td_list);
720 
721  /* ISO ... drivers see per-TD length/status */
722  if (tdINFO & TD_ISO) {
723  u16 tdPSW = ohci_hwPSW(ohci, td, 0);
724  int dlen = 0;
725 
726  /* NOTE: assumes FC in tdINFO == 0, and that
727  * only the first of 0..MAXPSW psws is used.
728  */
729 
730  cc = (tdPSW >> 12) & 0xF;
731  if (tdINFO & TD_CC) /* hc didn't touch? */
732  return status;
733 
734  if (usb_pipeout (urb->pipe))
735  dlen = urb->iso_frame_desc [td->index].length;
736  else {
737  /* short reads are always OK for ISO */
738  if (cc == TD_DATAUNDERRUN)
739  cc = TD_CC_NOERROR;
740  dlen = tdPSW & 0x3ff;
741  }
742  urb->actual_length += dlen;
743  urb->iso_frame_desc [td->index].actual_length = dlen;
744  urb->iso_frame_desc [td->index].status = cc_to_error [cc];
745 
746  if (cc != TD_CC_NOERROR)
747  ohci_vdbg (ohci,
748  "urb %p iso td %p (%d) len %d cc %d\n",
749  urb, td, 1 + td->index, dlen, cc);
750 
751  /* BULK, INT, CONTROL ... drivers see aggregate length/status,
752  * except that "setup" bytes aren't counted and "short" transfers
753  * might not be reported as errors.
754  */
755  } else {
756  int type = usb_pipetype (urb->pipe);
757  u32 tdBE = hc32_to_cpup (ohci, &td->hwBE);
758 
759  cc = TD_CC_GET (tdINFO);
760 
761  /* update packet status if needed (short is normally ok) */
762  if (cc == TD_DATAUNDERRUN
763  && !(urb->transfer_flags & URB_SHORT_NOT_OK))
764  cc = TD_CC_NOERROR;
765  if (cc != TD_CC_NOERROR && cc < 0x0E)
766  status = cc_to_error[cc];
767 
768  /* count all non-empty packets except control SETUP packet */
769  if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
770  if (td->hwCBP == 0)
771  urb->actual_length += tdBE - td->data_dma + 1;
772  else
773  urb->actual_length +=
774  hc32_to_cpup (ohci, &td->hwCBP)
775  - td->data_dma;
776  }
777 
778  if (cc != TD_CC_NOERROR && cc < 0x0E)
779  ohci_vdbg (ohci,
780  "urb %p td %p (%d) cc %d, len=%d/%d\n",
781  urb, td, 1 + td->index, cc,
782  urb->actual_length,
783  urb->transfer_buffer_length);
784  }
785  return status;
786 }
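/*
 * Editorial sketch (not part of the original file): the non-ISO length math
 * above.  For a hypothetical TD covering a 512-byte buffer at data_dma, a
 * retired TD with hwCBP == 0 moved the whole buffer (hwBE - data_dma + 1),
 * while one that stopped 100 bytes in reports hwCBP == data_dma + 100.
 */
#if 0
static u32 example_td_bytes(u32 data_dma, u32 be, u32 cbp)
{
	if (cbp == 0)
		return be - data_dma + 1;	/* buffer end address is inclusive */
	return cbp - data_dma;			/* partial: up to current pointer */
}
#endif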
787 
788 /*-------------------------------------------------------------------------*/
789 
790 static void ed_halted(struct ohci_hcd *ohci, struct td *td, int cc)
791 {
792  struct urb *urb = td->urb;
793  urb_priv_t *urb_priv = urb->hcpriv;
794  struct ed *ed = td->ed;
795  struct list_head *tmp = td->td_list.next;
796  __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ohci, ED_C);
797 
798  /* clear ed halt; this is the td that caused it, but keep it inactive
799  * until its urb->complete() has a chance to clean up.
800  */
801  ed->hwINFO |= cpu_to_hc32 (ohci, ED_SKIP);
802  wmb ();
803  ed->hwHeadP &= ~cpu_to_hc32 (ohci, ED_H);
804 
805  /* Get rid of all later tds from this urb. We don't have
806  * to be careful: no errors and nothing was transferred.
807  * Also patch the ed so it looks as if those tds completed normally.
808  */
809  while (tmp != &ed->td_list) {
810  struct td *next;
811 
812  next = list_entry (tmp, struct td, td_list);
813  tmp = next->td_list.next;
814 
815  if (next->urb != urb)
816  break;
817 
818  /* NOTE: if multi-td control DATA segments get supported,
819  * this urb had one of them, this td wasn't the last td
820  * in that segment (TD_R clear), this ed halted because
821  * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
822  * then we need to leave the control STATUS packet queued
823  * and clear ED_SKIP.
824  */
825 
826  list_del(&next->td_list);
827  urb_priv->td_cnt++;
828  ed->hwHeadP = next->hwNextTD | toggle;
829  }
830 
831  /* help for troubleshooting: report anything that
832  * looks odd ... that doesn't include protocol stalls
833  * (or maybe some other things)
834  */
835  switch (cc) {
836  case TD_DATAUNDERRUN:
837  if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
838  break;
839  /* fallthrough */
840  case TD_CC_STALL:
841  if (usb_pipecontrol (urb->pipe))
842  break;
843  /* fallthrough */
844  default:
845  ohci_dbg (ohci,
846  "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
847  urb, urb->dev->devpath,
848  usb_pipeendpoint (urb->pipe),
849  usb_pipein (urb->pipe) ? "in" : "out",
850  hc32_to_cpu (ohci, td->hwINFO),
851  cc, cc_to_error [cc]);
852  }
853 }
854 
855 /* replies to the request have to be on a FIFO basis so
856  * we unreverse the hc-reversed done-list
857  */
858 static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
859 {
860  u32 td_dma;
861  struct td *td_rev = NULL;
862  struct td *td = NULL;
863 
864  td_dma = hc32_to_cpup (ohci, &ohci->hcca->done_head);
865  ohci->hcca->done_head = 0;
866  wmb();
867 
868  /* get TD from hc's singly linked list, and
869  * prepend to ours. ed->td_list changes later.
870  */
871  while (td_dma) {
872  int cc;
873 
874  td = dma_to_td (ohci, td_dma);
875  if (!td) {
876  ohci_err (ohci, "bad entry %8x\n", td_dma);
877  break;
878  }
879 
880  td->hwINFO |= cpu_to_hc32 (ohci, TD_DONE);
881  cc = TD_CC_GET (hc32_to_cpup (ohci, &td->hwINFO));
882 
883  /* Non-iso endpoints can halt on error; un-halt,
884  * and dequeue any other TDs from this urb.
885  * No other TD could have caused the halt.
886  */
887  if (cc != TD_CC_NOERROR
888  && (td->ed->hwHeadP & cpu_to_hc32 (ohci, ED_H)))
889  ed_halted(ohci, td, cc);
890 
891  td->next_dl_td = td_rev;
892  td_rev = td;
893  td_dma = hc32_to_cpup (ohci, &td->hwNextTD);
894  }
895  return td_rev;
896 }
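/*
 * Editorial sketch (not part of the original file): the same "unreverse"
 * idiom on a plain singly linked list.  The HC pushes retired TDs onto
 * done_head LIFO-fashion, so walking that list while prepending each node
 * to a new list restores FIFO (completion) order.
 */
#if 0
struct example_node { struct example_node *next; };

static struct example_node *example_unreverse(struct example_node *lifo)
{
	struct example_node *fifo = NULL;

	while (lifo) {
		struct example_node *next = lifo->next;

		lifo->next = fifo;	/* prepend onto the new list */
		fifo = lifo;
		lifo = next;
	}
	return fifo;
}
#endif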
897 
898 /*-------------------------------------------------------------------------*/
899 
900 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
901 static void
902 finish_unlinks (struct ohci_hcd *ohci, u16 tick)
903 {
904  struct ed *ed, **last;
905 
906 rescan_all:
907  for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
908  struct list_head *entry, *tmp;
909  int completed, modified;
910  __hc32 *prev;
911 
912  /* only take off EDs that the HC isn't using, accounting for
913  * frame counter wraps and EDs with partially retired TDs
914  */
915  if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
916  if (tick_before (tick, ed->tick)) {
917 skip_ed:
918  last = &ed->ed_next;
919  continue;
920  }
921 
922  if (!list_empty (&ed->td_list)) {
923  struct td *td;
924  u32 head;
925 
926  td = list_entry (ed->td_list.next, struct td,
927  td_list);
928  head = hc32_to_cpu (ohci, ed->hwHeadP) &
929  TD_MASK;
930 
931  /* INTR_WDH may need to clean up first */
932  if (td->td_dma != head) {
933  if (ed == ohci->ed_to_check)
934  ohci->ed_to_check = NULL;
935  else
936  goto skip_ed;
937  }
938  }
939  }
940 
941  /* reentrancy: if we drop the schedule lock, someone might
942  * have modified this list. normally it's just prepending
943  * entries (which we'd ignore), but paranoia won't hurt.
944  */
945  *last = ed->ed_next;
946  ed->ed_next = NULL;
947  modified = 0;
948 
949  /* unlink urbs as requested, but rescan the list after
950  * we call a completion since it might have unlinked
951  * another (earlier) urb
952  *
953  * When we get here, the HC doesn't see this ed. But it
954  * must not be rescheduled until all completed URBs have
955  * been given back to the driver.
956  */
957 rescan_this:
958  completed = 0;
959  prev = &ed->hwHeadP;
960  list_for_each_safe (entry, tmp, &ed->td_list) {
961  struct td *td;
962  struct urb *urb;
963  urb_priv_t *urb_priv;
964  __hc32 savebits;
965  u32 tdINFO;
966 
967  td = list_entry (entry, struct td, td_list);
968  urb = td->urb;
969  urb_priv = td->urb->hcpriv;
970 
971  if (!urb->unlinked) {
972  prev = &td->hwNextTD;
973  continue;
974  }
975 
976  /* patch pointer hc uses */
977  savebits = *prev & ~cpu_to_hc32 (ohci, TD_MASK);
978  *prev = td->hwNextTD | savebits;
979 
980  /* If this was unlinked, the TD may not have been
981  * retired ... so manually save the data toggle.
982  * The controller ignores the value we save for
983  * control and ISO endpoints.
984  */
985  tdINFO = hc32_to_cpup(ohci, &td->hwINFO);
986  if ((tdINFO & TD_T) == TD_T_DATA0)
987  ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_C);
988  else if ((tdINFO & TD_T) == TD_T_DATA1)
989  ed->hwHeadP |= cpu_to_hc32(ohci, ED_C);
990 
991  /* HC may have partly processed this TD */
992  td_done (ohci, urb, td);
993  urb_priv->td_cnt++;
994 
995  /* if URB is done, clean up */
996  if (urb_priv->td_cnt == urb_priv->length) {
997  modified = completed = 1;
998  finish_urb(ohci, urb, 0);
999  }
1000  }
1001  if (completed && !list_empty (&ed->td_list))
1002  goto rescan_this;
1003 
1004  /* ED's now officially unlinked, hc doesn't see */
1005  ed->state = ED_IDLE;
1006  if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
1007  ohci->eds_scheduled--;
1008  ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1009  ed->hwNextED = 0;
1010  wmb ();
1011  ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
1012 
1013  /* but if there's work queued, reschedule */
1014  if (!list_empty (&ed->td_list)) {
1015  if (ohci->rh_state == OHCI_RH_RUNNING)
1016  ed_schedule (ohci, ed);
1017  }
1018 
1019  if (modified)
1020  goto rescan_all;
1021  }
1022 
1023  /* maybe reenable control and bulk lists */
1024  if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
1025  u32 command = 0, control = 0;
1026 
1027  if (ohci->ed_controltail) {
1028  command |= OHCI_CLF;
1029  if (quirk_zfmicro(ohci))
1030  mdelay(1);
1031  if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
1032  control |= OHCI_CTRL_CLE;
1033  ohci_writel (ohci, 0,
1034  &ohci->regs->ed_controlcurrent);
1035  }
1036  }
1037  if (ohci->ed_bulktail) {
1038  command |= OHCI_BLF;
1039  if (quirk_zfmicro(ohci))
1040  mdelay(1);
1041  if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
1042  control |= OHCI_CTRL_BLE;
1043  ohci_writel (ohci, 0,
1044  &ohci->regs->ed_bulkcurrent);
1045  }
1046  }
1047 
1048  /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
1049  if (control) {
1050  ohci->hc_control |= control;
1051  if (quirk_zfmicro(ohci))
1052  mdelay(1);
1053  ohci_writel (ohci, ohci->hc_control,
1054  &ohci->regs->control);
1055  }
1056  if (command) {
1057  if (quirk_zfmicro(ohci))
1058  mdelay(1);
1059  ohci_writel (ohci, command, &ohci->regs->cmdstatus);
1060  }
1061  }
1062 }
1063 
1064 
1065 
1066 /*-------------------------------------------------------------------------*/
1067 
1068 /*
1069  * Used to take back a TD from the host controller. This would normally be
1070  * called from within dl_done_list, however it may be called directly if the
1071  * HC no longer sees the TD and it has not appeared on the donelist (after
1072  * two frames). This bug has been observed on ZF Micro systems.
1073  */
1074 static void takeback_td(struct ohci_hcd *ohci, struct td *td)
1075 {
1076  struct urb *urb = td->urb;
1077  urb_priv_t *urb_priv = urb->hcpriv;
1078  struct ed *ed = td->ed;
1079  int status;
1080 
1081  /* update URB's length and status from TD */
1082  status = td_done(ohci, urb, td);
1083  urb_priv->td_cnt++;
1084 
1085  /* If all this urb's TDs are done, call complete() */
1086  if (urb_priv->td_cnt == urb_priv->length)
1087  finish_urb(ohci, urb, status);
1088 
1089  /* clean schedule: unlink EDs that are no longer busy */
1090  if (list_empty(&ed->td_list)) {
1091  if (ed->state == ED_OPER)
1092  start_ed_unlink(ohci, ed);
1093 
1094  /* ... reenabling halted EDs only after fault cleanup */
1095  } else if ((ed->hwINFO & cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE))
1096  == cpu_to_hc32(ohci, ED_SKIP)) {
1097  td = list_entry(ed->td_list.next, struct td, td_list);
1098  if (!(td->hwINFO & cpu_to_hc32(ohci, TD_DONE))) {
1099  ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP);
1100  /* ... hc may need waking-up */
1101  switch (ed->type) {
1102  case PIPE_CONTROL:
1103  ohci_writel(ohci, OHCI_CLF,
1104  &ohci->regs->cmdstatus);
1105  break;
1106  case PIPE_BULK:
1107  ohci_writel(ohci, OHCI_BLF,
1108  &ohci->regs->cmdstatus);
1109  break;
1110  }
1111  }
1112  }
1113 }
1114 
1115 /*
1116  * Process normal completions (error or success) and clean the schedules.
1117  *
1118  * This is the main path for handing urbs back to drivers. The only other
1119  * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
1120  * instead of scanning the (re-reversed) donelist as this does. There's
1121  * an abnormal path too, handling a quirk in some Compaq silicon: URBs
1122  * with TDs that appear to be orphaned are directly reclaimed.
1123  */
1124 static void
1125 dl_done_list (struct ohci_hcd *ohci)
1126 {
1127  struct td *td = dl_reverse_done_list (ohci);
1128 
1129  while (td) {
1130  struct td *td_next = td->next_dl_td;
1131  struct ed *ed = td->ed;
1132 
1133  /*
1134  * Some OHCI controllers (NVIDIA for sure, maybe others)
1135  * occasionally forget to add TDs to the done queue. Since
1136  * TDs for a given endpoint are always processed in order,
1137  * if we find a TD on the donelist then all of its
1138  * predecessors must be finished as well.
1139  */
1140  for (;;) {
1141  struct td *td2;
1142 
1143  td2 = list_first_entry(&ed->td_list, struct td,
1144  td_list);
1145  if (td2 == td)
1146  break;
1147  takeback_td(ohci, td2);
1148  }
1149 
1150  takeback_td(ohci, td);
1151  td = td_next;
1152  }
1153 }
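/*
 * Editorial sketch (not part of the original file): a simplified paraphrase,
 * not the actual interrupt handler in ohci-hcd.c, of how these completion
 * paths are driven.  On a writeback-done-head (WDH) interrupt the donelist
 * is scanned via dl_done_list(); on a start-of-frame (SF) interrupt pending
 * endpoint unlinks are reclaimed via finish_unlinks().
 */
#if 0
static void example_irq_tail(struct ohci_hcd *ohci, u32 ints)
{
	spin_lock(&ohci->lock);
	if (ints & OHCI_INTR_WDH)
		dl_done_list(ohci);				/* normal completions */
	if ((ints & OHCI_INTR_SF) && ohci->ed_rm_list)
		finish_unlinks(ohci, ohci_frame_no(ohci));	/* deferred unlinks */
	spin_unlock(&ohci->lock);
}
#endif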