Linux Kernel 3.7.1
imx21-hcd.c
1 /*
2  * USB Host Controller Driver for IMX21
3  *
4  * Copyright (C) 2006 Loping Dog Embedded Systems
5  * Copyright (C) 2009 Martin Fuzzey
6  * Originally written by Jay Monkman <[email protected]>
7  * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the
11  * Free Software Foundation; either version 2 of the License, or (at your
12  * option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17  * for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software Foundation,
21  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 
25  /*
26  * The i.MX21 USB hardware contains
27  * * 32 transfer descriptors (called ETDs)
28  * * 4KB of data memory
29  *
30  * The data memory is shared between the host and function controllers
31  * (but this driver only supports the host controller)
32  *
33  * So setting up a transfer involves:
34  * * Allocating an ETD
35  * * Filling in the ETD with the appropriate information
36  * * Allocating data memory (and putting the offset in the ETD)
37  * * Activating the ETD
38  * * Getting an interrupt when done.
39  *
40  * An ETD is assigned to each active endpoint.
41  *
42  * Low resource (ETD and data memory) situations are handled differently for
43  * isochronous and non-isochronous transactions:
44  *
45  * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
46  *
47  * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
48  * They allocate both ETDs and Data memory during URB submission
49  * (and fail if unavailable).
50  */
51 
52 #include <linux/clk.h>
53 #include <linux/io.h>
54 #include <linux/kernel.h>
55 #include <linux/list.h>
56 #include <linux/platform_device.h>
57 #include <linux/slab.h>
58 #include <linux/usb.h>
59 #include <linux/usb/hcd.h>
60 #include <linux/dma-mapping.h>
61 
62 #include "imx21-hcd.h"
63 
64 #ifdef DEBUG
65 #define DEBUG_LOG_FRAME(imx21, etd, event) \
66  (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
67 #else
68 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
69 #endif
70 
71 static const char hcd_name[] = "imx21-hcd";
72 
73 static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
74 {
75  return (struct imx21 *)hcd->hcd_priv;
76 }
77 
78 
79 /* =========================================== */
80 /* Hardware access helpers */
81 /* =========================================== */
82 
83 static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
84 {
85  void __iomem *reg = imx21->regs + offset;
86  writel(readl(reg) | mask, reg);
87 }
88 
89 static inline void clear_register_bits(struct imx21 *imx21,
90  u32 offset, u32 mask)
91 {
92  void __iomem *reg = imx21->regs + offset;
93  writel(readl(reg) & ~mask, reg);
94 }
95 
96 static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
97 {
98  void __iomem *reg = imx21->regs + offset;
99 
100  if (readl(reg) & mask)
101  writel(mask, reg);
102 }
103 
104 static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
105 {
106  void __iomem *reg = imx21->regs + offset;
107 
108  if (!(readl(reg) & mask))
109  writel(mask, reg);
110 }
111 
112 static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
113 {
114  writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
115 }
116 
117 static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
118 {
119  return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
120 }
121 
122 static inline int wrap_frame(int counter)
123 {
124  return counter & 0xFFFF;
125 }
126 
127 static inline int frame_after(int frame, int after)
128 {
129  /* handle wrapping like jiffies time_after() */
130  return (s16)((s16)after - (s16)frame) < 0;
131 }
132 
133 static int imx21_hc_get_frame(struct usb_hcd *hcd)
134 {
135  struct imx21 *imx21 = hcd_to_imx21(hcd);
136 
137  return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
138 }
139 
140 static inline bool unsuitable_for_dma(dma_addr_t addr)
141 {
142  return (addr & 3) != 0;
143 }
144 
145 #include "imx21-dbg.c"
146 
147 static void nonisoc_urb_completed_for_etd(
148  struct imx21 *imx21, struct etd_priv *etd, int status);
149 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
150 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
151 
152 /* =========================================== */
153 /* ETD management */
154 /* =========================================== */
155 
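/*
 * Find and claim a free ETD slot out of the USB_NUM_ETD provided by the
 * hardware. Returns the ETD index, or -1 if all ETDs are in use.
 * Callers appear to rely on holding imx21->lock across allocation.
 */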
156 static int alloc_etd(struct imx21 *imx21)
157 {
158  int i;
159  struct etd_priv *etd = imx21->etd;
160 
161  for (i = 0; i < USB_NUM_ETD; i++, etd++) {
162  if (etd->alloc == 0) {
163  memset(etd, 0, sizeof(imx21->etd[0]));
164  etd->alloc = 1;
165  debug_etd_allocated(imx21);
166  return i;
167  }
168  }
169  return -1;
170 }
171 
172 static void disactivate_etd(struct imx21 *imx21, int num)
173 {
174  int etd_mask = (1 << num);
175  struct etd_priv *etd = &imx21->etd[num];
176 
177  writel(etd_mask, imx21->regs + USBH_ETDENCLR);
178  clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
179  writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
180  clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
181 
182  etd->active_count = 0;
183 
184  DEBUG_LOG_FRAME(imx21, etd, disactivated);
185 }
186 
187 static void reset_etd(struct imx21 *imx21, int num)
188 {
189  struct etd_priv *etd = imx21->etd + num;
190  int i;
191 
192  disactivate_etd(imx21, num);
193 
194  for (i = 0; i < 4; i++)
195  etd_writel(imx21, num, i, 0);
196  etd->urb = NULL;
197  etd->ep = NULL;
198  etd->td = NULL;
199  etd->bounce_buffer = NULL;
200 }
201 
202 static void free_etd(struct imx21 *imx21, int num)
203 {
204  if (num < 0)
205  return;
206 
207  if (num >= USB_NUM_ETD) {
208  dev_err(imx21->dev, "BAD etd=%d!\n", num);
209  return;
210  }
211  if (imx21->etd[num].alloc == 0) {
212  dev_err(imx21->dev, "ETD %d already free!\n", num);
213  return;
214  }
215 
216  debug_etd_freed(imx21);
217  reset_etd(imx21, num);
218  memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
219 }
220 
221 
222 static void setup_etd_dword0(struct imx21 *imx21,
223  int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
224 {
225  etd_writel(imx21, etd_num, 0,
226  ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
227  ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
228  ((u32) dir << DW0_DIRECT) |
229  ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
230  1 : 0) << DW0_SPEED) |
231  ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
232  ((u32) maxpacket << DW0_MAXPKTSIZ));
233 }
234 
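/*
 * Copy a driver buffer into the on-chip data memory (DMEM).
 * The data memory apparently only supports 32-bit accesses, so bytes are
 * packed into words here rather than using memcpy_toio().
 */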
239 static void copy_to_dmem(
240  struct imx21 *imx21, int dmem_offset, void *src, int count)
241 {
242  void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
243  u32 word = 0;
244  u8 *p = src;
245  int byte = 0;
246  int i;
247 
248  for (i = 0; i < count; i++) {
249  byte = i % 4;
250  word += (*p++ << (byte * 8));
251  if (byte == 3) {
252  writel(word, dmem);
253  dmem += 4;
254  word = 0;
255  }
256  }
257 
258  if (count && byte != 3)
259  writel(word, dmem);
260 }
261 
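/*
 * Start a transfer on an ETD that has already been set up.
 * Buffers that are not 4-byte aligned cannot be handed to the DMA engine:
 * if the transfer fits into the allocated DMEM it is done by PIO instead,
 * otherwise a bounce buffer is allocated (GFP_ATOMIC) and DMA mapped.
 * If the bounce buffer cannot be set up the URB is completed with -ENOMEM.
 */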
262 static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
263 {
264  u32 etd_mask = 1 << etd_num;
265  struct etd_priv *etd = &imx21->etd[etd_num];
266 
267  if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
268  /* For non aligned isoc the condition below is always true */
269  if (etd->len <= etd->dmem_size) {
270  /* Fits into data memory, use PIO */
271  if (dir != TD_DIR_IN) {
272  copy_to_dmem(imx21,
273  etd->dmem_offset,
274  etd->cpu_buffer, etd->len);
275  }
276  etd->dma_handle = 0;
277 
278  } else {
279  /* Too big for data memory, use bounce buffer */
280  enum dma_data_direction dmadir;
281 
282  if (dir == TD_DIR_IN) {
283  dmadir = DMA_FROM_DEVICE;
284  etd->bounce_buffer = kmalloc(etd->len,
285  GFP_ATOMIC);
286  } else {
287  dmadir = DMA_TO_DEVICE;
288  etd->bounce_buffer = kmemdup(etd->cpu_buffer,
289  etd->len,
290  GFP_ATOMIC);
291  }
292  if (!etd->bounce_buffer) {
293  dev_err(imx21->dev, "failed bounce alloc\n");
294  goto err_bounce_alloc;
295  }
296 
297  etd->dma_handle =
298  dma_map_single(imx21->dev,
299  etd->bounce_buffer,
300  etd->len,
301  dmadir);
302  if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
303  dev_err(imx21->dev, "failed bounce map\n");
304  goto err_bounce_map;
305  }
306  }
307  }
308 
309  clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
310  set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
311  clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
312  clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
313 
314  if (etd->dma_handle) {
315  set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
316  clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
317  clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
318  writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
319  set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
320  } else {
321  if (dir != TD_DIR_IN) {
322  /* need to set for ZLP and PIO */
323  set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
324  set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
325  }
326  }
327 
328  DEBUG_LOG_FRAME(imx21, etd, activated);
329 
330 #ifdef DEBUG
331  if (!etd->active_count) {
332  int i;
333  etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
334  etd->disactivated_frame = -1;
335  etd->last_int_frame = -1;
336  etd->last_req_frame = -1;
337 
338  for (i = 0; i < 4; i++)
339  etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
340  }
341 #endif
342 
343  etd->active_count = 1;
344  writel(etd_mask, imx21->regs + USBH_ETDENSET);
345  return;
346 
347 err_bounce_map:
348  kfree(etd->bounce_buffer);
349 
350 err_bounce_alloc:
351  free_dmem(imx21, etd);
352  nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
353 }
354 
355 /* =========================================== */
356 /* Data memory management */
357 /* =========================================== */
358 
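/*
 * First-fit allocator for the 4KB of data memory shared by all endpoints.
 * The area list is kept sorted by offset and the requested size is rounded
 * up to a 4-byte multiple. Returns the DMEM byte offset on success or a
 * negative errno.
 */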
359 static int alloc_dmem(struct imx21 *imx21, unsigned int size,
360  struct usb_host_endpoint *ep)
361 {
362  unsigned int offset = 0;
363  struct imx21_dmem_area *area;
364  struct imx21_dmem_area *tmp;
365 
366  size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
367 
368  if (size > DMEM_SIZE) {
369  dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
370  size, DMEM_SIZE);
371  return -EINVAL;
372  }
373 
374  list_for_each_entry(tmp, &imx21->dmem_list, list) {
375  if ((size + offset) < offset)
376  goto fail;
377  if ((size + offset) <= tmp->offset)
378  break;
379  offset = tmp->size + tmp->offset;
380  if ((offset + size) > DMEM_SIZE)
381  goto fail;
382  }
383 
384  area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
385  if (area == NULL)
386  return -ENOMEM;
387 
388  area->ep = ep;
389  area->offset = offset;
390  area->size = size;
391  list_add_tail(&area->list, &tmp->list);
392  debug_dmem_allocated(imx21, size);
393  return offset;
394 
395 fail:
396  return -ENOMEM;
397 }
398 
399 /* Memory now available for a queued ETD - activate it */
400 static void activate_queued_etd(struct imx21 *imx21,
401  struct etd_priv *etd, u32 dmem_offset)
402 {
403  struct urb_priv *urb_priv = etd->urb->hcpriv;
404  int etd_num = etd - &imx21->etd[0];
405  u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
406  u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
407 
408  dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
409  etd_num);
410  etd_writel(imx21, etd_num, 1,
411  ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
412 
413  etd->dmem_offset = dmem_offset;
414  urb_priv->active = 1;
415  activate_etd(imx21, etd_num, dir);
416 }
417 
418 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
419 {
420  struct imx21_dmem_area *area;
421  struct etd_priv *tmp;
422  int found = 0;
423  int offset;
424 
425  if (!etd->dmem_size)
426  return;
427  etd->dmem_size = 0;
428 
429  offset = etd->dmem_offset;
430  list_for_each_entry(area, &imx21->dmem_list, list) {
431  if (area->offset == offset) {
432  debug_dmem_freed(imx21, area->size);
433  list_del(&area->list);
434  kfree(area);
435  found = 1;
436  break;
437  }
438  }
439 
440  if (!found) {
441  dev_err(imx21->dev,
442  "Trying to free unallocated DMEM %d\n", offset);
443  return;
444  }
445 
446  /* Try again to allocate memory for anything we've queued */
447  list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
448  offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
449  if (offset >= 0) {
450  list_del(&etd->queue);
451  activate_queued_etd(imx21, etd, (u32)offset);
452  }
453  }
454 }
455 
456 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
457 {
458  struct imx21_dmem_area *area, *tmp;
459 
460  list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
461  if (area->ep == ep) {
462  dev_err(imx21->dev,
463  "Active DMEM %d for disabled ep=%p\n",
464  area->offset, ep);
465  list_del(&area->list);
466  kfree(area);
467  }
468  }
469 }
470 
471 
472 /* =========================================== */
473  /* Endpoint handling */
474 /* =========================================== */
475 
476 /* Endpoint now idle - release its ETD(s) or assign to queued request */
477 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
478 {
479  int i;
480 
481  for (i = 0; i < NUM_ISO_ETDS; i++) {
482  int etd_num = ep_priv->etd[i];
483  struct etd_priv *etd;
484  if (etd_num < 0)
485  continue;
486 
487  etd = &imx21->etd[etd_num];
488  ep_priv->etd[i] = -1;
489 
490  free_dmem(imx21, etd); /* for isoc */
491 
492  if (list_empty(&imx21->queue_for_etd)) {
493  free_etd(imx21, etd_num);
494  continue;
495  }
496 
497  dev_dbg(imx21->dev,
498  "assigning idle etd %d for queued request\n", etd_num);
499  ep_priv = list_first_entry(&imx21->queue_for_etd,
500  struct ep_priv, queue);
501  list_del(&ep_priv->queue);
502  reset_etd(imx21, etd_num);
503  ep_priv->waiting_etd = 0;
504  ep_priv->etd[i] = etd_num;
505 
506  if (list_empty(&ep_priv->ep->urb_list)) {
507  dev_err(imx21->dev, "No urb for queued ep!\n");
508  continue;
509  }
510  schedule_nonisoc_etd(imx21, list_first_entry(
511  &ep_priv->ep->urb_list, struct urb, urb_list));
512  }
513 }
514 
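/*
 * Complete an URB and hand it back to the USB core. imx21->lock is dropped
 * around usb_hcd_giveback_urb() (hence the __releases/__acquires
 * annotations) and the endpoint is idled once its URB list becomes empty.
 */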
515 static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
516 __releases(imx21->lock)
517 __acquires(imx21->lock)
518 {
519  struct imx21 *imx21 = hcd_to_imx21(hcd);
520  struct ep_priv *ep_priv = urb->ep->hcpriv;
521  struct urb_priv *urb_priv = urb->hcpriv;
522 
523  debug_urb_completed(imx21, urb, status);
524  dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
525 
526  kfree(urb_priv->isoc_td);
527  kfree(urb->hcpriv);
528  urb->hcpriv = NULL;
529  usb_hcd_unlink_urb_from_ep(hcd, urb);
530  spin_unlock(&imx21->lock);
531  usb_hcd_giveback_urb(hcd, urb, status);
532  spin_lock(&imx21->lock);
533  if (list_empty(&ep_priv->ep->urb_list))
534  ep_idle(imx21, ep_priv);
535 }
536 
537 static void nonisoc_urb_completed_for_etd(
538  struct imx21 *imx21, struct etd_priv *etd, int status)
539 {
540  struct usb_host_endpoint *ep = etd->ep;
541 
542  urb_done(imx21->hcd, etd->urb, status);
543  etd->urb = NULL;
544 
545  if (!list_empty(&ep->urb_list)) {
546  struct urb *urb = list_first_entry(
547  &ep->urb_list, struct urb, urb_list);
548 
549  dev_vdbg(imx21->dev, "next URB %p\n", urb);
550  schedule_nonisoc_etd(imx21, urb);
551  }
552 }
553 
554 
555 /* =========================================== */
556 /* ISOC Handling ... */
557 /* =========================================== */
558 
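/*
 * Feed pending isochronous TDs to this endpoint's ETDs (two per endpoint
 * for double buffering, as described at the top of this file). TDs whose
 * scheduled frame has already passed are completed with -EXDEV; the rest
 * are written to an ETD and activated.
 */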
559 static void schedule_isoc_etds(struct usb_hcd *hcd,
560  struct usb_host_endpoint *ep)
561 {
562  struct imx21 *imx21 = hcd_to_imx21(hcd);
563  struct ep_priv *ep_priv = ep->hcpriv;
564  struct etd_priv *etd;
565  struct urb_priv *urb_priv;
566  struct td *td;
567  int etd_num;
568  int i;
569  int cur_frame;
570  u8 dir;
571 
572  for (i = 0; i < NUM_ISO_ETDS; i++) {
573 too_late:
574  if (list_empty(&ep_priv->td_list))
575  break;
576 
577  etd_num = ep_priv->etd[i];
578  if (etd_num < 0)
579  break;
580 
581  etd = &imx21->etd[etd_num];
582  if (etd->urb)
583  continue;
584 
585  td = list_entry(ep_priv->td_list.next, struct td, list);
586  list_del(&td->list);
587  urb_priv = td->urb->hcpriv;
588 
589  cur_frame = imx21_hc_get_frame(hcd);
590  if (frame_after(cur_frame, td->frame)) {
591  dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
592  cur_frame, td->frame);
593  urb_priv->isoc_status = -EXDEV;
594  td->urb->iso_frame_desc[
595  td->isoc_index].actual_length = 0;
596  td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
597  if (--urb_priv->isoc_remaining == 0)
598  urb_done(hcd, td->urb, urb_priv->isoc_status);
599  goto too_late;
600  }
601 
602  urb_priv->active = 1;
603  etd->td = td;
604  etd->ep = td->ep;
605  etd->urb = td->urb;
606  etd->len = td->len;
607  etd->dma_handle = td->dma_handle;
608  etd->cpu_buffer = td->cpu_buffer;
609 
610  debug_isoc_submitted(imx21, cur_frame, td);
611 
612  dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
613  setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
614  etd_writel(imx21, etd_num, 1, etd->dmem_offset);
615  etd_writel(imx21, etd_num, 2,
616  (TD_NOTACCESSED << DW2_COMPCODE) |
617  ((td->frame & 0xFFFF) << DW2_STARTFRM));
618  etd_writel(imx21, etd_num, 3,
619  (TD_NOTACCESSED << DW3_COMPCODE0) |
620  (td->len << DW3_PKTLEN0));
621 
622  activate_etd(imx21, etd_num, dir);
623  }
624 }
625 
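/*
 * Completion handler for one isochronous ETD: read the completion code and
 * byte count, copy IN data out of DMEM when PIO was used, fill in the
 * iso_frame_desc entry and, once every packet of the URB has completed,
 * give the URB back before rescheduling the endpoint.
 */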
626 static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
627 {
628  struct imx21 *imx21 = hcd_to_imx21(hcd);
629  int etd_mask = 1 << etd_num;
630  struct etd_priv *etd = imx21->etd + etd_num;
631  struct urb *urb = etd->urb;
632  struct urb_priv *urb_priv = urb->hcpriv;
633  struct td *td = etd->td;
634  struct usb_host_endpoint *ep = etd->ep;
635  int isoc_index = td->isoc_index;
636  unsigned int pipe = urb->pipe;
637  int dir_in = usb_pipein(pipe);
638  int cc;
639  int bytes_xfrd;
640 
641  disactivate_etd(imx21, etd_num);
642 
643  cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
644  bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
645 
646  /* Input doesn't always fill the buffer, don't generate an error
647  * when this happens.
648  */
649  if (dir_in && (cc == TD_DATAUNDERRUN))
650  cc = TD_CC_NOERROR;
651 
652  if (cc == TD_NOTACCESSED)
653  bytes_xfrd = 0;
654 
655  debug_isoc_completed(imx21,
656  imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
657  if (cc) {
658  urb_priv->isoc_status = -EXDEV;
659  dev_dbg(imx21->dev,
660  "bad iso cc=0x%X frame=%d sched frame=%d "
661  "cnt=%d len=%d urb=%p etd=%d index=%d\n",
662  cc, imx21_hc_get_frame(hcd), td->frame,
663  bytes_xfrd, td->len, urb, etd_num, isoc_index);
664  }
665 
666  if (dir_in) {
667  clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
668  if (!etd->dma_handle)
669  memcpy_fromio(etd->cpu_buffer,
670  imx21->regs + USBOTG_DMEM + etd->dmem_offset,
671  bytes_xfrd);
672  }
673 
674  urb->actual_length += bytes_xfrd;
675  urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
676  urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
677 
678  etd->td = NULL;
679  etd->urb = NULL;
680  etd->ep = NULL;
681 
682  if (--urb_priv->isoc_remaining == 0)
683  urb_done(hcd, urb, urb_priv->isoc_status);
684 
685  schedule_isoc_etds(hcd, ep);
686 }
687 
688 static struct ep_priv *alloc_isoc_ep(
689  struct imx21 *imx21, struct usb_host_endpoint *ep)
690 {
691  struct ep_priv *ep_priv;
692  int i;
693 
694  ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
695  if (!ep_priv)
696  return NULL;
697 
698  for (i = 0; i < NUM_ISO_ETDS; i++)
699  ep_priv->etd[i] = -1;
700 
701  INIT_LIST_HEAD(&ep_priv->td_list);
702  ep_priv->ep = ep;
703  ep->hcpriv = ep_priv;
704  return ep_priv;
705 }
706 
707 static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
708 {
709  int i, j;
710  int etd_num;
711 
712  /* Allocate the ETDs if required */
713  for (i = 0; i < NUM_ISO_ETDS; i++) {
714  if (ep_priv->etd[i] < 0) {
715  etd_num = alloc_etd(imx21);
716  if (etd_num < 0)
717  goto alloc_etd_failed;
718 
719  ep_priv->etd[i] = etd_num;
720  imx21->etd[etd_num].ep = ep_priv->ep;
721  }
722  }
723  return 0;
724 
725 alloc_etd_failed:
726  dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
727  for (j = 0; j < i; j++) {
728  free_etd(imx21, ep_priv->etd[j]);
729  ep_priv->etd[j] = -1;
730  }
731  return -ENOMEM;
732 }
733 
734 static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
735  struct usb_host_endpoint *ep,
736  struct urb *urb, gfp_t mem_flags)
737 {
738  struct imx21 *imx21 = hcd_to_imx21(hcd);
739  struct urb_priv *urb_priv;
740  unsigned long flags;
741  struct ep_priv *ep_priv;
742  struct td *td = NULL;
743  int i;
744  int ret;
745  int cur_frame;
746  u16 maxpacket;
747 
748  urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
749  if (urb_priv == NULL)
750  return -ENOMEM;
751 
752  urb_priv->isoc_td = kzalloc(
753  sizeof(struct td) * urb->number_of_packets, mem_flags);
754  if (urb_priv->isoc_td == NULL) {
755  ret = -ENOMEM;
756  goto alloc_td_failed;
757  }
758 
759  spin_lock_irqsave(&imx21->lock, flags);
760 
761  if (ep->hcpriv == NULL) {
762  ep_priv = alloc_isoc_ep(imx21, ep);
763  if (ep_priv == NULL) {
764  ret = -ENOMEM;
765  goto alloc_ep_failed;
766  }
767  } else {
768  ep_priv = ep->hcpriv;
769  }
770 
771  ret = alloc_isoc_etds(imx21, ep_priv);
772  if (ret)
773  goto alloc_etd_failed;
774 
775  ret = usb_hcd_link_urb_to_ep(hcd, urb);
776  if (ret)
777  goto link_failed;
778 
779  urb->status = -EINPROGRESS;
780  urb->actual_length = 0;
781  urb->error_count = 0;
782  urb->hcpriv = urb_priv;
783  urb_priv->ep = ep;
784 
785  /* allocate data memory for largest packets if not already done */
786  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
787  for (i = 0; i < NUM_ISO_ETDS; i++) {
788  struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
789 
790  if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
791  /* not sure if this can really occur.... */
792  dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
793  etd->dmem_size, maxpacket);
794  ret = -EMSGSIZE;
795  goto alloc_dmem_failed;
796  }
797 
798  if (etd->dmem_size == 0) {
799  etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
800  if (etd->dmem_offset < 0) {
801  dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
802  ret = -EAGAIN;
803  goto alloc_dmem_failed;
804  }
805  etd->dmem_size = maxpacket;
806  }
807  }
808 
809  /* calculate frame */
810  cur_frame = imx21_hc_get_frame(hcd);
811  if (urb->transfer_flags & URB_ISO_ASAP) {
812  if (list_empty(&ep_priv->td_list))
813  urb->start_frame = cur_frame + 5;
814  else
815  urb->start_frame = list_entry(
816  ep_priv->td_list.prev,
817  struct td, list)->frame + urb->interval;
818  }
819  urb->start_frame = wrap_frame(urb->start_frame);
820  if (frame_after(cur_frame, urb->start_frame)) {
821  dev_dbg(imx21->dev,
822  "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
823  urb->start_frame, cur_frame,
824  (urb->transfer_flags & URB_ISO_ASAP) != 0);
825  urb->start_frame = wrap_frame(cur_frame + 1);
826  }
827 
828  /* set up transfers */
829  td = urb_priv->isoc_td;
830  for (i = 0; i < urb->number_of_packets; i++, td++) {
831  unsigned int offset = urb->iso_frame_desc[i].offset;
832  td->ep = ep;
833  td->urb = urb;
834  td->len = urb->iso_frame_desc[i].length;
835  td->isoc_index = i;
836  td->frame = wrap_frame(urb->start_frame + urb->interval * i);
837  td->dma_handle = urb->transfer_dma + offset;
838  td->cpu_buffer = urb->transfer_buffer + offset;
839  list_add_tail(&td->list, &ep_priv->td_list);
840  }
841 
842  urb_priv->isoc_remaining = urb->number_of_packets;
843  dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
844  urb->number_of_packets, urb->start_frame, td->frame);
845 
846  debug_urb_submitted(imx21, urb);
847  schedule_isoc_etds(hcd, ep);
848 
849  spin_unlock_irqrestore(&imx21->lock, flags);
850  return 0;
851 
852 alloc_dmem_failed:
853  usb_hcd_unlink_urb_from_ep(hcd, urb);
854 
855 link_failed:
856 alloc_etd_failed:
857 alloc_ep_failed:
858  spin_unlock_irqrestore(&imx21->lock, flags);
859  kfree(urb_priv->isoc_td);
860 
861 alloc_td_failed:
862  kfree(urb_priv);
863  return ret;
864 }
865 
866 static void dequeue_isoc_urb(struct imx21 *imx21,
867  struct urb *urb, struct ep_priv *ep_priv)
868 {
869  struct urb_priv *urb_priv = urb->hcpriv;
870  struct td *td, *tmp;
871  int i;
872 
873  if (urb_priv->active) {
874  for (i = 0; i < NUM_ISO_ETDS; i++) {
875  int etd_num = ep_priv->etd[i];
876  if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
877  struct etd_priv *etd = imx21->etd + etd_num;
878 
879  reset_etd(imx21, etd_num);
880  free_dmem(imx21, etd);
881  }
882  }
883  }
884 
885  list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
886  if (td->urb == urb) {
887  dev_vdbg(imx21->dev, "removing td %p\n", td);
888  list_del(&td->list);
889  }
890  }
891 }
892 
893 /* =========================================== */
894 /* NON ISOC Handling ... */
895 /* =========================================== */
896 
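/*
 * Program and activate the endpoint's ETD for the next stage of a control,
 * bulk or interrupt URB. Control transfers step through the
 * US_CTRL_SETUP/DATA/ACK states; bulk transfers may add a US_BULK0 stage
 * for a trailing zero-length packet. If no data memory is available the
 * ETD is parked on queue_for_dmem and activated later from free_dmem().
 */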
897 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
898 {
899  unsigned int pipe = urb->pipe;
900  struct urb_priv *urb_priv = urb->hcpriv;
901  struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
902  int state = urb_priv->state;
903  int etd_num = ep_priv->etd[0];
904  struct etd_priv *etd;
905  u32 count;
906  u16 etd_buf_size;
907  u16 maxpacket;
908  u8 dir;
909  u8 bufround;
910  u8 datatoggle;
911  u8 interval = 0;
912  u8 relpolpos = 0;
913 
914  if (etd_num < 0) {
915  dev_err(imx21->dev, "No valid ETD\n");
916  return;
917  }
918  if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
919  dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
920 
921  etd = &imx21->etd[etd_num];
922  maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
923  if (!maxpacket)
924  maxpacket = 8;
925 
926  if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
927  if (state == US_CTRL_SETUP) {
928  dir = TD_DIR_SETUP;
929  if (unsuitable_for_dma(urb->setup_dma))
930  usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
931  urb);
932  etd->dma_handle = urb->setup_dma;
933  etd->cpu_buffer = urb->setup_packet;
934  bufround = 0;
935  count = 8;
936  datatoggle = TD_TOGGLE_DATA0;
937  } else { /* US_CTRL_ACK */
938  dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
939  bufround = 0;
940  count = 0;
941  datatoggle = TD_TOGGLE_DATA1;
942  }
943  } else {
944  dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
945  bufround = (dir == TD_DIR_IN) ? 1 : 0;
946  if (unsuitable_for_dma(urb->transfer_dma))
947  usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
948 
949  etd->dma_handle = urb->transfer_dma;
950  etd->cpu_buffer = urb->transfer_buffer;
951  if (usb_pipebulk(pipe) && (state == US_BULK0))
952  count = 0;
953  else
954  count = urb->transfer_buffer_length;
955 
956  if (usb_pipecontrol(pipe)) {
957  datatoggle = TD_TOGGLE_DATA1;
958  } else {
959  if (usb_gettoggle(
960  urb->dev,
961  usb_pipeendpoint(urb->pipe),
962  usb_pipeout(urb->pipe)))
963  datatoggle = TD_TOGGLE_DATA1;
964  else
965  datatoggle = TD_TOGGLE_DATA0;
966  }
967  }
968 
969  etd->urb = urb;
970  etd->ep = urb_priv->ep;
971  etd->len = count;
972 
973  if (usb_pipeint(pipe)) {
974  interval = urb->interval;
975  relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
976  }
977 
978  /* Write ETD to device memory */
979  setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
980 
981  etd_writel(imx21, etd_num, 2,
982  (u32) interval << DW2_POLINTERV |
983  ((u32) relpolpos << DW2_RELPOLPOS) |
984  ((u32) dir << DW2_DIRPID) |
985  ((u32) bufround << DW2_BUFROUND) |
986  ((u32) datatoggle << DW2_DATATOG) |
987  ((u32) TD_NOTACCESSED << DW2_COMPCODE));
988 
989  /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
990  is smaller. Make sure we don't overrun the buffer!
991  */
992  if (count && count < maxpacket)
993  etd_buf_size = count;
994  else
995  etd_buf_size = maxpacket;
996 
997  etd_writel(imx21, etd_num, 3,
998  ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
999 
1000  if (!count)
1001  etd->dma_handle = 0;
1002 
1003  /* allocate x and y buffer space at once */
1004  etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
1005  etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1006  if (etd->dmem_offset < 0) {
1007  /* Setup everything we can in HW and update when we get DMEM */
1008  etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
1009 
1010  dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1011  debug_urb_queued_for_dmem(imx21, urb);
1012  list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1013  return;
1014  }
1015 
1016  etd_writel(imx21, etd_num, 1,
1017  (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
1018  (u32) etd->dmem_offset);
1019 
1020  urb_priv->active = 1;
1021 
1022  /* enable the ETD to kick off transfer */
1023  dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1024  etd_num, count, dir != TD_DIR_IN ? "out" : "in");
1025  activate_etd(imx21, etd_num, dir);
1026 
1027 }
1028 
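/*
 * Completion handler for a non-isochronous ETD: save the data toggle carry,
 * copy IN data back from the bounce buffer or from DMEM (PIO case), release
 * the data memory, then either advance the control/bulk state machine or
 * complete the URB.
 */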
1029 static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
1030 {
1031  struct imx21 *imx21 = hcd_to_imx21(hcd);
1032  struct etd_priv *etd = &imx21->etd[etd_num];
1033  struct urb *urb = etd->urb;
1034  u32 etd_mask = 1 << etd_num;
1035  struct urb_priv *urb_priv = urb->hcpriv;
1036  int dir;
1037  int cc;
1038  u32 bytes_xfrd;
1039  int etd_done;
1040 
1041  disactivate_etd(imx21, etd_num);
1042 
1043  dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
1044  cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
1045  bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1046 
1047  /* save toggle carry */
1048  usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1049  usb_pipeout(urb->pipe),
1050  (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
1051 
1052  if (dir == TD_DIR_IN) {
1053  clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
1054  clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1055 
1056  if (etd->bounce_buffer) {
1057  memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1058  dma_unmap_single(imx21->dev,
1059  etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1060  } else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
1061  memcpy_fromio(etd->cpu_buffer,
1062  imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1063  bytes_xfrd);
1064  }
1065  }
1066 
1067  kfree(etd->bounce_buffer);
1068  etd->bounce_buffer = NULL;
1069  free_dmem(imx21, etd);
1070 
1071  urb->error_count = 0;
1072  if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
1073  && (cc == TD_DATAUNDERRUN))
1074  cc = TD_CC_NOERROR;
1075 
1076  if (cc != 0)
1077  dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
1078 
1079  etd_done = (cc_to_error[cc] != 0); /* stop if error */
1080 
1081  switch (usb_pipetype(urb->pipe)) {
1082  case PIPE_CONTROL:
1083  switch (urb_priv->state) {
1084  case US_CTRL_SETUP:
1085  if (urb->transfer_buffer_length > 0)
1086  urb_priv->state = US_CTRL_DATA;
1087  else
1088  urb_priv->state = US_CTRL_ACK;
1089  break;
1090  case US_CTRL_DATA:
1091  urb->actual_length += bytes_xfrd;
1092  urb_priv->state = US_CTRL_ACK;
1093  break;
1094  case US_CTRL_ACK:
1095  etd_done = 1;
1096  break;
1097  default:
1098  dev_err(imx21->dev,
1099  "Invalid pipe state %d\n", urb_priv->state);
1100  etd_done = 1;
1101  break;
1102  }
1103  break;
1104 
1105  case PIPE_BULK:
1106  urb->actual_length += bytes_xfrd;
1107  if ((urb_priv->state == US_BULK)
1108  && (urb->transfer_flags & URB_ZERO_PACKET)
1109  && urb->transfer_buffer_length > 0
1110  && ((urb->transfer_buffer_length %
1111  usb_maxpacket(urb->dev, urb->pipe,
1112  usb_pipeout(urb->pipe))) == 0)) {
1113  /* need a 0-packet */
1114  urb_priv->state = US_BULK0;
1115  } else {
1116  etd_done = 1;
1117  }
1118  break;
1119 
1120  case PIPE_INTERRUPT:
1121  urb->actual_length += bytes_xfrd;
1122  etd_done = 1;
1123  break;
1124  }
1125 
1126  if (etd_done)
1127  nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1128  else {
1129  dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
1130  schedule_nonisoc_etd(imx21, urb);
1131  }
1132 }
1133 
1134 
1135 static struct ep_priv *alloc_ep(void)
1136 {
1137  int i;
1138  struct ep_priv *ep_priv;
1139 
1140  ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
1141  if (!ep_priv)
1142  return NULL;
1143 
1144  for (i = 0; i < NUM_ISO_ETDS; ++i)
1145  ep_priv->etd[i] = -1;
1146 
1147  return ep_priv;
1148 }
1149 
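/*
 * Enqueue entry point for non-isochronous URBs (isochronous URBs are
 * redirected to imx21_hc_urb_enqueue_isoc). Allocates per-URB and
 * per-endpoint state and either schedules the transfer immediately or
 * queues the endpoint until an ETD becomes free.
 */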
1150 static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1151  struct urb *urb, gfp_t mem_flags)
1152 {
1153  struct imx21 *imx21 = hcd_to_imx21(hcd);
1154  struct usb_host_endpoint *ep = urb->ep;
1155  struct urb_priv *urb_priv;
1156  struct ep_priv *ep_priv;
1157  struct etd_priv *etd;
1158  int ret;
1159  unsigned long flags;
1160 
1161  dev_vdbg(imx21->dev,
1162  "enqueue urb=%p ep=%p len=%d "
1163  "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
1164  urb, ep,
1165  urb->transfer_buffer_length,
1166  urb->transfer_buffer, urb->transfer_dma,
1167  urb->setup_packet, urb->setup_dma);
1168 
1169  if (usb_pipeisoc(urb->pipe))
1170  return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1171 
1172  urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1173  if (!urb_priv)
1174  return -ENOMEM;
1175 
1176  spin_lock_irqsave(&imx21->lock, flags);
1177 
1178  ep_priv = ep->hcpriv;
1179  if (ep_priv == NULL) {
1180  ep_priv = alloc_ep();
1181  if (!ep_priv) {
1182  ret = -ENOMEM;
1183  goto failed_alloc_ep;
1184  }
1185  ep->hcpriv = ep_priv;
1186  ep_priv->ep = ep;
1187  }
1188 
1189  ret = usb_hcd_link_urb_to_ep(hcd, urb);
1190  if (ret)
1191  goto failed_link;
1192 
1193  urb->status = -EINPROGRESS;
1194  urb->actual_length = 0;
1195  urb->error_count = 0;
1196  urb->hcpriv = urb_priv;
1197  urb_priv->ep = ep;
1198 
1199  switch (usb_pipetype(urb->pipe)) {
1200  case PIPE_CONTROL:
1201  urb_priv->state = US_CTRL_SETUP;
1202  break;
1203  case PIPE_BULK:
1204  urb_priv->state = US_BULK;
1205  break;
1206  }
1207 
1208  debug_urb_submitted(imx21, urb);
1209  if (ep_priv->etd[0] < 0) {
1210  if (ep_priv->waiting_etd) {
1211  dev_dbg(imx21->dev,
1212  "no ETD available already queued %p\n",
1213  ep_priv);
1214  debug_urb_queued_for_etd(imx21, urb);
1215  goto out;
1216  }
1217  ep_priv->etd[0] = alloc_etd(imx21);
1218  if (ep_priv->etd[0] < 0) {
1219  dev_dbg(imx21->dev,
1220  "no ETD available queueing %p\n", ep_priv);
1221  debug_urb_queued_for_etd(imx21, urb);
1222  list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1223  ep_priv->waiting_etd = 1;
1224  goto out;
1225  }
1226  }
1227 
1228  /* Schedule if no URB already active for this endpoint */
1229  etd = &imx21->etd[ep_priv->etd[0]];
1230  if (etd->urb == NULL) {
1231  DEBUG_LOG_FRAME(imx21, etd, last_req);
1232  schedule_nonisoc_etd(imx21, urb);
1233  }
1234 
1235 out:
1236  spin_unlock_irqrestore(&imx21->lock, flags);
1237  return 0;
1238 
1239 failed_link:
1240 failed_alloc_ep:
1241  spin_unlock_irqrestore(&imx21->lock, flags);
1242  kfree(urb_priv);
1243  return ret;
1244 }
1245 
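/*
 * Unlink an URB. For isochronous URBs the pending TDs are removed and the
 * endpoint rescheduled; for an active non-isochronous URB the ETD is
 * deactivated and its data memory and bounce buffer are released before
 * the URB is given back.
 */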
1246 static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1247  int status)
1248 {
1249  struct imx21 *imx21 = hcd_to_imx21(hcd);
1250  unsigned long flags;
1251  struct usb_host_endpoint *ep;
1252  struct ep_priv *ep_priv;
1253  struct urb_priv *urb_priv = urb->hcpriv;
1254  int ret = -EINVAL;
1255 
1256  dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1257  urb, usb_pipeisoc(urb->pipe), status);
1258 
1259  spin_lock_irqsave(&imx21->lock, flags);
1260 
1261  ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1262  if (ret)
1263  goto fail;
1264  ep = urb_priv->ep;
1265  ep_priv = ep->hcpriv;
1266 
1267  debug_urb_unlinked(imx21, urb);
1268 
1269  if (usb_pipeisoc(urb->pipe)) {
1270  dequeue_isoc_urb(imx21, urb, ep_priv);
1271  schedule_isoc_etds(hcd, ep);
1272  } else if (urb_priv->active) {
1273  int etd_num = ep_priv->etd[0];
1274  if (etd_num != -1) {
1275  struct etd_priv *etd = &imx21->etd[etd_num];
1276 
1277  disactivate_etd(imx21, etd_num);
1278  free_dmem(imx21, etd);
1279  etd->urb = NULL;
1280  kfree(etd->bounce_buffer);
1281  etd->bounce_buffer = NULL;
1282  }
1283  }
1284 
1285  urb_done(hcd, urb, status);
1286 
1287  spin_unlock_irqrestore(&imx21->lock, flags);
1288  return 0;
1289 
1290 fail:
1291  spin_unlock_irqrestore(&imx21->lock, flags);
1292  return ret;
1293 }
1294 
1295 /* =========================================== */
1296 /* Interrupt dispatch */
1297 /* =========================================== */
1298 
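/*
 * Scan all ETDs from the interrupt handler. ETDs flagged in ETDDONESTAT are
 * completed normally; on SOF interrupts the "kludge" documented below also
 * unblocks transfers that completed without the done flag being set.
 */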
1299 static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1300 {
1301  int etd_num;
1302  int enable_sof_int = 0;
1303  unsigned long flags;
1304 
1305  spin_lock_irqsave(&imx21->lock, flags);
1306 
1307  for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1308  u32 etd_mask = 1 << etd_num;
1309  u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1310  u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1311  struct etd_priv *etd = &imx21->etd[etd_num];
1312 
1313 
1314  if (done) {
1315  DEBUG_LOG_FRAME(imx21, etd, last_int);
1316  } else {
1317 /*
1318  * Kludge warning!
1319  *
1320  * When multiple transfers are using the bus we sometimes get into a state
1321  * where the transfer has completed (the CC field of the ETD is != 0x0F),
1322  * the ETD has self disabled but the ETDDONESTAT flag is not set
1323  * (and hence no interrupt occurs).
1324  * This causes the transfer in question to hang.
1325  * The kludge below checks for this condition at each SOF and processes any
1326  * blocked ETDs (after an arbitrary 10 frame wait)
1327  *
1328  * With a single active transfer the usbtest test suite will run for days
1329  * without the kludge.
1330  * With other bus activity (eg mass storage) even just test1 will hang without
1331  * the kludge.
1332  */
1333  u32 dword0;
1334  int cc;
1335 
1336  if (etd->active_count && !enabled) /* suspicious... */
1337  enable_sof_int = 1;
1338 
1339  if (!sof || enabled || !etd->active_count)
1340  continue;
1341 
1342  cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1343  if (cc == TD_NOTACCESSED)
1344  continue;
1345 
1346  if (++etd->active_count < 10)
1347  continue;
1348 
1349  dword0 = etd_readl(imx21, etd_num, 0);
1350  dev_dbg(imx21->dev,
1351  "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1352  etd_num, dword0 & 0x7F,
1353  (dword0 >> DW0_ENDPNT) & 0x0F,
1354  cc);
1355 
1356 #ifdef DEBUG
1357  dev_dbg(imx21->dev,
1358  "frame: act=%d disact=%d"
1359  " int=%d req=%d cur=%d\n",
1360  etd->activated_frame,
1361  etd->disactivated_frame,
1362  etd->last_int_frame,
1363  etd->last_req_frame,
1364  readl(imx21->regs + USBH_FRMNUB));
1365  imx21->debug_unblocks++;
1366 #endif
1367  etd->active_count = 0;
1368 /* End of kludge */
1369  }
1370 
1371  if (etd->ep == NULL || etd->urb == NULL) {
1372  dev_dbg(imx21->dev,
1373  "Interrupt for unexpected etd %d"
1374  " ep=%p urb=%p\n",
1375  etd_num, etd->ep, etd->urb);
1376  disactivate_etd(imx21, etd_num);
1377  continue;
1378  }
1379 
1380  if (usb_pipeisoc(etd->urb->pipe))
1381  isoc_etd_done(hcd, etd_num);
1382  else
1383  nonisoc_etd_done(hcd, etd_num);
1384  }
1385 
1386  /* only enable SOF interrupt if it may be needed for the kludge */
1387  if (enable_sof_int)
1388  set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1389  else
1390  clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1391 
1392 
1393  spin_unlock_irqrestore(&imx21->lock, flags);
1394 }
1395 
1396 static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1397 {
1398  struct imx21 *imx21 = hcd_to_imx21(hcd);
1399  u32 ints = readl(imx21->regs + USBH_SYSISR);
1400 
1401  if (ints & USBH_SYSIEN_HERRINT)
1402  dev_dbg(imx21->dev, "Scheduling error\n");
1403 
1404  if (ints & USBH_SYSIEN_SORINT)
1405  dev_dbg(imx21->dev, "Scheduling overrun\n");
1406 
1407  if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1408  process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1409 
1410  writel(ints, imx21->regs + USBH_SYSISR);
1411  return IRQ_HANDLED;
1412 }
1413 
1414 static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1415  struct usb_host_endpoint *ep)
1416 {
1417  struct imx21 *imx21 = hcd_to_imx21(hcd);
1418  unsigned long flags;
1419  struct ep_priv *ep_priv;
1420  int i;
1421 
1422  if (ep == NULL)
1423  return;
1424 
1425  spin_lock_irqsave(&imx21->lock, flags);
1426  ep_priv = ep->hcpriv;
1427  dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1428 
1429  if (!list_empty(&ep->urb_list))
1430  dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1431 
1432  if (ep_priv != NULL) {
1433  for (i = 0; i < NUM_ISO_ETDS; i++) {
1434  if (ep_priv->etd[i] > -1)
1435  dev_dbg(imx21->dev, "free etd %d for disable\n",
1436  ep_priv->etd[i]);
1437 
1438  free_etd(imx21, ep_priv->etd[i]);
1439  }
1440  kfree(ep_priv);
1441  ep->hcpriv = NULL;
1442  }
1443 
1444  for (i = 0; i < USB_NUM_ETD; i++) {
1445  if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1446  dev_err(imx21->dev,
1447  "Active etd %d for disabled ep=%p!\n", i, ep);
1448  free_etd(imx21, i);
1449  }
1450  }
1451  free_epdmem(imx21, ep);
1452  spin_unlock_irqrestore(&imx21->lock, flags);
1453 }
1454 
1455 /* =========================================== */
1456 /* Hub handling */
1457 /* =========================================== */
1458 
1459 static int get_hub_descriptor(struct usb_hcd *hcd,
1460  struct usb_hub_descriptor *desc)
1461 {
1462  struct imx21 *imx21 = hcd_to_imx21(hcd);
1463  desc->bDescriptorType = 0x29; /* HUB descriptor */
1464  desc->bHubContrCurrent = 0;
1465 
1466  desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1467  & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1468  desc->bDescLength = 9;
1469  desc->bPwrOn2PwrGood = 0;
1470  desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1471  0x0002 | /* No power switching */
1472  0x0010 | /* No over current protection */
1473  0);
1474 
1475  desc->u.hs.DeviceRemovable[0] = 1 << 1;
1476  desc->u.hs.DeviceRemovable[1] = ~0;
1477  return 0;
1478 }
1479 
1480 static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1481 {
1482  struct imx21 *imx21 = hcd_to_imx21(hcd);
1483  int ports;
1484  int changed = 0;
1485  int i;
1486  unsigned long flags;
1487 
1488  spin_lock_irqsave(&imx21->lock, flags);
1489  ports = readl(imx21->regs + USBH_ROOTHUBA)
1490  & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1491  if (ports > 7) {
1492  ports = 7;
1493  dev_err(imx21->dev, "ports %d > 7\n", ports);
1494  }
1495  for (i = 0; i < ports; i++) {
1496  if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1497  (USBH_PORTSTAT_CONNECTSC |
1498  USBH_PORTSTAT_PRTENBLSC |
1499  USBH_PORTSTAT_PRTSTATSC |
1500  USBH_PORTSTAT_OVRCURIC |
1501  USBH_PORTSTAT_PRTRSTSC)) {
1502 
1503  changed = 1;
1504  buf[0] |= 1 << (i + 1);
1505  }
1506  }
1507  spin_unlock_irqrestore(&imx21->lock, flags);
1508 
1509  if (changed)
1510  dev_info(imx21->dev, "Hub status changed\n");
1511  return changed;
1512 }
1513 
1514 static int imx21_hc_hub_control(struct usb_hcd *hcd,
1515  u16 typeReq,
1516  u16 wValue, u16 wIndex, char *buf, u16 wLength)
1517 {
1518  struct imx21 *imx21 = hcd_to_imx21(hcd);
1519  int rc = 0;
1520  u32 status_write = 0;
1521 
1522  switch (typeReq) {
1523  case ClearHubFeature:
1524  dev_dbg(imx21->dev, "ClearHubFeature\n");
1525  switch (wValue) {
1526  case C_HUB_OVER_CURRENT:
1527  dev_dbg(imx21->dev, " OVER_CURRENT\n");
1528  break;
1529  case C_HUB_LOCAL_POWER:
1530  dev_dbg(imx21->dev, " LOCAL_POWER\n");
1531  break;
1532  default:
1533  dev_dbg(imx21->dev, " unknown\n");
1534  rc = -EINVAL;
1535  break;
1536  }
1537  break;
1538 
1539  case ClearPortFeature:
1540  dev_dbg(imx21->dev, "ClearPortFeature\n");
1541  switch (wValue) {
1542  case USB_PORT_FEAT_ENABLE:
1543  dev_dbg(imx21->dev, " ENABLE\n");
1544  status_write = USBH_PORTSTAT_CURCONST;
1545  break;
1546  case USB_PORT_FEAT_SUSPEND:
1547  dev_dbg(imx21->dev, " SUSPEND\n");
1548  status_write = USBH_PORTSTAT_PRTOVRCURI;
1549  break;
1550  case USB_PORT_FEAT_POWER:
1551  dev_dbg(imx21->dev, " POWER\n");
1552  status_write = USBH_PORTSTAT_LSDEVCON;
1553  break;
1554  case USB_PORT_FEAT_C_ENABLE:
1555  dev_dbg(imx21->dev, " C_ENABLE\n");
1556  status_write = USBH_PORTSTAT_PRTENBLSC;
1557  break;
1558  case USB_PORT_FEAT_C_SUSPEND:
1559  dev_dbg(imx21->dev, " C_SUSPEND\n");
1560  status_write = USBH_PORTSTAT_PRTSTATSC;
1561  break;
1562  case USB_PORT_FEAT_C_CONNECTION:
1563  dev_dbg(imx21->dev, " C_CONNECTION\n");
1564  status_write = USBH_PORTSTAT_CONNECTSC;
1565  break;
1566  case USB_PORT_FEAT_C_OVER_CURRENT:
1567  dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
1568  status_write = USBH_PORTSTAT_OVRCURIC;
1569  break;
1570  case USB_PORT_FEAT_C_RESET:
1571  dev_dbg(imx21->dev, " C_RESET\n");
1572  status_write = USBH_PORTSTAT_PRTRSTSC;
1573  break;
1574  default:
1575  dev_dbg(imx21->dev, " unknown\n");
1576  rc = -EINVAL;
1577  break;
1578  }
1579 
1580  break;
1581 
1582  case GetHubDescriptor:
1583  dev_dbg(imx21->dev, "GetHubDescriptor\n");
1584  rc = get_hub_descriptor(hcd, (void *)buf);
1585  break;
1586 
1587  case GetHubStatus:
1588  dev_dbg(imx21->dev, " GetHubStatus\n");
1589  *(__le32 *) buf = 0;
1590  break;
1591 
1592  case GetPortStatus:
1593  dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1594  wIndex, USBH_PORTSTAT(wIndex - 1));
1595  *(__le32 *) buf = readl(imx21->regs +
1596  USBH_PORTSTAT(wIndex - 1));
1597  break;
1598 
1599  case SetHubFeature:
1600  dev_dbg(imx21->dev, "SetHubFeature\n");
1601  switch (wValue) {
1602  case C_HUB_OVER_CURRENT:
1603  dev_dbg(imx21->dev, " OVER_CURRENT\n");
1604  break;
1605 
1606  case C_HUB_LOCAL_POWER:
1607  dev_dbg(imx21->dev, " LOCAL_POWER\n");
1608  break;
1609  default:
1610  dev_dbg(imx21->dev, " unknown\n");
1611  rc = -EINVAL;
1612  break;
1613  }
1614 
1615  break;
1616 
1617  case SetPortFeature:
1618  dev_dbg(imx21->dev, "SetPortFeature\n");
1619  switch (wValue) {
1620  case USB_PORT_FEAT_SUSPEND:
1621  dev_dbg(imx21->dev, " SUSPEND\n");
1622  status_write = USBH_PORTSTAT_PRTSUSPST;
1623  break;
1624  case USB_PORT_FEAT_POWER:
1625  dev_dbg(imx21->dev, " POWER\n");
1626  status_write = USBH_PORTSTAT_PRTPWRST;
1627  break;
1628  case USB_PORT_FEAT_RESET:
1629  dev_dbg(imx21->dev, " RESET\n");
1630  status_write = USBH_PORTSTAT_PRTRSTST;
1631  break;
1632  default:
1633  dev_dbg(imx21->dev, " unknown\n");
1634  rc = -EINVAL;
1635  break;
1636  }
1637  break;
1638 
1639  default:
1640  dev_dbg(imx21->dev, " unknown\n");
1641  rc = -EINVAL;
1642  break;
1643  }
1644 
1645  if (status_write)
1646  writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1647  return rc;
1648 }
1649 
1650 /* =========================================== */
1651 /* Host controller management */
1652 /* =========================================== */
1653 
1654 static int imx21_hc_reset(struct usb_hcd *hcd)
1655 {
1656  struct imx21 *imx21 = hcd_to_imx21(hcd);
1657  unsigned long timeout;
1658  unsigned long flags;
1659 
1660  spin_lock_irqsave(&imx21->lock, flags);
1661 
1662  /* Reset the Host controller modules */
1663  writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1664  USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1665  imx21->regs + USBOTG_RST_CTRL);
1666 
1667  /* Wait for reset to finish */
1668  timeout = jiffies + HZ;
1669  while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1670  if (time_after(jiffies, timeout)) {
1671  spin_unlock_irqrestore(&imx21->lock, flags);
1672  dev_err(imx21->dev, "timeout waiting for reset\n");
1673  return -ETIMEDOUT;
1674  }
1675  spin_unlock_irq(&imx21->lock);
1676  schedule_timeout_uninterruptible(1);
1677  spin_lock_irq(&imx21->lock);
1678  }
1679  spin_unlock_irqrestore(&imx21->lock, flags);
1680  return 0;
1681 }
1682 
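/*
 * Bring the controller up: select host mode and the transceivers given by
 * platform data, enable the clocks, clear all ETDs, take the HC out of
 * reset, power and enable the configured root hub ports and unmask the
 * host interrupts.
 */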
1683 static int __devinit imx21_hc_start(struct usb_hcd *hcd)
1684 {
1685  struct imx21 *imx21 = hcd_to_imx21(hcd);
1686  unsigned long flags;
1687  int i, j;
1688  u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1689  u32 usb_control = 0;
1690 
1691  hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1692  USBOTG_HWMODE_HOSTXCVR_MASK);
1693  hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1694  USBOTG_HWMODE_OTGXCVR_MASK);
1695 
1696  if (imx21->pdata->host1_txenoe)
1697  usb_control |= USBCTRL_HOST1_TXEN_OE;
1698 
1699  if (!imx21->pdata->host1_xcverless)
1700  usb_control |= USBCTRL_HOST1_BYP_TLL;
1701 
1702  if (imx21->pdata->otg_ext_xcvr)
1703  usb_control |= USBCTRL_OTC_RCV_RXDP;
1704 
1705 
1706  spin_lock_irqsave(&imx21->lock, flags);
1707 
1708  writel(USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN,
1709  imx21->regs + USBOTG_CLK_CTRL);
1710  writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1711  writel(usb_control, imx21->regs + USBCTRL);
1712  writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
1713  imx21->regs + USB_MISCCONTROL);
1714 
1715  /* Clear the ETDs */
1716  for (i = 0; i < USB_NUM_ETD; i++)
1717  for (j = 0; j < 4; j++)
1718  etd_writel(imx21, i, j, 0);
1719 
1720  /* Take the HC out of reset */
1721  writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1722  imx21->regs + USBH_HOST_CTRL);
1723 
1724  /* Enable ports */
1725  if (imx21->pdata->enable_otg_host)
1726  writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1727  imx21->regs + USBH_PORTSTAT(0));
1728 
1729  if (imx21->pdata->enable_host1)
1730  writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1731  imx21->regs + USBH_PORTSTAT(1));
1732 
1733  if (imx21->pdata->enable_host2)
1734  writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1735  imx21->regs + USBH_PORTSTAT(2));
1736 
1737 
1738  hcd->state = HC_STATE_RUNNING;
1739 
1740  /* Enable host controller interrupts */
1741  set_register_bits(imx21, USBH_SYSIEN,
1742  USBH_SYSIEN_HERRINT |
1743  USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1744  set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1745 
1746  spin_unlock_irqrestore(&imx21->lock, flags);
1747 
1748  return 0;
1749 }
1750 
1751 static void imx21_hc_stop(struct usb_hcd *hcd)
1752 {
1753  struct imx21 *imx21 = hcd_to_imx21(hcd);
1754  unsigned long flags;
1755 
1756  spin_lock_irqsave(&imx21->lock, flags);
1757 
1758  writel(0, imx21->regs + USBH_SYSIEN);
1759  clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1760  clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN,
1761  USBOTG_CLK_CTRL);
1762  spin_unlock_irqrestore(&imx21->lock, flags);
1763 }
1764 
1765 /* =========================================== */
1766 /* Driver glue */
1767 /* =========================================== */
1768 
1769 static struct hc_driver imx21_hc_driver = {
1770  .description = hcd_name,
1771  .product_desc = "IMX21 USB Host Controller",
1772  .hcd_priv_size = sizeof(struct imx21),
1773 
1774  .flags = HCD_USB11,
1775  .irq = imx21_irq,
1776 
1777  .reset = imx21_hc_reset,
1778  .start = imx21_hc_start,
1779  .stop = imx21_hc_stop,
1780 
1781  /* I/O requests */
1782  .urb_enqueue = imx21_hc_urb_enqueue,
1783  .urb_dequeue = imx21_hc_urb_dequeue,
1784  .endpoint_disable = imx21_hc_endpoint_disable,
1785 
1786  /* scheduling support */
1787  .get_frame_number = imx21_hc_get_frame,
1788 
1789  /* Root hub support */
1790  .hub_status_data = imx21_hc_hub_status_data,
1791  .hub_control = imx21_hc_hub_control,
1792 
1793 };
1794 
1795 static struct mx21_usbh_platform_data default_pdata = {
1796  .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1797  .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1798  .enable_host1 = 1,
1799  .enable_host2 = 1,
1800  .enable_otg_host = 1,
1801 
1802 };
1803 
1804 static int imx21_remove(struct platform_device *pdev)
1805 {
1806  struct usb_hcd *hcd = platform_get_drvdata(pdev);
1807  struct imx21 *imx21 = hcd_to_imx21(hcd);
1808  struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1809 
1810  remove_debug_files(imx21);
1811  usb_remove_hcd(hcd);
1812 
1813  if (res != NULL) {
1814  clk_disable_unprepare(imx21->clk);
1815  clk_put(imx21->clk);
1816  iounmap(imx21->regs);
1817  release_mem_region(res->start, resource_size(res));
1818  }
1819 
1820  kfree(hcd);
1821  return 0;
1822 }
1823 
1824 
1825 static int imx21_probe(struct platform_device *pdev)
1826 {
1827  struct usb_hcd *hcd;
1828  struct imx21 *imx21;
1829  struct resource *res;
1830  int ret;
1831  int irq;
1832 
1833  printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1834 
1835  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1836  if (!res)
1837  return -ENODEV;
1838  irq = platform_get_irq(pdev, 0);
1839  if (irq < 0)
1840  return -ENXIO;
1841 
1842  hcd = usb_create_hcd(&imx21_hc_driver,
1843  &pdev->dev, dev_name(&pdev->dev));
1844  if (hcd == NULL) {
1845  dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1846  dev_name(&pdev->dev));
1847  return -ENOMEM;
1848  }
1849 
1850  imx21 = hcd_to_imx21(hcd);
1851  imx21->hcd = hcd;
1852  imx21->dev = &pdev->dev;
1853  imx21->pdata = pdev->dev.platform_data;
1854  if (!imx21->pdata)
1855  imx21->pdata = &default_pdata;
1856 
1857  spin_lock_init(&imx21->lock);
1858  INIT_LIST_HEAD(&imx21->dmem_list);
1859  INIT_LIST_HEAD(&imx21->queue_for_etd);
1860  INIT_LIST_HEAD(&imx21->queue_for_dmem);
1861  create_debug_files(imx21);
1862 
1863  res = request_mem_region(res->start, resource_size(res), hcd_name);
1864  if (!res) {
1865  ret = -EBUSY;
1866  goto failed_request_mem;
1867  }
1868 
1869  imx21->regs = ioremap(res->start, resource_size(res));
1870  if (imx21->regs == NULL) {
1871  dev_err(imx21->dev, "Cannot map registers\n");
1872  ret = -ENOMEM;
1873  goto failed_ioremap;
1874  }
1875 
1876  /* Enable clocks source */
1877  imx21->clk = clk_get(imx21->dev, NULL);
1878  if (IS_ERR(imx21->clk)) {
1879  dev_err(imx21->dev, "no clock found\n");
1880  ret = PTR_ERR(imx21->clk);
1881  goto failed_clock_get;
1882  }
1883 
1884  ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1885  if (ret)
1886  goto failed_clock_set;
1887  ret = clk_prepare_enable(imx21->clk);
1888  if (ret)
1889  goto failed_clock_enable;
1890 
1891  dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1892  (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1893 
1894  ret = usb_add_hcd(hcd, irq, 0);
1895  if (ret != 0) {
1896  dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1897  goto failed_add_hcd;
1898  }
1899 
1900  return 0;
1901 
1902 failed_add_hcd:
1903  clk_disable_unprepare(imx21->clk);
1904 failed_clock_enable:
1905 failed_clock_set:
1906  clk_put(imx21->clk);
1907 failed_clock_get:
1908  iounmap(imx21->regs);
1909 failed_ioremap:
1910  release_mem_region(res->start, resource_size(res));
1911 failed_request_mem:
1912  remove_debug_files(imx21);
1913  usb_put_hcd(hcd);
1914  return ret;
1915 }
1916 
1917 static struct platform_driver imx21_hcd_driver = {
1918  .driver = {
1919  .name = (char *)hcd_name,
1920  },
1921  .probe = imx21_probe,
1922  .remove = imx21_remove,
1923  .suspend = NULL,
1924  .resume = NULL,
1925 };
1926 
1927 module_platform_driver(imx21_hcd_driver);
1928 
1929 MODULE_DESCRIPTION("i.MX21 USB Host controller");
1930 MODULE_AUTHOR("Martin Fuzzey");
1931 MODULE_LICENSE("GPL");
1932 MODULE_ALIAS("platform:imx21-hcd");