Linux Kernel  3.7.1
fsl_qe_udc.c
1 /*
2  * drivers/usb/gadget/fsl_qe_udc.c
3  *
4  * Copyright (c) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
5  *
6  * Xie Xiaobo <[email protected]>
7  * Li Yang <[email protected]>
8  * Based on bareboard code from Shlomi Gridish.
9  *
10  * Description:
11  * Freescale QE/CPM USB Peripheral Controller Driver
12  * The controller can be found on the MPC8360, MPC8272, etc.
13  * MPC8360 Rev 1.1 may need a QE microcode update
14  *
15  * This program is free software; you can redistribute it and/or modify it
16  * under the terms of the GNU General Public License as published by the
17  * Free Software Foundation; either version 2 of the License, or (at your
18  * option) any later version.
19  */
20 
21 #undef USB_TRACE
22 
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/ioport.h>
27 #include <linux/types.h>
28 #include <linux/errno.h>
29 #include <linux/err.h>
30 #include <linux/slab.h>
31 #include <linux/list.h>
32 #include <linux/interrupt.h>
33 #include <linux/io.h>
34 #include <linux/moduleparam.h>
35 #include <linux/of_address.h>
36 #include <linux/of_platform.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/usb/ch9.h>
39 #include <linux/usb/gadget.h>
40 #include <linux/usb/otg.h>
41 #include <asm/qe.h>
42 #include <asm/cpm.h>
43 #include <asm/dma.h>
44 #include <asm/reg.h>
45 #include "fsl_qe_udc.h"
46 
47 #define DRIVER_DESC "Freescale QE/CPM USB Device Controller driver"
48 #define DRIVER_AUTHOR "Xie XiaoBo"
49 #define DRIVER_VERSION "1.0"
50 
51 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
52 
53 static const char driver_name[] = "fsl_qe_udc";
54 static const char driver_desc[] = DRIVER_DESC;
55 
56 /* ep name is important in gadget; it should obey the naming convention of ep_match() */
57 static const char *const ep_name[] = {
58  "ep0-control", /* everyone has ep0 */
59  /* 3 configurable endpoints */
60  "ep1",
61  "ep2",
62  "ep3",
63 };
64 
65 static struct usb_endpoint_descriptor qe_ep0_desc = {
66  .bLength = USB_DT_ENDPOINT_SIZE,
67  .bDescriptorType = USB_DT_ENDPOINT,
68 
69  .bEndpointAddress = 0,
70  .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
71  .wMaxPacketSize = USB_MAX_CTRL_PAYLOAD,
72 };
73 
74 /********************************************************************
75  * Internally used functions - start
76 ********************************************************************/
77 /*-----------------------------------------------------------------
78  * done() - retire a request; the caller has blocked irqs
79  *--------------------------------------------------------------*/
80 static void done(struct qe_ep *ep, struct qe_req *req, int status)
81 {
82  struct qe_udc *udc = ep->udc;
83  unsigned char stopped = ep->stopped;
84 
85  /* the req->queue pointer is used by ep_queue(), where the request
86  * is added to the tail of the endpoint queue; here the request
87  * is removed from ep->queue
88  */
89  list_del_init(&req->queue);
90 
91  /* req.status should be set as -EINPROGRESS in ep_queue() */
92  if (req->req.status == -EINPROGRESS)
93  req->req.status = status;
94  else
95  status = req->req.status;
96 
97  if (req->mapped) {
98  dma_unmap_single(udc->gadget.dev.parent,
99  req->req.dma, req->req.length,
100  ep_is_in(ep)
101  ? DMA_TO_DEVICE
102  : DMA_FROM_DEVICE);
103  req->req.dma = DMA_ADDR_INVALID;
104  req->mapped = 0;
105  } else
106  dma_sync_single_for_cpu(udc->gadget.dev.parent,
107  req->req.dma, req->req.length,
108  ep_is_in(ep)
109  ? DMA_TO_DEVICE
110  : DMA_FROM_DEVICE);
111 
112  if (status && (status != -ESHUTDOWN))
113  dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n",
114  ep->ep.name, &req->req, status,
115  req->req.actual, req->req.length);
116 
117  /* don't modify queue heads during completion callback */
118  ep->stopped = 1;
119  spin_unlock(&udc->lock);
120 
121  /* this complete() should be a function implemented by the gadget layer,
122  * e.g. fsg->bulk_in_complete() */
123  if (req->req.complete)
124  req->req.complete(&ep->ep, &req->req);
125 
126  spin_lock(&udc->lock);
127 
128  ep->stopped = stopped;
129 }
130 
131 /*-----------------------------------------------------------------
132  * nuke(): delete all requests related to this ep
133  *--------------------------------------------------------------*/
134 static void nuke(struct qe_ep *ep, int status)
135 {
136  /* retire any requests still linked to this ep */
137  while (!list_empty(&ep->queue)) {
138  struct qe_req *req = NULL;
139  req = list_entry(ep->queue.next, struct qe_req, queue);
140 
141  done(ep, req, status);
142  }
143 }
144 
145 /*---------------------------------------------------------------------------*
146  * USB and endpoint manipulation routines, covering parameter RAM and registers *
147  *---------------------------------------------------------------------------*/
148 /* @value: 1 -- set stall, 0 -- clear stall */
149 static int qe_eprx_stall_change(struct qe_ep *ep, int value)
150 {
151  u16 tem_usep;
152  u8 epnum = ep->epnum;
153  struct qe_udc *udc = ep->udc;
154 
155  tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
156  tem_usep = tem_usep & ~USB_RHS_MASK;
157  if (value == 1)
158  tem_usep |= USB_RHS_STALL;
159  else if (ep->dir == USB_DIR_IN)
160  tem_usep |= USB_RHS_IGNORE_OUT;
161 
162  out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
163  return 0;
164 }
165 
166 static int qe_eptx_stall_change(struct qe_ep *ep, int value)
167 {
168  u16 tem_usep;
169  u8 epnum = ep->epnum;
170  struct qe_udc *udc = ep->udc;
171 
172  tem_usep = in_be16(&udc->usb_regs->usb_usep[epnum]);
173  tem_usep = tem_usep & ~USB_THS_MASK;
174  if (value == 1)
175  tem_usep |= USB_THS_STALL;
176  else if (ep->dir == USB_DIR_OUT)
177  tem_usep |= USB_THS_IGNORE_IN;
178 
179  out_be16(&udc->usb_regs->usb_usep[epnum], tem_usep);
180 
181  return 0;
182 }
183 
184 static int qe_ep0_stall(struct qe_udc *udc)
185 {
186  qe_eptx_stall_change(&udc->eps[0], 1);
187  qe_eprx_stall_change(&udc->eps[0], 1);
188  udc->ep0_state = WAIT_FOR_SETUP;
189  udc->ep0_dir = 0;
190  return 0;
191 }
192 
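/* qe_eprx_nack() stops accepting OUT packets on this endpoint: it programs the
 * receive-handshake field of USEP to NACK and masks the RX/busy interrupts,
 * until qe_eprx_normal() below re-enables reception once a buffer is queued. */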
193 static int qe_eprx_nack(struct qe_ep *ep)
194 {
195  u8 epnum = ep->epnum;
196  struct qe_udc *udc = ep->udc;
197 
198  if (ep->state == EP_STATE_IDLE) {
199  /* Set the ep's nack */
200  clrsetbits_be16(&udc->usb_regs->usb_usep[epnum],
201  USB_RHS_MASK, USB_RHS_NACK);
202 
203  /* Mask Rx and Busy interrupts */
204  clrbits16(&udc->usb_regs->usb_usbmr,
205  (USB_E_RXB_MASK | USB_E_BSY_MASK));
206 
207  ep->state = EP_STATE_NACK;
208  }
209  return 0;
210 }
211 
212 static int qe_eprx_normal(struct qe_ep *ep)
213 {
214  struct qe_udc *udc = ep->udc;
215 
216  if (ep->state == EP_STATE_NACK) {
217  clrsetbits_be16(&udc->usb_regs->usb_usep[ep->epnum],
218  USB_RTHS_MASK, USB_THS_IGNORE_IN);
219 
220  /* Unmask RX interrupts */
221  out_be16(&udc->usb_regs->usb_usber,
222  USB_E_RXB_MASK);
223  setbits16(&udc->usb_regs->usb_usbmr,
224  (USB_E_RXB_MASK | USB_E_BSY_MASK));
225 
226  ep->state = EP_STATE_IDLE;
227  ep->has_data = 0;
228  }
229 
230  return 0;
231 }
232 
233 static int qe_ep_cmd_stoptx(struct qe_ep *ep)
234 {
235  if (ep->udc->soc_type == PORT_CPM)
236  cpm_command(CPM_USB_STOP_TX | (ep->epnum << CPM_USB_EP_SHIFT),
237  CPM_USB_STOP_TX_OPCODE);
238  else
239  qe_issue_cmd(QE_USB_STOP_TX, QE_CR_SUBBLOCK_USB,
240  ep->epnum, 0);
241 
242  return 0;
243 }
244 
245 static int qe_ep_cmd_restarttx(struct qe_ep *ep)
246 {
247  if (ep->udc->soc_type == PORT_CPM)
248  cpm_command(CPM_USB_RESTART_TX | (ep->epnum <<
249  CPM_USB_EP_SHIFT), CPM_USB_RESTART_TX_OPCODE);
250  else
251  qe_issue_cmd(QE_USB_RESTART_TX, QE_CR_SUBBLOCK_USB,
252  ep->epnum, 0);
253 
254  return 0;
255 }
256 
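/* qe_ep_flushtxfifo() drops anything pending in the transmit path: stop TX,
 * flush the FIFO through the USCOM register, rewind the TX BD pointer, state
 * and count in the parameter RAM, reset the software BD pointers, restart TX. */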
257 static int qe_ep_flushtxfifo(struct qe_ep *ep)
258 {
259  struct qe_udc *udc = ep->udc;
260  int i;
261 
262  i = (int)ep->epnum;
263 
264  qe_ep_cmd_stoptx(ep);
265  out_8(&udc->usb_regs->usb_uscom,
266  (USB_CMD_FLUSH_FIFO | (USB_CMD_EP_MASK & (ep->epnum))));
267  out_be16(&udc->ep_param[i]->tbptr, in_be16(&udc->ep_param[i]->tbase));
268  out_be32(&udc->ep_param[i]->tstate, 0);
269  out_be16(&udc->ep_param[i]->tbcnt, 0);
270 
271  ep->c_txbd = ep->txbase;
272  ep->n_txbd = ep->txbase;
273  qe_ep_cmd_restarttx(ep);
274  return 0;
275 }
276 
277 static int qe_ep_filltxfifo(struct qe_ep *ep)
278 {
279  struct qe_udc *udc = ep->udc;
280 
281  out_8(&udc->usb_regs->usb_uscom,
282  USB_CMD_STR_FIFO | (USB_CMD_EP_MASK & (ep->epnum)));
283  return 0;
284 }
285 
286 static int qe_epbds_reset(struct qe_udc *udc, int pipe_num)
287 {
288  struct qe_ep *ep;
289  u32 bdring_len;
290  struct qe_bd __iomem *bd;
291  int i;
292 
293  ep = &udc->eps[pipe_num];
294 
295  if (ep->dir == USB_DIR_OUT)
296  bdring_len = USB_BDRING_LEN_RX;
297  else
298  bdring_len = USB_BDRING_LEN;
299 
300  bd = ep->rxbase;
301  for (i = 0; i < (bdring_len - 1); i++) {
302  out_be32((u32 __iomem *)bd, R_E | R_I);
303  bd++;
304  }
305  out_be32((u32 __iomem *)bd, R_E | R_I | R_W);
306 
307  bd = ep->txbase;
308  for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
309  out_be32(&bd->buf, 0);
310  out_be32((u32 __iomem *)bd, 0);
311  bd++;
312  }
313  out_be32((u32 __iomem *)bd, T_W);
314 
315  return 0;
316 }
317 
318 static int qe_ep_reset(struct qe_udc *udc, int pipe_num)
319 {
320  struct qe_ep *ep;
321  u16 tmpusep;
322 
323  ep = &udc->eps[pipe_num];
324  tmpusep = in_be16(&udc->usb_regs->usb_usep[pipe_num]);
325  tmpusep &= ~USB_RTHS_MASK;
326 
327  switch (ep->dir) {
328  case USB_DIR_BOTH:
329  qe_ep_flushtxfifo(ep);
330  break;
331  case USB_DIR_OUT:
332  tmpusep |= USB_THS_IGNORE_IN;
333  break;
334  case USB_DIR_IN:
335  qe_ep_flushtxfifo(ep);
336  tmpusep |= USB_RHS_IGNORE_OUT;
337  break;
338  default:
339  break;
340  }
341  out_be16(&udc->usb_regs->usb_usep[pipe_num], tmpusep);
342 
343  qe_epbds_reset(udc, pipe_num);
344 
345  return 0;
346 }
347 
348 static int qe_ep_toggledata01(struct qe_ep *ep)
349 {
350  ep->data01 ^= 0x1;
351  return 0;
352 }
353 
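/* qe_ep_bd_init() carves the endpoint's BD rings out of MURAM: an RX ring of
 * bdring_len descriptors immediately followed by a TX ring of
 * USB_BDRING_LEN_TX descriptors. rbase/tbase in the parameter RAM point at the
 * rings, and the last BD of each ring carries the wrap bit (R_W/T_W) so the
 * controller cycles back to the ring base. */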
354 static int qe_ep_bd_init(struct qe_udc *udc, unsigned char pipe_num)
355 {
356  struct qe_ep *ep = &udc->eps[pipe_num];
357  unsigned long tmp_addr = 0;
358  struct usb_ep_para __iomem *epparam;
359  int i;
360  struct qe_bd __iomem *bd;
361  int bdring_len;
362 
363  if (ep->dir == USB_DIR_OUT)
364  bdring_len = USB_BDRING_LEN_RX;
365  else
366  bdring_len = USB_BDRING_LEN;
367 
368  epparam = udc->ep_param[pipe_num];
369  /* allocate MURAM for the BD rings and set the ep parameters */
370  tmp_addr = cpm_muram_alloc(sizeof(struct qe_bd) * (bdring_len +
371  USB_BDRING_LEN_TX), QE_ALIGNMENT_OF_BD);
372  if (IS_ERR_VALUE(tmp_addr))
373  return -ENOMEM;
374 
375  out_be16(&epparam->rbase, (u16)tmp_addr);
376  out_be16(&epparam->tbase, (u16)(tmp_addr +
377  (sizeof(struct qe_bd) * bdring_len)));
378 
379  out_be16(&epparam->rbptr, in_be16(&epparam->rbase));
380  out_be16(&epparam->tbptr, in_be16(&epparam->tbase));
381 
382  ep->rxbase = cpm_muram_addr(tmp_addr);
383  ep->txbase = cpm_muram_addr(tmp_addr + (sizeof(struct qe_bd)
384  * bdring_len));
385  ep->n_rxbd = ep->rxbase;
386  ep->e_rxbd = ep->rxbase;
387  ep->n_txbd = ep->txbase;
388  ep->c_txbd = ep->txbase;
389  ep->data01 = 0; /* data0 */
390 
391  /* Init TX and RX bds */
392  bd = ep->rxbase;
393  for (i = 0; i < bdring_len - 1; i++) {
394  out_be32(&bd->buf, 0);
395  out_be32((u32 __iomem *)bd, 0);
396  bd++;
397  }
398  out_be32(&bd->buf, 0);
399  out_be32((u32 __iomem *)bd, R_W);
400 
401  bd = ep->txbase;
402  for (i = 0; i < USB_BDRING_LEN_TX - 1; i++) {
403  out_be32(&bd->buf, 0);
404  out_be32((u32 __iomem *)bd, 0);
405  bd++;
406  }
407  out_be32(&bd->buf, 0);
408  out_be32((u32 __iomem *)bd, T_W);
409 
410  return 0;
411 }
412 
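/* qe_ep_rxbd_update() allocates the software rx frame plus one contiguous
 * receive buffer big enough for (bdring_len + 1) slots of
 * (maxpacket + CRC + 2) bytes, maps or syncs it for DMA, and points each RX BD
 * at its own slice of that buffer, marked empty (R_E | R_I). */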
413 static int qe_ep_rxbd_update(struct qe_ep *ep)
414 {
415  unsigned int size;
416  int i;
417  unsigned int tmp;
418  struct qe_bd __iomem *bd;
419  unsigned int bdring_len;
420 
421  if (ep->rxbase == NULL)
422  return -EINVAL;
423 
424  bd = ep->rxbase;
425 
426  ep->rxframe = kmalloc(sizeof(*ep->rxframe), GFP_ATOMIC);
427  if (ep->rxframe == NULL) {
428  dev_err(ep->udc->dev, "malloc rxframe failed\n");
429  return -ENOMEM;
430  }
431 
432  qe_frame_init(ep->rxframe);
433 
434  if (ep->dir == USB_DIR_OUT)
435  bdring_len = USB_BDRING_LEN_RX;
436  else
437  bdring_len = USB_BDRING_LEN;
438 
439  size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (bdring_len + 1);
440  ep->rxbuffer = kzalloc(size, GFP_ATOMIC);
441  if (ep->rxbuffer == NULL) {
442  dev_err(ep->udc->dev, "malloc rxbuffer failed,size=%d\n",
443  size);
444  kfree(ep->rxframe);
445  return -ENOMEM;
446  }
447 
448  ep->rxbuf_d = virt_to_phys((void *)ep->rxbuffer);
449  if (ep->rxbuf_d == DMA_ADDR_INVALID) {
450  ep->rxbuf_d = dma_map_single(ep->udc->gadget.dev.parent,
451  ep->rxbuffer,
452  size,
453  DMA_FROM_DEVICE);
454  ep->rxbufmap = 1;
455  } else {
456  dma_sync_single_for_device(ep->udc->gadget.dev.parent,
457  ep->rxbuf_d, size,
458  DMA_FROM_DEVICE);
459  ep->rxbufmap = 0;
460  }
461 
462  size = ep->ep.maxpacket + USB_CRC_SIZE + 2;
463  tmp = ep->rxbuf_d;
464  tmp = (u32)(((tmp >> 2) << 2) + 4);
465 
466  for (i = 0; i < bdring_len - 1; i++) {
467  out_be32(&bd->buf, tmp);
468  out_be32((u32 __iomem *)bd, (R_E | R_I));
469  tmp = tmp + size;
470  bd++;
471  }
472  out_be32(&bd->buf, tmp);
473  out_be32((u32 __iomem *)bd, (R_E | R_I | R_W));
474 
475  return 0;
476 }
477 
478 static int qe_ep_register_init(struct qe_udc *udc, unsigned char pipe_num)
479 {
480  struct qe_ep *ep = &udc->eps[pipe_num];
481  struct usb_ep_para __iomem *epparam;
482  u16 usep, logepnum;
483  u16 tmp;
484  u8 rtfcr = 0;
485 
486  epparam = udc->ep_param[pipe_num];
487 
488  usep = 0;
489  logepnum = (ep->ep.desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
490  usep |= (logepnum << USB_EPNUM_SHIFT);
491 
492  switch (ep->ep.desc->bmAttributes & 0x03) {
493  case USB_ENDPOINT_XFER_BULK:
494  usep |= USB_TRANS_BULK;
495  break;
496  case USB_ENDPOINT_XFER_ISOC:
497  usep |= USB_TRANS_ISO;
498  break;
499  case USB_ENDPOINT_XFER_INT:
500  usep |= USB_TRANS_INT;
501  break;
502  default:
503  usep |= USB_TRANS_CTR;
504  break;
505  }
506 
507  switch (ep->dir) {
508  case USB_DIR_OUT:
509  usep |= USB_THS_IGNORE_IN;
510  break;
511  case USB_DIR_IN:
512  usep |= USB_RHS_IGNORE_OUT;
513  break;
514  default:
515  break;
516  }
517  out_be16(&udc->usb_regs->usb_usep[pipe_num], usep);
518 
519  rtfcr = 0x30;
520  out_8(&epparam->rbmr, rtfcr);
521  out_8(&epparam->tbmr, rtfcr);
522 
523  tmp = (u16)(ep->ep.maxpacket + USB_CRC_SIZE);
524  /* MRBLR must be divisible by 4 */
525  tmp = (u16)(((tmp >> 2) << 2) + 4);
526  out_be16(&epparam->mrblr, tmp);
527 
528  return 0;
529 }
530 
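/* qe_ep_init() validates wMaxPacketSize against the limits in USB 2.0
 * Table 9-13 for the requested transfer type and speed, then sets up the
 * endpoint: BD rings, receive buffers, a tx frame where needed, and the
 * USEP/parameter-RAM registers. It takes the udc lock itself. */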
531 static int qe_ep_init(struct qe_udc *udc,
532  unsigned char pipe_num,
533  const struct usb_endpoint_descriptor *desc)
534 {
535  struct qe_ep *ep = &udc->eps[pipe_num];
536  unsigned long flags;
537  int reval = 0;
538  u16 max = 0;
539 
540  max = usb_endpoint_maxp(desc);
541 
542  /* check that the max packet size is valid for this endpoint;
543  * refer to USB 2.0 spec, table 9-13
544  */
545  if (pipe_num != 0) {
546  switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
547  case USB_ENDPOINT_XFER_BULK:
548  if (strstr(ep->ep.name, "-iso")
549  || strstr(ep->ep.name, "-int"))
550  goto en_done;
551  switch (udc->gadget.speed) {
552  case USB_SPEED_HIGH:
553  if ((max == 128) || (max == 256) || (max == 512))
554  break;
555  default:
556  switch (max) {
557  case 4:
558  case 8:
559  case 16:
560  case 32:
561  case 64:
562  break;
563  default:
564  case USB_SPEED_LOW:
565  goto en_done;
566  }
567  }
568  break;
569  case USB_ENDPOINT_XFER_INT:
570  if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
571  goto en_done;
572  switch (udc->gadget.speed) {
573  case USB_SPEED_HIGH:
574  if (max <= 1024)
575  break;
576  case USB_SPEED_FULL:
577  if (max <= 64)
578  break;
579  default:
580  if (max <= 8)
581  break;
582  goto en_done;
583  }
584  break;
585  case USB_ENDPOINT_XFER_ISOC:
586  if (strstr(ep->ep.name, "-bulk")
587  || strstr(ep->ep.name, "-int"))
588  goto en_done;
589  switch (udc->gadget.speed) {
590  case USB_SPEED_HIGH:
591  if (max <= 1024)
592  break;
593  case USB_SPEED_FULL:
594  if (max <= 1023)
595  break;
596  default:
597  goto en_done;
598  }
599  break;
600  case USB_ENDPOINT_XFER_CONTROL:
601  if (strstr(ep->ep.name, "-iso")
602  || strstr(ep->ep.name, "-int"))
603  goto en_done;
604  switch (udc->gadget.speed) {
605  case USB_SPEED_HIGH:
606  case USB_SPEED_FULL:
607  switch (max) {
608  case 1:
609  case 2:
610  case 4:
611  case 8:
612  case 16:
613  case 32:
614  case 64:
615  break;
616  default:
617  goto en_done;
618  }
619  case USB_SPEED_LOW:
620  switch (max) {
621  case 1:
622  case 2:
623  case 4:
624  case 8:
625  break;
626  default:
627  goto en_done;
628  }
629  default:
630  goto en_done;
631  }
632  break;
633 
634  default:
635  goto en_done;
636  }
637  } /* if ep0*/
638 
639  spin_lock_irqsave(&udc->lock, flags);
640 
641  /* initialize ep structure */
642  ep->ep.maxpacket = max;
643  ep->tm = (u8)(desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
644  ep->ep.desc = desc;
645  ep->stopped = 0;
646  ep->init = 1;
647 
648  if (pipe_num == 0) {
649  ep->dir = USB_DIR_BOTH;
650  udc->ep0_dir = USB_DIR_OUT;
651  udc->ep0_state = WAIT_FOR_SETUP;
652  } else {
653  switch (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) {
654  case USB_DIR_OUT:
655  ep->dir = USB_DIR_OUT;
656  break;
657  case USB_DIR_IN:
658  ep->dir = USB_DIR_IN;
659  default:
660  break;
661  }
662  }
663 
664  /* hardware special operation */
665  qe_ep_bd_init(udc, pipe_num);
666  if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_OUT)) {
667  reval = qe_ep_rxbd_update(ep);
668  if (reval)
669  goto en_done1;
670  }
671 
672  if ((ep->tm == USBP_TM_CTL) || (ep->dir == USB_DIR_IN)) {
673  ep->txframe = kmalloc(sizeof(*ep->txframe), GFP_ATOMIC);
674  if (ep->txframe == NULL) {
675  dev_err(udc->dev, "malloc txframe failed\n");
676  goto en_done2;
677  }
678  qe_frame_init(ep->txframe);
679  }
680 
681  qe_ep_register_init(udc, pipe_num);
682 
683  /* Now HW will be NAKing transfers to that EP,
684  * until a buffer is queued to it. */
685  spin_unlock_irqrestore(&udc->lock, flags);
686 
687  return 0;
688 en_done2:
689  kfree(ep->rxbuffer);
690  kfree(ep->rxframe);
691 en_done1:
692  spin_unlock_irqrestore(&udc->lock, flags);
693 en_done:
694  dev_err(udc->dev, "failed to initialize %s\n", ep->ep.name);
695  return -ENODEV;
696 }
697 
698 static inline void qe_usb_enable(struct qe_udc *udc)
699 {
700  setbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
701 }
702 
703 static inline void qe_usb_disable(struct qe_udc *udc)
704 {
705  clrbits8(&udc->usb_regs->usb_usmod, USB_MODE_EN);
706 }
707 
708 /*----------------------------------------------------------------------------*
709  * USB and EP basic manipulation functions - end *
710  *----------------------------------------------------------------------------*/
711 
712 
713 /******************************************************************************
714  UDC transmit and receive process
715  ******************************************************************************/
716 static void recycle_one_rxbd(struct qe_ep *ep)
717 {
718  u32 bdstatus;
719 
720  bdstatus = in_be32((u32 __iomem *)ep->e_rxbd);
721  bdstatus = R_I | R_E | (bdstatus & R_W);
722  out_be32((u32 __iomem *)ep->e_rxbd, bdstatus);
723 
724  if (bdstatus & R_W)
725  ep->e_rxbd = ep->rxbase;
726  else
727  ep->e_rxbd++;
728 }
729 
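/* recycle_rxbds() gives processed RX BDs back to the controller by re-arming
 * them with R_E | R_I while preserving the wrap bit; when stopatnext is set,
 * it stops as soon as the next-to-be-processed BD (n_rxbd) is reached. */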
730 static void recycle_rxbds(struct qe_ep *ep, unsigned char stopatnext)
731 {
732  u32 bdstatus;
733  struct qe_bd __iomem *bd, *nextbd;
734  unsigned char stop = 0;
735 
736  nextbd = ep->n_rxbd;
737  bd = ep->e_rxbd;
738  bdstatus = in_be32((u32 __iomem *)bd);
739 
740  while (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK) && !stop) {
741  bdstatus = R_E | R_I | (bdstatus & R_W);
742  out_be32((u32 __iomem *)bd, bdstatus);
743 
744  if (bdstatus & R_W)
745  bd = ep->rxbase;
746  else
747  bd++;
748 
749  bdstatus = in_be32((u32 __iomem *)bd);
750  if (stopatnext && (bd == nextbd))
751  stop = 1;
752  }
753 
754  ep->e_rxbd = bd;
755 }
756 
757 static void ep_recycle_rxbds(struct qe_ep *ep)
758 {
759  struct qe_bd __iomem *bd = ep->n_rxbd;
760  u32 bdstatus;
761  u8 epnum = ep->epnum;
762  struct qe_udc *udc = ep->udc;
763 
764  bdstatus = in_be32((u32 __iomem *)bd);
765  if (!(bdstatus & R_E) && !(bdstatus & BD_LENGTH_MASK)) {
766  bd = ep->rxbase +
767  ((in_be16(&udc->ep_param[epnum]->rbptr) -
768  in_be16(&udc->ep_param[epnum]->rbase))
769  >> 3);
770  bdstatus = in_be32((u32 __iomem *)bd);
771 
772  if (bdstatus & R_W)
773  bd = ep->rxbase;
774  else
775  bd++;
776 
777  ep->e_rxbd = bd;
778  recycle_rxbds(ep, 0);
779  ep->e_rxbd = ep->n_rxbd;
780  } else
781  recycle_rxbds(ep, 1);
782 
783  if (in_be16(&udc->usb_regs->usb_usber) & USB_E_BSY_MASK)
784  out_be16(&udc->usb_regs->usb_usber, USB_E_BSY_MASK);
785 
786  if (ep->has_data <= 0 && (!list_empty(&ep->queue)))
787  qe_eprx_normal(ep);
788 
789  ep->localnack = 0;
790 }
791 
792 static void setup_received_handle(struct qe_udc *udc,
793  struct usb_ctrlrequest *setup);
794 static int qe_ep_rxframe_handle(struct qe_ep *ep);
795 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
796 /* when the BD PID is SETUP, handle the packet */
797 static int ep0_setup_handle(struct qe_udc *udc)
798 {
799  struct qe_ep *ep = &udc->eps[0];
800  struct qe_frame *pframe;
801  unsigned int fsize;
802  u8 *cp;
803 
804  pframe = ep->rxframe;
805  if ((frame_get_info(pframe) & PID_SETUP)
806  && (udc->ep0_state == WAIT_FOR_SETUP)) {
807  fsize = frame_get_length(pframe);
808  if (unlikely(fsize != 8))
809  return -EINVAL;
810  cp = (u8 *)&udc->local_setup_buff;
811  memcpy(cp, pframe->data, fsize);
812  ep->data01 = 1;
813 
814  /* handle the usb command based on the usb_ctrlrequest */
815  setup_received_handle(udc, &udc->local_setup_buff);
816  return 0;
817  }
818  return -EINVAL;
819 }
820 
821 static int qe_ep0_rx(struct qe_udc *udc)
822 {
823  struct qe_ep *ep = &udc->eps[0];
824  struct qe_frame *pframe;
825  struct qe_bd __iomem *bd;
826  u32 bdstatus, length;
827  u32 vaddr;
828 
829  pframe = ep->rxframe;
830 
831  if (ep->dir == USB_DIR_IN) {
832  dev_err(udc->dev, "ep0 not a control endpoint\n");
833  return -EINVAL;
834  }
835 
836  bd = ep->n_rxbd;
837  bdstatus = in_be32((u32 __iomem *)bd);
838  length = bdstatus & BD_LENGTH_MASK;
839 
840  while (!(bdstatus & R_E) && length) {
841  if ((bdstatus & R_F) && (bdstatus & R_L)
842  && !(bdstatus & R_ERROR)) {
843  if (length == USB_CRC_SIZE) {
844  udc->ep0_state = WAIT_FOR_SETUP;
845  dev_vdbg(udc->dev,
846  "receive a ZLP in status phase\n");
847  } else {
848  qe_frame_clean(pframe);
849  vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
850  frame_set_data(pframe, (u8 *)vaddr);
851  frame_set_length(pframe,
852  (length - USB_CRC_SIZE));
853  frame_set_status(pframe, FRAME_OK);
854  switch (bdstatus & R_PID) {
855  case R_PID_SETUP:
856  frame_set_info(pframe, PID_SETUP);
857  break;
858  case R_PID_DATA1:
859  frame_set_info(pframe, PID_DATA1);
860  break;
861  default:
862  frame_set_info(pframe, PID_DATA0);
863  break;
864  }
865 
866  if ((bdstatus & R_PID) == R_PID_SETUP)
867  ep0_setup_handle(udc);
868  else
869  qe_ep_rxframe_handle(ep);
870  }
871  } else {
872  dev_err(udc->dev, "The receive frame with error!\n");
873  }
874 
875  /* note: don't clear the rxbd's buffer address */
876  recycle_one_rxbd(ep);
877 
878  /* Get next BD */
879  if (bdstatus & R_W)
880  bd = ep->rxbase;
881  else
882  bd++;
883 
884  bdstatus = in_be32((u32 __iomem *)bd);
885  length = bdstatus & BD_LENGTH_MASK;
886 
887  }
888 
889  ep->n_rxbd = bd;
890 
891  return 0;
892 }
893 
894 static int qe_ep_rxframe_handle(struct qe_ep *ep)
895 {
896  struct qe_frame *pframe;
897  u8 framepid = 0;
898  unsigned int fsize;
899  u8 *cp;
900  struct qe_req *req;
901 
902  pframe = ep->rxframe;
903 
904  if (frame_get_info(pframe) & PID_DATA1)
905  framepid = 0x1;
906 
907  if (framepid != ep->data01) {
908  dev_err(ep->udc->dev, "the data01 error!\n");
909  return -EIO;
910  }
911 
912  fsize = frame_get_length(pframe);
913  if (list_empty(&ep->queue)) {
914  dev_err(ep->udc->dev, "the %s have no requeue!\n", ep->name);
915  } else {
916  req = list_entry(ep->queue.next, struct qe_req, queue);
917 
918  cp = (u8 *)(req->req.buf) + req->req.actual;
919  if (cp) {
920  memcpy(cp, pframe->data, fsize);
921  req->req.actual += fsize;
922  if ((fsize < ep->ep.maxpacket) ||
923  (req->req.actual >= req->req.length)) {
924  if (ep->epnum == 0)
925  ep0_req_complete(ep->udc, req);
926  else
927  done(ep, req, 0);
928  if (list_empty(&ep->queue) && ep->epnum != 0)
929  qe_eprx_nack(ep);
930  }
931  }
932  }
933 
934  qe_ep_toggledata01(ep);
935 
936  return 0;
937 }
938 
939 static void ep_rx_tasklet(unsigned long data)
940 {
941  struct qe_udc *udc = (struct qe_udc *)data;
942  struct qe_ep *ep;
943  struct qe_frame *pframe;
944  struct qe_bd __iomem *bd;
945  unsigned long flags;
946  u32 bdstatus, length;
947  u32 vaddr, i;
948 
949  spin_lock_irqsave(&udc->lock, flags);
950 
951  for (i = 1; i < USB_MAX_ENDPOINTS; i++) {
952  ep = &udc->eps[i];
953 
954  if (ep->dir == USB_DIR_IN || ep->enable_tasklet == 0) {
955  dev_dbg(udc->dev,
956  "This is a transmit ep or disable tasklet!\n");
957  continue;
958  }
959 
960  pframe = ep->rxframe;
961  bd = ep->n_rxbd;
962  bdstatus = in_be32((u32 __iomem *)bd);
963  length = bdstatus & BD_LENGTH_MASK;
964 
965  while (!(bdstatus & R_E) && length) {
966  if (list_empty(&ep->queue)) {
967  qe_eprx_nack(ep);
968  dev_dbg(udc->dev,
969  "The rxep have noreq %d\n",
970  ep->has_data);
971  break;
972  }
973 
974  if ((bdstatus & R_F) && (bdstatus & R_L)
975  && !(bdstatus & R_ERROR)) {
976  qe_frame_clean(pframe);
977  vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
978  frame_set_data(pframe, (u8 *)vaddr);
979  frame_set_length(pframe,
980  (length - USB_CRC_SIZE));
981  frame_set_status(pframe, FRAME_OK);
982  switch (bdstatus & R_PID) {
983  case R_PID_DATA1:
984  frame_set_info(pframe, PID_DATA1);
985  break;
986  case R_PID_SETUP:
987  frame_set_info(pframe, PID_SETUP);
988  break;
989  default:
990  frame_set_info(pframe, PID_DATA0);
991  break;
992  }
993  /* handle the rx frame */
994  qe_ep_rxframe_handle(ep);
995  } else {
996  dev_err(udc->dev,
997  "error in received frame\n");
998  }
999  /* note: don't clear the rxbd's buffer address */
1000  /*clear the length */
1001  out_be32((u32 __iomem *)bd, bdstatus & BD_STATUS_MASK);
1002  ep->has_data--;
1003  if (!(ep->localnack))
1004  recycle_one_rxbd(ep);
1005 
1006  /* Get next BD */
1007  if (bdstatus & R_W)
1008  bd = ep->rxbase;
1009  else
1010  bd++;
1011 
1012  bdstatus = in_be32((u32 __iomem *)bd);
1013  length = bdstatus & BD_LENGTH_MASK;
1014  }
1015 
1016  ep->n_rxbd = bd;
1017 
1018  if (ep->localnack)
1019  ep_recycle_rxbds(ep);
1020 
1021  ep->enable_tasklet = 0;
1022  } /* for i=1 */
1023 
1024  spin_unlock_irqrestore(&udc->lock, flags);
1025 }
1026 
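/* qe_ep_rx() works out how many BDs the controller has filled by comparing the
 * software BD pointer with the controller's rbptr; it NACKs further OUT
 * traffic when fewer than MIN_EMPTY_BDS descriptors remain empty or when no
 * request is queued, otherwise it schedules the rx tasklet to drain the ring. */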
1027 static int qe_ep_rx(struct qe_ep *ep)
1028 {
1029  struct qe_udc *udc;
1030  struct qe_frame *pframe;
1031  struct qe_bd __iomem *bd;
1032  u16 swoffs, ucoffs, emptybds;
1033 
1034  udc = ep->udc;
1035  pframe = ep->rxframe;
1036 
1037  if (ep->dir == USB_DIR_IN) {
1038  dev_err(udc->dev, "transmit ep in rx function\n");
1039  return -EINVAL;
1040  }
1041 
1042  bd = ep->n_rxbd;
1043 
1044  swoffs = (u16)(bd - ep->rxbase);
1045  ucoffs = (u16)((in_be16(&udc->ep_param[ep->epnum]->rbptr) -
1046  in_be16(&udc->ep_param[ep->epnum]->rbase)) >> 3);
1047  if (swoffs < ucoffs)
1048  emptybds = USB_BDRING_LEN_RX - ucoffs + swoffs;
1049  else
1050  emptybds = swoffs - ucoffs;
1051 
1052  if (emptybds < MIN_EMPTY_BDS) {
1053  qe_eprx_nack(ep);
1054  ep->localnack = 1;
1055  dev_vdbg(udc->dev, "%d empty bds, send NACK\n", emptybds);
1056  }
1057  ep->has_data = USB_BDRING_LEN_RX - emptybds;
1058 
1059  if (list_empty(&ep->queue)) {
1060  qe_eprx_nack(ep);
1061  dev_vdbg(udc->dev, "The rxep have no req queued with %d BDs\n",
1062  ep->has_data);
1063  return 0;
1064  }
1065 
1066  tasklet_schedule(&udc->rx_tasklet);
1067  ep->enable_tasklet = 1;
1068 
1069  return 0;
1070 }
1071 
1072 /* send the data in a frame, regardless of the current tx_req */
1073 static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1074 {
1075  struct qe_udc *udc = ep->udc;
1076  struct qe_bd __iomem *bd;
1077  u16 saveusbmr;
1078  u32 bdstatus, pidmask;
1079  u32 paddr;
1080 
1081  if (ep->dir == USB_DIR_OUT) {
1082  dev_err(udc->dev, "receive ep passed to tx function\n");
1083  return -EINVAL;
1084  }
1085 
1086  /* Disable the Tx interrupt */
1087  saveusbmr = in_be16(&udc->usb_regs->usb_usbmr);
1088  out_be16(&udc->usb_regs->usb_usbmr,
1089  saveusbmr & ~(USB_E_TXB_MASK | USB_E_TXE_MASK));
1090 
1091  bd = ep->n_txbd;
1092  bdstatus = in_be32((u32 __iomem *)bd);
1093 
1094  if (!(bdstatus & (T_R | BD_LENGTH_MASK))) {
1095  if (frame_get_length(frame) == 0) {
1096  frame_set_data(frame, udc->nullbuf);
1097  frame_set_length(frame, 2);
1098  frame->info |= (ZLP | NO_CRC);
1099  dev_vdbg(udc->dev, "the frame size = 0\n");
1100  }
1101  paddr = virt_to_phys((void *)frame->data);
1102  out_be32(&bd->buf, paddr);
1103  bdstatus = (bdstatus&T_W);
1104  if (!(frame_get_info(frame) & NO_CRC))
1105  bdstatus |= T_R | T_I | T_L | T_TC
1106  | frame_get_length(frame);
1107  else
1108  bdstatus |= T_R | T_I | T_L | frame_get_length(frame);
1109 
1110  /* if the packet is a ZLP in status phase */
1111  if ((ep->epnum == 0) && (udc->ep0_state == DATA_STATE_NEED_ZLP))
1112  ep->data01 = 0x1;
1113 
1114  if (ep->data01) {
1115  pidmask = T_PID_DATA1;
1116  frame->info |= PID_DATA1;
1117  } else {
1118  pidmask = T_PID_DATA0;
1119  frame->info |= PID_DATA0;
1120  }
1121  bdstatus |= T_CNF;
1122  bdstatus |= pidmask;
1123  out_be32((u32 __iomem *)bd, bdstatus);
1124  qe_ep_filltxfifo(ep);
1125 
1126  /* enable the TX interrupt */
1127  out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1128 
1129  qe_ep_toggledata01(ep);
1130  if (bdstatus & T_W)
1131  ep->n_txbd = ep->txbase;
1132  else
1133  ep->n_txbd++;
1134 
1135  return 0;
1136  } else {
1137  out_be16(&udc->usb_regs->usb_usbmr, saveusbmr);
1138  dev_vdbg(udc->dev, "The tx bd is not ready!\n");
1139  return -EBUSY;
1140  }
1141 }
1142 
1143 /* when a bd has been transmitted, this function handles
1144  * the tx_req; ep0 is not included */
1145 static int txcomplete(struct qe_ep *ep, unsigned char restart)
1146 {
1147  if (ep->tx_req != NULL) {
1148  struct qe_req *req = ep->tx_req;
1149  unsigned zlp = 0, last_len = 0;
1150 
1151  last_len = min_t(unsigned, req->req.length - ep->sent,
1152  ep->ep.maxpacket);
1153 
1154  if (!restart) {
1155  int asent = ep->last;
1156  ep->sent += asent;
1157  ep->last -= asent;
1158  } else {
1159  ep->last = 0;
1160  }
1161 
1162  /* a ZLP is needed when req->req.zero is set */
1163  if (req->req.zero) {
1164  if (last_len == 0 ||
1165  (req->req.length % ep->ep.maxpacket) != 0)
1166  zlp = 0;
1167  else
1168  zlp = 1;
1169  } else
1170  zlp = 0;
1171 
1172  /* the request has already been transmitted completely */
1173  if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1174  done(ep, ep->tx_req, 0);
1175  ep->tx_req = NULL;
1176  ep->last = 0;
1177  ep->sent = 0;
1178  }
1179  }
1180 
1181  /* we should get a new tx_req for this endpoint */
1182  if (ep->tx_req == NULL) {
1183  if (!list_empty(&ep->queue)) {
1184  ep->tx_req = list_entry(ep->queue.next, struct qe_req,
1185  queue);
1186  ep->last = 0;
1187  ep->sent = 0;
1188  }
1189  }
1190 
1191  return 0;
1192 }
1193 
1194 /* given a frame and a tx_req, send some data */
1195 static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1196 {
1197  unsigned int size;
1198  u8 *buf;
1199 
1200  qe_frame_clean(frame);
1201  size = min_t(u32, (ep->tx_req->req.length - ep->sent),
1202  ep->ep.maxpacket);
1203  buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1204  if (buf && size) {
1205  ep->last = size;
1206  ep->tx_req->req.actual += size;
1207  frame_set_data(frame, buf);
1208  frame_set_length(frame, size);
1209  frame_set_status(frame, FRAME_OK);
1210  frame_set_info(frame, 0);
1211  return qe_ep_tx(ep, frame);
1212  }
1213  return -EIO;
1214 }
1215 
1216 /* given a frame struct, send a ZLP */
1217 static int sendnulldata(struct qe_ep *ep, struct qe_frame *frame, uint infor)
1218 {
1219  struct qe_udc *udc = ep->udc;
1220 
1221  if (frame == NULL)
1222  return -ENODEV;
1223 
1224  qe_frame_clean(frame);
1225  frame_set_data(frame, (u8 *)udc->nullbuf);
1226  frame_set_length(frame, 2);
1227  frame_set_status(frame, FRAME_OK);
1228  frame_set_info(frame, (ZLP | NO_CRC | infor));
1229 
1230  return qe_ep_tx(ep, frame);
1231 }
1232 
1233 static int frame_create_tx(struct qe_ep *ep, struct qe_frame *frame)
1234 {
1235  struct qe_req *req = ep->tx_req;
1236  int reval;
1237 
1238  if (req == NULL)
1239  return -ENODEV;
1240 
1241  if ((req->req.length - ep->sent) > 0)
1242  reval = qe_usb_senddata(ep, frame);
1243  else
1244  reval = sendnulldata(ep, frame, 0);
1245 
1246  return reval;
1247 }
1248 
1249 /* if direction is DIR_IN, the status transaction is Device->Host;
1250  * if direction is DIR_OUT, the status transaction is Device<-Host.
1251  * In the status phase, the udc creates a request and collects the status */
1252 static int ep0_prime_status(struct qe_udc *udc, int direction)
1253 {
1254 
1255  struct qe_ep *ep = &udc->eps[0];
1256 
1257  if (direction == USB_DIR_IN) {
1258  udc->ep0_state = DATA_STATE_NEED_ZLP;
1259  udc->ep0_dir = USB_DIR_IN;
1260  sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1261  } else {
1262  udc->ep0_dir = USB_DIR_OUT;
1263  udc->ep0_state = WAIT_FOR_OUT_STATUS;
1264  }
1265 
1266  return 0;
1267 }
1268 
1269 /* a request completed on ep0, whether a gadget request or a udc request */
1270 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req)
1271 {
1272  struct qe_ep *ep = &udc->eps[0];
1273  /* the usb and ep status have already been set in ch9setaddress() */
1274 
1275  switch (udc->ep0_state) {
1276  case DATA_STATE_XMIT:
1277  done(ep, req, 0);
1278  /* receive status phase */
1279  if (ep0_prime_status(udc, USB_DIR_OUT))
1280  qe_ep0_stall(udc);
1281  break;
1282 
1283  case DATA_STATE_NEED_ZLP:
1284  done(ep, req, 0);
1285  udc->ep0_state = WAIT_FOR_SETUP;
1286  break;
1287 
1288  case DATA_STATE_RECV:
1289  done(ep, req, 0);
1290  /* send status phase */
1291  if (ep0_prime_status(udc, USB_DIR_IN))
1292  qe_ep0_stall(udc);
1293  break;
1294 
1295  case WAIT_FOR_OUT_STATUS:
1296  done(ep, req, 0);
1297  udc->ep0_state = WAIT_FOR_SETUP;
1298  break;
1299 
1300  case WAIT_FOR_SETUP:
1301  dev_vdbg(udc->dev, "Unexpected interrupt\n");
1302  break;
1303 
1304  default:
1305  qe_ep0_stall(udc);
1306  break;
1307  }
1308 }
1309 
1310 static int ep0_txcomplete(struct qe_ep *ep, unsigned char restart)
1311 {
1312  struct qe_req *tx_req = NULL;
1313  struct qe_frame *frame = ep->txframe;
1314 
1315  if ((frame_get_info(frame) & (ZLP | NO_REQ)) == (ZLP | NO_REQ)) {
1316  if (!restart)
1317  ep->udc->ep0_state = WAIT_FOR_SETUP;
1318  else
1319  sendnulldata(ep, ep->txframe, SETUP_STATUS | NO_REQ);
1320  return 0;
1321  }
1322 
1323  tx_req = ep->tx_req;
1324  if (tx_req != NULL) {
1325  if (!restart) {
1326  int asent = ep->last;
1327  ep->sent += asent;
1328  ep->last -= asent;
1329  } else {
1330  ep->last = 0;
1331  }
1332 
1333  /* the request has already been transmitted completely */
1334  if ((ep->tx_req->req.length - ep->sent) <= 0) {
1335  ep->tx_req->req.actual = (unsigned int)ep->sent;
1336  ep0_req_complete(ep->udc, ep->tx_req);
1337  ep->tx_req = NULL;
1338  ep->last = 0;
1339  ep->sent = 0;
1340  }
1341  } else {
1342  dev_vdbg(ep->udc->dev, "the ep0_controller have no req\n");
1343  }
1344 
1345  return 0;
1346 }
1347 
1348 static int ep0_txframe_handle(struct qe_ep *ep)
1349 {
1350  /* if there was an error, transmit again */
1351  if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1352  qe_ep_flushtxfifo(ep);
1353  dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
1354  if (frame_get_info(ep->txframe) & PID_DATA0)
1355  ep->data01 = 0;
1356  else
1357  ep->data01 = 1;
1358 
1359  ep0_txcomplete(ep, 1);
1360  } else
1361  ep0_txcomplete(ep, 0);
1362 
1363  frame_create_tx(ep, ep->txframe);
1364  return 0;
1365 }
1366 
1367 static int qe_ep0_txconf(struct qe_ep *ep)
1368 {
1369  struct qe_bd __iomem *bd;
1370  struct qe_frame *pframe;
1371  u32 bdstatus;
1372 
1373  bd = ep->c_txbd;
1374  bdstatus = in_be32((u32 __iomem *)bd);
1375  while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1376  pframe = ep->txframe;
1377 
1378  /* clear and recycle the BD */
1379  out_be32((u32 __iomem *)bd, bdstatus & T_W);
1380  out_be32(&bd->buf, 0);
1381  if (bdstatus & T_W)
1382  ep->c_txbd = ep->txbase;
1383  else
1384  ep->c_txbd++;
1385 
1386  if (ep->c_txbd == ep->n_txbd) {
1387  if (bdstatus & DEVICE_T_ERROR) {
1388  frame_set_status(pframe, FRAME_ERROR);
1389  if (bdstatus & T_TO)
1390  pframe->status |= TX_ER_TIMEOUT;
1391  if (bdstatus & T_UN)
1392  pframe->status |= TX_ER_UNDERUN;
1393  }
1394  ep0_txframe_handle(ep);
1395  }
1396 
1397  bd = ep->c_txbd;
1398  bdstatus = in_be32((u32 __iomem *)bd);
1399  }
1400 
1401  return 0;
1402 }
1403 
1404 static int ep_txframe_handle(struct qe_ep *ep)
1405 {
1406  if (frame_get_status(ep->txframe) & FRAME_ERROR) {
1407  qe_ep_flushtxfifo(ep);
1408  dev_vdbg(ep->udc->dev, "The EP0 transmit data have error!\n");
1409  if (frame_get_info(ep->txframe) & PID_DATA0)
1410  ep->data01 = 0;
1411  else
1412  ep->data01 = 1;
1413 
1414  txcomplete(ep, 1);
1415  } else
1416  txcomplete(ep, 0);
1417 
1418  frame_create_tx(ep, ep->txframe); /* send the data */
1419  return 0;
1420 }
1421 
1422 /* confirm the already transmitted bds */
1423 static int qe_ep_txconf(struct qe_ep *ep)
1424 {
1425  struct qe_bd __iomem *bd;
1426  struct qe_frame *pframe = NULL;
1427  u32 bdstatus;
1428  unsigned char breakonrxinterrupt = 0;
1429 
1430  bd = ep->c_txbd;
1431  bdstatus = in_be32((u32 __iomem *)bd);
1432  while (!(bdstatus & T_R) && (bdstatus & ~T_W)) {
1433  pframe = ep->txframe;
1434  if (bdstatus & DEVICE_T_ERROR) {
1435  frame_set_status(pframe, FRAME_ERROR);
1436  if (bdstatus & T_TO)
1437  pframe->status |= TX_ER_TIMEOUT;
1438  if (bdstatus & T_UN)
1439  pframe->status |= TX_ER_UNDERUN;
1440  }
1441 
1442  /* clear and recycle the BD */
1443  out_be32((u32 __iomem *)bd, bdstatus & T_W);
1444  out_be32(&bd->buf, 0);
1445  if (bdstatus & T_W)
1446  ep->c_txbd = ep->txbase;
1447  else
1448  ep->c_txbd++;
1449 
1450  /* handle the tx frame */
1451  ep_txframe_handle(ep);
1452  bd = ep->c_txbd;
1453  bdstatus = in_be32((u32 __iomem *)bd);
1454  }
1455  if (breakonrxinterrupt)
1456  return -EIO;
1457  else
1458  return 0;
1459 }
1460 
1461 /* Add a request in queue, and try to transmit a packet */
1462 static int ep_req_send(struct qe_ep *ep, struct qe_req *req)
1463 {
1464  int reval = 0;
1465 
1466  if (ep->tx_req == NULL) {
1467  ep->sent = 0;
1468  ep->last = 0;
1469  txcomplete(ep, 0); /* can gain a new tx_req */
1470  reval = frame_create_tx(ep, ep->txframe);
1471  }
1472  return reval;
1473 }
1474 
1475 /* Maybe this is a good idea */
1476 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req)
1477 {
1478  struct qe_udc *udc = ep->udc;
1479  struct qe_frame *pframe = NULL;
1480  struct qe_bd __iomem *bd;
1481  u32 bdstatus, length;
1482  u32 vaddr, fsize;
1483  u8 *cp;
1484  u8 finish_req = 0;
1485  u8 framepid;
1486 
1487  if (list_empty(&ep->queue)) {
1488  dev_vdbg(udc->dev, "the req already finish!\n");
1489  return 0;
1490  }
1491  pframe = ep->rxframe;
1492 
1493  bd = ep->n_rxbd;
1494  bdstatus = in_be32((u32 __iomem *)bd);
1495  length = bdstatus & BD_LENGTH_MASK;
1496 
1497  while (!(bdstatus & R_E) && length) {
1498  if (finish_req)
1499  break;
1500  if ((bdstatus & R_F) && (bdstatus & R_L)
1501  && !(bdstatus & R_ERROR)) {
1502  qe_frame_clean(pframe);
1503  vaddr = (u32)phys_to_virt(in_be32(&bd->buf));
1504  frame_set_data(pframe, (u8 *)vaddr);
1505  frame_set_length(pframe, (length - USB_CRC_SIZE));
1506  frame_set_status(pframe, FRAME_OK);
1507  switch (bdstatus & R_PID) {
1508  case R_PID_DATA1:
1509  frame_set_info(pframe, PID_DATA1); break;
1510  default:
1511  frame_set_info(pframe, PID_DATA0); break;
1512  }
1513  /* handle the rx frame */
1514 
1515  if (frame_get_info(pframe) & PID_DATA1)
1516  framepid = 0x1;
1517  else
1518  framepid = 0;
1519 
1520  if (framepid != ep->data01) {
1521  dev_vdbg(udc->dev, "the data01 error!\n");
1522  } else {
1523  fsize = frame_get_length(pframe);
1524 
1525  cp = (u8 *)(req->req.buf) + req->req.actual;
1526  if (cp) {
1527  memcpy(cp, pframe->data, fsize);
1528  req->req.actual += fsize;
1529  if ((fsize < ep->ep.maxpacket)
1530  || (req->req.actual >=
1531  req->req.length)) {
1532  finish_req = 1;
1533  done(ep, req, 0);
1534  if (list_empty(&ep->queue))
1535  qe_eprx_nack(ep);
1536  }
1537  }
1538  qe_ep_toggledata01(ep);
1539  }
1540  } else {
1541  dev_err(udc->dev, "The receive frame with error!\n");
1542  }
1543 
1544  /* note: don't clear the rxbd's buffer address *
1545  * only Clear the length */
1546  out_be32((u32 __iomem *)bd, (bdstatus & BD_STATUS_MASK));
1547  ep->has_data--;
1548 
1549  /* Get next BD */
1550  if (bdstatus & R_W)
1551  bd = ep->rxbase;
1552  else
1553  bd++;
1554 
1555  bdstatus = in_be32((u32 __iomem *)bd);
1556  length = bdstatus & BD_LENGTH_MASK;
1557  }
1558 
1559  ep->n_rxbd = bd;
1560  ep_recycle_rxbds(ep);
1561 
1562  return 0;
1563 }
1564 
1565 /* only add the request to the queue */
1566 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req)
1567 {
1568  if (ep->state == EP_STATE_NACK) {
1569  if (ep->has_data <= 0) {
1570  /* Enable rx and unmask rx interrupt */
1571  qe_eprx_normal(ep);
1572  } else {
1573  /* Copy the existing BD data */
1574  ep_req_rx(ep, req);
1575  }
1576  }
1577 
1578  return 0;
1579 }
1580 
1581 /********************************************************************
1582  Internally used functions - end
1583 ********************************************************************/
1584 
1585 /*-----------------------------------------------------------------------
1586  Endpoint Management Functions For Gadget
1587  -----------------------------------------------------------------------*/
1588 static int qe_ep_enable(struct usb_ep *_ep,
1589  const struct usb_endpoint_descriptor *desc)
1590 {
1591  struct qe_udc *udc;
1592  struct qe_ep *ep;
1593  int retval = 0;
1594  unsigned char epnum;
1595 
1596  ep = container_of(_ep, struct qe_ep, ep);
1597 
1598  /* catch various bogus parameters */
1599  if (!_ep || !desc || _ep->name == ep_name[0] ||
1600  (desc->bDescriptorType != USB_DT_ENDPOINT))
1601  return -EINVAL;
1602 
1603  udc = ep->udc;
1604  if (!udc->driver || (udc->gadget.speed == USB_SPEED_UNKNOWN))
1605  return -ESHUTDOWN;
1606 
1607  epnum = (u8)desc->bEndpointAddress & 0xF;
1608 
1609  retval = qe_ep_init(udc, epnum, desc);
1610  if (retval != 0) {
1611  cpm_muram_free(cpm_muram_offset(ep->rxbase));
1612  dev_dbg(udc->dev, "enable ep%d failed\n", ep->epnum);
1613  return -EINVAL;
1614  }
1615  dev_dbg(udc->dev, "enable ep%d successful\n", ep->epnum);
1616  return 0;
1617 }
1618 
1619 static int qe_ep_disable(struct usb_ep *_ep)
1620 {
1621  struct qe_udc *udc;
1622  struct qe_ep *ep;
1623  unsigned long flags;
1624  unsigned int size;
1625 
1626  ep = container_of(_ep, struct qe_ep, ep);
1627  udc = ep->udc;
1628 
1629  if (!_ep || !ep->ep.desc) {
1630  dev_dbg(udc->dev, "%s not enabled\n", _ep ? ep->ep.name : NULL);
1631  return -EINVAL;
1632  }
1633 
1634  spin_lock_irqsave(&udc->lock, flags);
1635  /* Nuke all pending requests (does flush) */
1636  nuke(ep, -ESHUTDOWN);
1637  ep->ep.desc = NULL;
1638  ep->stopped = 1;
1639  ep->tx_req = NULL;
1640  qe_ep_reset(udc, ep->epnum);
1641  spin_unlock_irqrestore(&udc->lock, flags);
1642 
1643  cpm_muram_free(cpm_muram_offset(ep->rxbase));
1644 
1645  if (ep->dir == USB_DIR_OUT)
1646  size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1647  (USB_BDRING_LEN_RX + 1);
1648  else
1649  size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) *
1650  (USB_BDRING_LEN + 1);
1651 
1652  if (ep->dir != USB_DIR_IN) {
1653  kfree(ep->rxframe);
1654  if (ep->rxbufmap) {
1655  dma_unmap_single(udc->gadget.dev.parent,
1656  ep->rxbuf_d, size,
1657  DMA_FROM_DEVICE);
1658  ep->rxbuf_d = DMA_ADDR_INVALID;
1659  } else {
1660  dma_sync_single_for_cpu(
1661  udc->gadget.dev.parent,
1662  ep->rxbuf_d, size,
1663  DMA_FROM_DEVICE);
1664  }
1665  kfree(ep->rxbuffer);
1666  }
1667 
1668  if (ep->dir != USB_DIR_OUT)
1669  kfree(ep->txframe);
1670 
1671  dev_dbg(udc->dev, "disabled %s OK\n", _ep->name);
1672  return 0;
1673 }
1674 
1675 static struct usb_request *qe_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1676 {
1677  struct qe_req *req;
1678 
1679  req = kzalloc(sizeof(*req), gfp_flags);
1680  if (!req)
1681  return NULL;
1682 
1683  req->req.dma = DMA_ADDR_INVALID;
1684 
1685  INIT_LIST_HEAD(&req->queue);
1686 
1687  return &req->req;
1688 }
1689 
1690 static void qe_free_request(struct usb_ep *_ep, struct usb_request *_req)
1691 {
1692  struct qe_req *req;
1693 
1694  req = container_of(_req, struct qe_req, req);
1695 
1696  if (_req)
1697  kfree(req);
1698 }
1699 
1700 static int __qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req)
1701 {
1702  struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1703  struct qe_req *req = container_of(_req, struct qe_req, req);
1704  struct qe_udc *udc;
1705  int reval;
1706 
1707  udc = ep->udc;
1708  /* catch various bogus parameters */
1709  if (!_req || !req->req.complete || !req->req.buf
1710  || !list_empty(&req->queue)) {
1711  dev_dbg(udc->dev, "bad params\n");
1712  return -EINVAL;
1713  }
1714  if (!_ep || (!ep->ep.desc && ep_index(ep))) {
1715  dev_dbg(udc->dev, "bad ep\n");
1716  return -EINVAL;
1717  }
1718 
1719  if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
1720  return -ESHUTDOWN;
1721 
1722  req->ep = ep;
1723 
1724  /* map virtual address to hardware */
1725  if (req->req.dma == DMA_ADDR_INVALID) {
1726  req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
1727  req->req.buf,
1728  req->req.length,
1729  ep_is_in(ep)
1730  ? DMA_TO_DEVICE :
1731  DMA_FROM_DEVICE);
1732  req->mapped = 1;
1733  } else {
1734  dma_sync_single_for_device(ep->udc->gadget.dev.parent,
1735  req->req.dma, req->req.length,
1736  ep_is_in(ep)
1737  ? DMA_TO_DEVICE :
1738  DMA_FROM_DEVICE);
1739  req->mapped = 0;
1740  }
1741 
1742  req->req.status = -EINPROGRESS;
1743  req->req.actual = 0;
1744 
1745  list_add_tail(&req->queue, &ep->queue);
1746  dev_vdbg(udc->dev, "gadget have request in %s! %d\n",
1747  ep->name, req->req.length);
1748 
1749  /* push the request to device */
1750  if (ep_is_in(ep))
1751  reval = ep_req_send(ep, req);
1752 
1753  /* EP0 */
1754  if (ep_index(ep) == 0 && req->req.length > 0) {
1755  if (ep_is_in(ep))
1756  udc->ep0_state = DATA_STATE_XMIT;
1757  else
1758  udc->ep0_state = DATA_STATE_RECV;
1759  }
1760 
1761  if (ep->dir == USB_DIR_OUT)
1762  reval = ep_req_receive(ep, req);
1763 
1764  return 0;
1765 }
1766 
1767 /* queues (submits) an I/O request to an endpoint */
1768 static int qe_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
1769  gfp_t gfp_flags)
1770 {
1771  struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1772  struct qe_udc *udc = ep->udc;
1773  unsigned long flags;
1774  int ret;
1775 
1776  spin_lock_irqsave(&udc->lock, flags);
1777  ret = __qe_ep_queue(_ep, _req);
1778  spin_unlock_irqrestore(&udc->lock, flags);
1779  return ret;
1780 }
1781 
1782 /* dequeues (cancels, unlinks) an I/O request from an endpoint */
1783 static int qe_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1784 {
1785  struct qe_ep *ep = container_of(_ep, struct qe_ep, ep);
1786  struct qe_req *req;
1787  unsigned long flags;
1788 
1789  if (!_ep || !_req)
1790  return -EINVAL;
1791 
1792  spin_lock_irqsave(&ep->udc->lock, flags);
1793 
1794  /* make sure it's actually queued on this endpoint */
1795  list_for_each_entry(req, &ep->queue, queue) {
1796  if (&req->req == _req)
1797  break;
1798  }
1799 
1800  if (&req->req != _req) {
1801  spin_unlock_irqrestore(&ep->udc->lock, flags);
1802  return -EINVAL;
1803  }
1804 
1805  done(ep, req, -ECONNRESET);
1806 
1807  spin_unlock_irqrestore(&ep->udc->lock, flags);
1808  return 0;
1809 }
1810 
1811 /*-----------------------------------------------------------------
1812  * modify the endpoint halt feature
1813  * @ep: the non-isochronous endpoint being stalled
1814  * @value: 1--set halt 0--clear halt
1815  * Returns zero, or a negative error code.
1816 *----------------------------------------------------------------*/
1817 static int qe_ep_set_halt(struct usb_ep *_ep, int value)
1818 {
1819  struct qe_ep *ep;
1820  unsigned long flags;
1821  int status = -EOPNOTSUPP;
1822  struct qe_udc *udc;
1823 
1824  ep = container_of(_ep, struct qe_ep, ep);
1825  if (!_ep || !ep->ep.desc) {
1826  status = -EINVAL;
1827  goto out;
1828  }
1829 
1830  udc = ep->udc;
1831  /* An attempt to halt an IN ep will fail if any transfer requests
1832  * are still queued */
1833  if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
1834  status = -EAGAIN;
1835  goto out;
1836  }
1837 
1838  status = 0;
1839  spin_lock_irqsave(&ep->udc->lock, flags);
1840  qe_eptx_stall_change(ep, value);
1841  qe_eprx_stall_change(ep, value);
1842  spin_unlock_irqrestore(&ep->udc->lock, flags);
1843 
1844  if (ep->epnum == 0) {
1845  udc->ep0_state = WAIT_FOR_SETUP;
1846  udc->ep0_dir = 0;
1847  }
1848 
1849  /* set data toggle to DATA0 on clear halt */
1850  if (value == 0)
1851  ep->data01 = 0;
1852 out:
1853  dev_vdbg(udc->dev, "%s %s halt stat %d\n", ep->ep.name,
1854  value ? "set" : "clear", status);
1855 
1856  return status;
1857 }
1858 
1859 static struct usb_ep_ops qe_ep_ops = {
1860  .enable = qe_ep_enable,
1861  .disable = qe_ep_disable,
1862 
1863  .alloc_request = qe_alloc_request,
1864  .free_request = qe_free_request,
1865 
1866  .queue = qe_ep_queue,
1867  .dequeue = qe_ep_dequeue,
1868 
1869  .set_halt = qe_ep_set_halt,
1870 };
1871 
1872 /*------------------------------------------------------------------------
1873  Gadget Driver Layer Operations
1874  ------------------------------------------------------------------------*/
1875 
1876 /* Get the current frame number */
1877 static int qe_get_frame(struct usb_gadget *gadget)
1878 {
1879  struct qe_udc *udc = container_of(gadget, struct qe_udc, gadget);
1880  u16 tmp;
1881 
1882  tmp = in_be16(&udc->usb_param->frame_n);
1883  if (tmp & 0x8000)
1884  tmp = tmp & 0x07ff;
1885  else
1886  tmp = -EINVAL;
1887 
1888  return (int)tmp;
1889 }
1890 
1891 static int fsl_qe_start(struct usb_gadget *gadget,
1892  struct usb_gadget_driver *driver);
1893 static int fsl_qe_stop(struct usb_gadget *gadget,
1894  struct usb_gadget_driver *driver);
1895 
1896 /* defined in usb_gadget.h */
1897 static struct usb_gadget_ops qe_gadget_ops = {
1898  .get_frame = qe_get_frame,
1899  .udc_start = fsl_qe_start,
1900  .udc_stop = fsl_qe_stop,
1901 };
1902 
1903 /*-------------------------------------------------------------------------
1904  USB ep0 Setup process in BUS Enumeration
1905  -------------------------------------------------------------------------*/
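/* ep0 control transfers walk through udc->ep0_state: WAIT_FOR_SETUP until a
 * SETUP packet arrives, DATA_STATE_XMIT or DATA_STATE_RECV during an IN/OUT
 * data phase, and DATA_STATE_NEED_ZLP or WAIT_FOR_OUT_STATUS while the status
 * handshake completes, before falling back to WAIT_FOR_SETUP. */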
1906 static int udc_reset_ep_queue(struct qe_udc *udc, u8 pipe)
1907 {
1908  struct qe_ep *ep = &udc->eps[pipe];
1909 
1910  nuke(ep, -ECONNRESET);
1911  ep->tx_req = NULL;
1912  return 0;
1913 }
1914 
1915 static int reset_queues(struct qe_udc *udc)
1916 {
1917  u8 pipe;
1918 
1919  for (pipe = 0; pipe < USB_MAX_ENDPOINTS; pipe++)
1920  udc_reset_ep_queue(udc, pipe);
1921 
1922  /* report disconnect; the driver is already quiesced */
1923  spin_unlock(&udc->lock);
1924  udc->driver->disconnect(&udc->gadget);
1925  spin_lock(&udc->lock);
1926 
1927  return 0;
1928 }
1929 
1930 static void ch9setaddress(struct qe_udc *udc, u16 value, u16 index,
1931  u16 length)
1932 {
1933  /* Save the new address to device struct */
1934  udc->device_address = (u8) value;
1935  /* Update usb state */
1936  udc->usb_state = USB_STATE_ADDRESS;
1937 
1938  /* Status phase, send a ZLP */
1939  if (ep0_prime_status(udc, USB_DIR_IN))
1940  qe_ep0_stall(udc);
1941 }
1942 
1943 static void ownercomplete(struct usb_ep *_ep, struct usb_request *_req)
1944 {
1945  struct qe_req *req = container_of(_req, struct qe_req, req);
1946 
1947  req->req.buf = NULL;
1948  kfree(req);
1949 }
1950 
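/* ch9getstatus() answers GET_STATUS for device, interface and endpoint
 * recipients: it builds the two-byte little-endian status word in
 * udc->statusbuf, wraps it in a locally allocated request whose completion
 * handler (ownercomplete) frees it, and queues it on ep0 for the IN data phase. */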
1951 static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
1952  u16 index, u16 length)
1953 {
1954  u16 usb_status = 0;
1955  struct qe_req *req;
1956  struct qe_ep *ep;
1957  int status = 0;
1958 
1959  ep = &udc->eps[0];
1960  if ((request_type & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1961  /* Get device status */
1962  usb_status = 1 << USB_DEVICE_SELF_POWERED;
1963  } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_INTERFACE) {
1964  /* Get interface status */
1965  /* We don't have interface information in udc driver */
1966  usb_status = 0;
1967  } else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
1968  /* Get endpoint status */
1969  int pipe = index & USB_ENDPOINT_NUMBER_MASK;
1970  struct qe_ep *target_ep = &udc->eps[pipe];
1971  u16 usep;
1972 
1973  /* stall if endpoint doesn't exist */
1974  if (!target_ep->ep.desc)
1975  goto stall;
1976 
1977  usep = in_be16(&udc->usb_regs->usb_usep[pipe]);
1978  if (index & USB_DIR_IN) {
1979  if (target_ep->dir != USB_DIR_IN)
1980  goto stall;
1981  if ((usep & USB_THS_MASK) == USB_THS_STALL)
1982  usb_status = 1 << USB_ENDPOINT_HALT;
1983  } else {
1984  if (target_ep->dir != USB_DIR_OUT)
1985  goto stall;
1986  if ((usep & USB_RHS_MASK) == USB_RHS_STALL)
1987  usb_status = 1 << USB_ENDPOINT_HALT;
1988  }
1989  }
1990 
1991  req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL),
1992  struct qe_req, req);
1993  req->req.length = 2;
1994  req->req.buf = udc->statusbuf;
1995  *(u16 *)req->req.buf = cpu_to_le16(usb_status);
1996  req->req.status = -EINPROGRESS;
1997  req->req.actual = 0;
1998  req->req.complete = ownercomplete;
1999 
2000  udc->ep0_dir = USB_DIR_IN;
2001 
2002  /* data phase */
2003  status = __qe_ep_queue(&ep->ep, &req->req);
2004 
2005  if (status == 0)
2006  return;
2007 stall:
2008  dev_err(udc->dev, "Can't respond to getstatus request \n");
2009  qe_ep0_stall(udc);
2010 }
2011 
2012 /* only handle the setup request; assume the device is in a normal state */
2013 static void setup_received_handle(struct qe_udc *udc,
2014  struct usb_ctrlrequest *setup)
2015 {
2016  /* Fix endianness (udc->local_setup_buff is CPU-endian now) */
2017  u16 wValue = le16_to_cpu(setup->wValue);
2018  u16 wIndex = le16_to_cpu(setup->wIndex);
2019  u16 wLength = le16_to_cpu(setup->wLength);
2020 
2021  /* clear the previous request in the ep0 */
2022  udc_reset_ep_queue(udc, 0);
2023 
2024  if (setup->bRequestType & USB_DIR_IN)
2025  udc->ep0_dir = USB_DIR_IN;
2026  else
2027  udc->ep0_dir = USB_DIR_OUT;
2028 
2029  switch (setup->bRequest) {
2030  case USB_REQ_GET_STATUS:
2031  /* Data+Status phase from udc */
2032  if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
2033  != (USB_DIR_IN | USB_TYPE_STANDARD))
2034  break;
2035  ch9getstatus(udc, setup->bRequestType, wValue, wIndex,
2036  wLength);
2037  return;
2038 
2039  case USB_REQ_SET_ADDRESS:
2040  /* Status phase from udc */
2041  if (setup->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
2042  USB_RECIP_DEVICE))
2043  break;
2044  ch9setaddress(udc, wValue, wIndex, wLength);
2045  return;
2046 
2047  case USB_REQ_CLEAR_FEATURE:
2048  case USB_REQ_SET_FEATURE:
2049  /* Requests with no data phase, status phase from udc */
2050  if ((setup->bRequestType & USB_TYPE_MASK)
2051  != USB_TYPE_STANDARD)
2052  break;
2053 
2054  if ((setup->bRequestType & USB_RECIP_MASK)
2055  == USB_RECIP_ENDPOINT) {
2056  int pipe = wIndex & USB_ENDPOINT_NUMBER_MASK;
2057  struct qe_ep *ep;
2058 
2059  if (wValue != 0 || wLength != 0
2060  || pipe > USB_MAX_ENDPOINTS)
2061  break;
2062  ep = &udc->eps[pipe];
2063 
2064  spin_unlock(&udc->lock);
2065  qe_ep_set_halt(&ep->ep,
2066  (setup->bRequest == USB_REQ_SET_FEATURE)
2067  ? 1 : 0);
2068  spin_lock(&udc->lock);
2069  }
2070 
2071  ep0_prime_status(udc, USB_DIR_IN);
2072 
2073  return;
2074 
2075  default:
2076  break;
2077  }
2078 
2079  if (wLength) {
2080  /* Data phase from gadget, status phase from udc */
2081  if (setup->bRequestType & USB_DIR_IN) {
2082  udc->ep0_state = DATA_STATE_XMIT;
2083  udc->ep0_dir = USB_DIR_IN;
2084  } else {
2085  udc->ep0_state = DATA_STATE_RECV;
2086  udc->ep0_dir = USB_DIR_OUT;
2087  }
2088  spin_unlock(&udc->lock);
2089  if (udc->driver->setup(&udc->gadget,
2090  &udc->local_setup_buff) < 0)
2091  qe_ep0_stall(udc);
2092  spin_lock(&udc->lock);
2093  } else {
2094  /* No data phase, IN status from gadget */
2095  udc->ep0_dir = USB_DIR_IN;
2096  spin_unlock(&udc->lock);
2097  if (udc->driver->setup(&udc->gadget,
2098  &udc->local_setup_buff) < 0)
2099  qe_ep0_stall(udc);
2100  spin_lock(&udc->lock);
2101  udc->ep0_state = DATA_STATE_NEED_ZLP;
2102  }
2103 }
2104 
2105 /*-------------------------------------------------------------------------
2106  USB Interrupt handlers
2107  -------------------------------------------------------------------------*/
2108 static void suspend_irq(struct qe_udc *udc)
2109 {
2110  udc->resume_state = udc->usb_state;
2111  udc->usb_state = USB_STATE_SUSPENDED;
2112 
2113  /* report suspend to the driver; serial.c does not support this */
2114  if (udc->driver->suspend)
2115  udc->driver->suspend(&udc->gadget);
2116 }
2117 
2118 static void resume_irq(struct qe_udc *udc)
2119 {
2120  udc->usb_state = udc->resume_state;
2121  udc->resume_state = 0;
2122 
2123  /* report resume to the driver; serial.c does not support this */
2124  if (udc->driver->resume)
2125  udc->driver->resume(&udc->gadget);
2126 }
2127 
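 /* idle_irq() samples the USBS status register: an asserted idle bit while
  * the device is active is treated as a suspend condition, and its
  * de-assertion while suspended as a resume. */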
2128 static void idle_irq(struct qe_udc *udc)
2129 {
2130  u8 usbs;
2131 
2132  usbs = in_8(&udc->usb_regs->usb_usbs);
2133  if (usbs & USB_IDLE_STATUS_MASK) {
2134  if ((udc->usb_state) != USB_STATE_SUSPENDED)
2135  suspend_irq(udc);
2136  } else {
2137  if (udc->usb_state == USB_STATE_SUSPENDED)
2138  resume_irq(udc);
2139  }
2140 }
2141 
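 /* A bus reset returns the device to the Default state: respond at address 0
  * again, reinitialise every configured endpoint and restart the ep0 state
  * machine before re-enabling the controller. */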
2142 static int reset_irq(struct qe_udc *udc)
2143 {
2144  unsigned char i;
2145 
2146  if (udc->usb_state == USB_STATE_DEFAULT)
2147  return 0;
2148 
2149  qe_usb_disable(udc);
2150  out_8(&udc->usb_regs->usb_usadr, 0);
2151 
2152  for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2153  if (udc->eps[i].init)
2154  qe_ep_reset(udc, i);
2155  }
2156 
2157  reset_queues(udc);
2158  udc->usb_state = USB_STATE_DEFAULT;
2159  udc->ep0_state = WAIT_FOR_SETUP;
2160  udc->ep0_dir = USB_DIR_OUT;
2161  qe_usb_enable(udc);
2162  return 0;
2163 }
2164 
2165 static int bsy_irq(struct qe_udc *udc)
2166 {
2167  return 0;
2168 }
2169 
2170 static int txe_irq(struct qe_udc *udc)
2171 {
2172  return 0;
2173 }
2174 
2175 /* ep0 tx interrupt also in here */
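 /* walk the IN-capable endpoints from the highest number down and retire any
  * buffer descriptor the controller has finished transmitting (T_R cleared) */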
2176 static int tx_irq(struct qe_udc *udc)
2177 {
2178  struct qe_ep *ep;
2179  struct qe_bd __iomem *bd;
2180  int i, res = 0;
2181 
2182  if ((udc->usb_state == USB_STATE_ADDRESS)
2183  && (in_8(&udc->usb_regs->usb_usadr) == 0))
2184  out_8(&udc->usb_regs->usb_usadr, udc->device_address);
2185 
2186  for (i = (USB_MAX_ENDPOINTS-1); ((i >= 0) && (res == 0)); i--) {
2187  ep = &udc->eps[i];
2188  if (ep && ep->init && (ep->dir != USB_DIR_OUT)) {
2189  bd = ep->c_txbd;
2190  if (!(in_be32((u32 __iomem *)bd) & T_R)
2191  && (in_be32(&bd->buf))) {
2192  /* confirm the transmitted bd */
2193  if (ep->epnum == 0)
2194  res = qe_ep0_txconf(ep);
2195  else
2196  res = qe_ep_txconf(ep);
2197  }
2198  }
2199  }
2200  return res;
2201 }
2202 
2203 
2204 /* setup packet rx is also handled in this function */
2205 static void rx_irq(struct qe_udc *udc)
2206 {
2207  struct qe_ep *ep;
2208  struct qe_bd __iomem *bd;
2209  int i;
2210 
2211  for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2212  ep = &udc->eps[i];
2213  if (ep && ep->init && (ep->dir != USB_DIR_IN)) {
2214  bd = ep->n_rxbd;
2215  if (!(in_be32((u32 __iomem *)bd) & R_E)
2216  && (in_be32(&bd->buf))) {
2217  if (ep->epnum == 0) {
2218  qe_ep0_rx(udc);
2219  } else {
2220  /* non-setup packet receive */
2221  qe_ep_rx(ep);
2222  }
2223  }
2224  }
2225  }
2226 }
2227 
2228 static irqreturn_t qe_udc_irq(int irq, void *_udc)
2229 {
2230  struct qe_udc *udc = (struct qe_udc *)_udc;
2231  u16 irq_src;
2232  irqreturn_t status = IRQ_NONE;
2233  unsigned long flags;
2234 
2235  spin_lock_irqsave(&udc->lock, flags);
2236 
2237  irq_src = in_be16(&udc->usb_regs->usb_usber) &
2238  in_be16(&udc->usb_regs->usb_usbmr);
2239  /* Clear notification bits */
2240  out_be16(&udc->usb_regs->usb_usber, irq_src);
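  /* USBER is, like other CPM/QE event registers, presumably
   * write-one-to-clear: writing the captured bits back acknowledges them,
   * and only events unmasked in USBMR are serviced below. */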
2241  /* USB Interrupt */
2242  if (irq_src & USB_E_IDLE_MASK) {
2243  idle_irq(udc);
2244  irq_src &= ~USB_E_IDLE_MASK;
2245  status = IRQ_HANDLED;
2246  }
2247 
2248  if (irq_src & USB_E_TXB_MASK) {
2249  tx_irq(udc);
2250  irq_src &= ~USB_E_TXB_MASK;
2251  status = IRQ_HANDLED;
2252  }
2253 
2254  if (irq_src & USB_E_RXB_MASK) {
2255  rx_irq(udc);
2256  irq_src &= ~USB_E_RXB_MASK;
2257  status = IRQ_HANDLED;
2258  }
2259 
2260  if (irq_src & USB_E_RESET_MASK) {
2261  reset_irq(udc);
2262  irq_src &= ~USB_E_RESET_MASK;
2263  status = IRQ_HANDLED;
2264  }
2265 
2266  if (irq_src & USB_E_BSY_MASK) {
2267  bsy_irq(udc);
2268  irq_src &= ~USB_E_BSY_MASK;
2269  status = IRQ_HANDLED;
2270  }
2271 
2272  if (irq_src & USB_E_TXE_MASK) {
2273  txe_irq(udc);
2274  irq_src &= ~USB_E_TXE_MASK;
2275  status = IRQ_HANDLED;
2276  }
2277 
2278  spin_unlock_irqrestore(&udc->lock, flags);
2279 
2280  return status;
2281 }
2282 
2283 /*-------------------------------------------------------------------------
2284  Gadget driver probe and unregister.
2285  --------------------------------------------------------------------------*/
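 /* fsl_qe_start() serves as the udc_start operation: it latches the gadget
  * driver, enables the controller and unmasks the default device events so
  * enumeration can start on the next bus reset. */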
2286 static int fsl_qe_start(struct usb_gadget *gadget,
2287  struct usb_gadget_driver *driver)
2288 {
2289  struct qe_udc *udc;
2290  unsigned long flags;
2291 
2292  udc = container_of(gadget, struct qe_udc, gadget);
2293  /* a lock is needed, though it is unclear whether this or another lock should be used */
2294  spin_lock_irqsave(&udc->lock, flags);
2295 
2296  driver->driver.bus = NULL;
2297  /* hook up the driver */
2298  udc->driver = driver;
2299  udc->gadget.dev.driver = &driver->driver;
2300  udc->gadget.speed = driver->max_speed;
2301 
2302  /* enable the controller and unmask the default device event interrupts */
2303  qe_usb_enable(udc);
2304 
2305  out_be16(&udc->usb_regs->usb_usber, 0xffff);
2306  out_be16(&udc->usb_regs->usb_usbmr, USB_E_DEFAULT_DEVICE);
2307  udc->usb_state = USB_STATE_ATTACHED;
2308  udc->ep0_state = WAIT_FOR_SETUP;
2309  udc->ep0_dir = USB_DIR_OUT;
2310  spin_unlock_irqrestore(&udc->lock, flags);
2311 
2312  dev_info(udc->dev, "%s bind to driver %s\n", udc->gadget.name,
2313  driver->driver.name);
2314  return 0;
2315 }
2316 
2317 static int fsl_qe_stop(struct usb_gadget *gadget,
2318  struct usb_gadget_driver *driver)
2319 {
2320  struct qe_udc *udc;
2321  struct qe_ep *loop_ep;
2322  unsigned long flags;
2323 
2324  udc = container_of(gadget, struct qe_udc, gadget);
2325  /* stop usb controller, disable intr */
2326  qe_usb_disable(udc);
2327 
2328  /* in fact, not needed */
2329  udc->usb_state = USB_STATE_ATTACHED;
2330  udc->ep0_state = WAIT_FOR_SETUP;
2331  udc->ep0_dir = 0;
2332 
2333  /* standard operation */
2334  spin_lock_irqsave(&udc->lock, flags);
2335  udc->gadget.speed = USB_SPEED_UNKNOWN;
2336  nuke(&udc->eps[0], -ESHUTDOWN);
2337  list_for_each_entry(loop_ep, &udc->gadget.ep_list, ep.ep_list)
2338  nuke(loop_ep, -ESHUTDOWN);
2339  spin_unlock_irqrestore(&udc->lock, flags);
2340 
2341  udc->gadget.dev.driver = NULL;
2342  udc->driver = NULL;
2343 
2344  dev_info(udc->dev, "unregistered gadget driver '%s'\n",
2345  driver->driver.name);
2346  return 0;
2347 }
2348 
2349 /* udc structure alloc and setup, including ep-param alloc */
2350 static struct qe_udc __devinit *qe_udc_config(struct platform_device *ofdev)
2351 {
2352  struct qe_udc *udc;
2353  struct device_node *np = ofdev->dev.of_node;
2354  unsigned int tmp_addr = 0;
2355  struct usb_device_para __iomem *usbpram;
2356  unsigned int i;
2357  u64 size;
2358  u32 offset;
2359 
2360  udc = kzalloc(sizeof(*udc), GFP_KERNEL);
2361  if (udc == NULL) {
2362  dev_err(&ofdev->dev, "malloc udc failed\n");
2363  goto cleanup;
2364  }
2365 
2366  udc->dev = &ofdev->dev;
2367 
2368  /* get default address of usb parameter in MURAM from device tree */
2369  offset = *of_get_address(np, 1, &size, NULL);
2370  udc->usb_param = cpm_muram_addr(offset);
2371  memset_io(udc->usb_param, 0, size);
2372 
2373  usbpram = udc->usb_param;
2374  out_be16(&usbpram->frame_n, 0);
2375  out_be32(&usbpram->rstate, 0);
2376 
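 /* allocate one parameter block per endpoint from MURAM; each block is
  * 32 bytes (presumably sizeof(struct usb_ep_para)) and its offset is
  * recorded in the parameter RAM epptr[] table */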
2377  tmp_addr = cpm_muram_alloc((USB_MAX_ENDPOINTS *
2378  sizeof(struct usb_ep_para)),
2379  USB_EP_PARA_ALIGNMENT);
2380  if (IS_ERR_VALUE(tmp_addr))
2381  goto cleanup;
2382 
2383  for (i = 0; i < USB_MAX_ENDPOINTS; i++) {
2384  out_be16(&usbpram->epptr[i], (u16)tmp_addr);
2385  udc->ep_param[i] = cpm_muram_addr(tmp_addr);
2386  tmp_addr += 32;
2387  }
2388 
2389  memset_io(udc->ep_param[0], 0,
2390  USB_MAX_ENDPOINTS * sizeof(struct usb_ep_para));
2391 
2392  udc->resume_state = USB_STATE_NOTATTACHED;
2393  udc->usb_state = USB_STATE_POWERED;
2394  udc->ep0_dir = 0;
2395 
2396  spin_lock_init(&udc->lock);
2397  return udc;
2398 
2399 cleanup:
2400  kfree(udc);
2401  return NULL;
2402 }
2403 
2404 /* USB Controller register init */
2405 static int __devinit qe_udc_reg_init(struct qe_udc *udc)
2406 {
2407  struct usb_ctlr __iomem *qe_usbregs;
2408  qe_usbregs = udc->usb_regs;
2409 
2410  /* Spec says that we must enable the USB controller to change mode. */
2411  out_8(&qe_usbregs->usb_usmod, 0x01);
2412  /* Mode changed, now disable it, since muram isn't initialized yet. */
2413  out_8(&qe_usbregs->usb_usmod, 0x00);
2414 
2415  /* Initialize the rest. */
2416  out_be16(&qe_usbregs->usb_usbmr, 0);
2417  out_8(&qe_usbregs->usb_uscom, 0);
2418  out_be16(&qe_usbregs->usb_usber, USBER_ALL_CLEAR);
2419 
2420  return 0;
2421 }
2422 
2423 static int __devinit qe_ep_config(struct qe_udc *udc, unsigned char pipe_num)
2424 {
2425  struct qe_ep *ep = &udc->eps[pipe_num];
2426 
2427  ep->udc = udc;
2428  strcpy(ep->name, ep_name[pipe_num]);
2429  ep->ep.name = ep_name[pipe_num];
2430 
2431  ep->ep.ops = &qe_ep_ops;
2432  ep->stopped = 1;
2433  ep->ep.maxpacket = (unsigned short) ~0;
2434  ep->ep.desc = NULL;
2435  ep->dir = 0xff;
2436  ep->epnum = (u8)pipe_num;
2437  ep->sent = 0;
2438  ep->last = 0;
2439  ep->init = 0;
2440  ep->rxframe = NULL;
2441  ep->txframe = NULL;
2442  ep->tx_req = NULL;
2443  ep->state = EP_STATE_IDLE;
2444  ep->has_data = 0;
2445 
2446  /* the queue lists any req for this ep */
2447  INIT_LIST_HEAD(&ep->queue);
2448 
2449  /* gadget.ep_list is used by ep_autoconfig, so ep0 is not added */
2450  if (pipe_num != 0)
2451  list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
2452 
2453  ep->gadget = &udc->gadget;
2454 
2455  return 0;
2456 }
2457 
2458 /*-----------------------------------------------------------------------
2459  * UDC device Driver operation functions *
2460  *----------------------------------------------------------------------*/
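 /* release() callback of gadget.dev: runs once the last reference is
  * dropped, signals udc->done so qe_udc_remove() can return, then releases
  * the MURAM endpoint parameter area and frees the udc structure itself. */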
2461 static void qe_udc_release(struct device *dev)
2462 {
2463  struct qe_udc *udc = container_of(dev, struct qe_udc, gadget.dev);
2464  int i;
2465 
2466  complete(udc->done);
2467  cpm_muram_free(cpm_muram_offset(udc->ep_param[0]));
2468  for (i = 0; i < USB_MAX_ENDPOINTS; i++)
2469  udc->ep_param[i] = NULL;
2470 
2471  kfree(udc);
2472 }
2473 
2474 /* Driver probe functions */
2475 static const struct of_device_id qe_udc_match[];
2476 static int __devinit qe_udc_probe(struct platform_device *ofdev)
2477 {
2478  struct qe_udc *udc;
2479  const struct of_device_id *match;
2480  struct device_node *np = ofdev->dev.of_node;
2481  struct qe_ep *ep;
2482  unsigned int ret = 0;
2483  unsigned int i;
2484  const void *prop;
2485 
2486  match = of_match_device(qe_udc_match, &ofdev->dev);
2487  if (!match)
2488  return -EINVAL;
2489 
2490  prop = of_get_property(np, "mode", NULL);
2491  if (!prop || strcmp(prop, "peripheral"))
2492  return -ENODEV;
2493 
2494  /* Initialize the udc structure, including the parameter RAM setup and other members */
2495  udc = qe_udc_config(ofdev);
2496  if (!udc) {
2497  dev_err(&ofdev->dev, "failed to initialize\n");
2498  return -ENOMEM;
2499  }
2500 
2501  udc->soc_type = (unsigned long)match->data;
2502  udc->usb_regs = of_iomap(np, 0);
2503  if (!udc->usb_regs) {
2504  ret = -ENOMEM;
2505  goto err1;
2506  }
2507 
2508  /* initialize the usb hw regs except for the per-EP regs,
2509  * leave the usb interrupt regs untouched */
2510  qe_udc_reg_init(udc);
2511 
2512  /* here come the standard operations for probe:
2513  * set the qe_udc->gadget.xxx fields */
2514  udc->gadget.ops = &qe_gadget_ops;
2515 
2516  /* gadget.ep0 is a pointer */
2517  udc->gadget.ep0 = &udc->eps[0].ep;
2518 
2519  INIT_LIST_HEAD(&udc->gadget.ep_list);
2520 
2521  /* modify in register gadget process */
2522  udc->gadget.speed = USB_SPEED_UNKNOWN;
2523 
2524  /* name: Identifies the controller hardware type. */
2525  udc->gadget.name = driver_name;
2526 
2527  device_initialize(&udc->gadget.dev);
2528 
2529  dev_set_name(&udc->gadget.dev, "gadget");
2530 
2531  udc->gadget.dev.release = qe_udc_release;
2532  udc->gadget.dev.parent = &ofdev->dev;
2533 
2534  /* initialize qe_ep struct */
2535  for (i = 0; i < USB_MAX_ENDPOINTS ; i++) {
2536  /* because the ep type isn't decided here,
2537  * qe_ep_init() should be called in ep_enable() */
2538 
2539  /* setup the qe_ep struct and link ep.ep.list
2540  * into gadget.ep_list */
2541  qe_ep_config(udc, (unsigned char)i);
2542  }
2543 
2544  /* ep0 initialization in here */
2545  ret = qe_ep_init(udc, 0, &qe_ep0_desc);
2546  if (ret)
2547  goto err2;
2548 
2549  /* create a buffer for ZLP sends; it must remain zeroed */
2550  udc->nullbuf = kzalloc(256, GFP_KERNEL);
2551  if (udc->nullbuf == NULL) {
2552  dev_err(udc->dev, "cannot alloc nullbuf\n");
2553  ret = -ENOMEM;
2554  goto err3;
2555  }
2556 
2557  /* buffer for data of get_status request */
2558  udc->statusbuf = kzalloc(2, GFP_KERNEL);
2559  if (udc->statusbuf == NULL) {
2560  ret = -ENOMEM;
2561  goto err4;
2562  }
2563 
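 /* obtain a DMA address for the ZLP buffer: if virt_to_phys() yields the
  * DMA_ADDR_INVALID marker the buffer is streaming-mapped (and nullmap
  * remembers to unmap it later), otherwise it is only synced for the device */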
2564  udc->nullp = virt_to_phys((void *)udc->nullbuf);
2565  if (udc->nullp == DMA_ADDR_INVALID) {
2566  udc->nullp = dma_map_single(
2567  udc->gadget.dev.parent,
2568  udc->nullbuf,
2569  256,
2570  DMA_TO_DEVICE);
2571  udc->nullmap = 1;
2572  } else {
2573  dma_sync_single_for_device(udc->gadget.dev.parent,
2574  udc->nullp, 256,
2575  DMA_TO_DEVICE);
2576  }
2577 
2578  tasklet_init(&udc->rx_tasklet, ep_rx_tasklet,
2579  (unsigned long)udc);
2580  /* request irq and disable DR */
2581  udc->usb_irq = irq_of_parse_and_map(np, 0);
2582  if (!udc->usb_irq) {
2583  ret = -EINVAL;
2584  goto err_noirq;
2585  }
2586 
2587  ret = request_irq(udc->usb_irq, qe_udc_irq, 0,
2588  driver_name, udc);
2589  if (ret) {
2590  dev_err(udc->dev, "cannot request irq %d err %d\n",
2591  udc->usb_irq, ret);
2592  goto err5;
2593  }
2594 
2595  ret = device_add(&udc->gadget.dev);
2596  if (ret)
2597  goto err6;
2598 
2599  ret = usb_add_gadget_udc(&ofdev->dev, &udc->gadget);
2600  if (ret)
2601  goto err7;
2602 
2603  dev_set_drvdata(&ofdev->dev, udc);
2604  dev_info(udc->dev,
2605  "%s USB controller initialized as device\n",
2606  (udc->soc_type == PORT_QE) ? "QE" : "CPM");
2607  return 0;
2608 
2609 err7:
2610  device_unregister(&udc->gadget.dev);
2611 err6:
2612  free_irq(udc->usb_irq, udc);
2613 err5:
2614  irq_dispose_mapping(udc->usb_irq);
2615 err_noirq:
2616  if (udc->nullmap) {
2617  dma_unmap_single(udc->gadget.dev.parent,
2618  udc->nullp, 256,
2619  DMA_TO_DEVICE);
2620  udc->nullp = DMA_ADDR_INVALID;
2621  } else {
2622  dma_sync_single_for_cpu(udc->gadget.dev.parent,
2623  udc->nullp, 256,
2624  DMA_TO_DEVICE);
2625  }
2626  kfree(udc->statusbuf);
2627 err4:
2628  kfree(udc->nullbuf);
2629 err3:
2630  ep = &udc->eps[0];
2631  cpm_muram_free(cpm_muram_offset(ep->rxbase));
2632  kfree(ep->rxframe);
2633  kfree(ep->rxbuffer);
2634  kfree(ep->txframe);
2635 err2:
2636  iounmap(udc->usb_regs);
2637 err1:
2638  kfree(udc);
2639  return ret;
2640 }
2641 
2642 #ifdef CONFIG_PM
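 /* power management is not implemented; both hooks below simply report
  * -ENOTSUPP */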
2643 static int qe_udc_suspend(struct platform_device *dev, pm_message_t state)
2644 {
2645  return -ENOTSUPP;
2646 }
2647 
2648 static int qe_udc_resume(struct platform_device *dev)
2649 {
2650  return -ENOTSUPP;
2651 }
2652 #endif
2653 
2654 static int __devexit qe_udc_remove(struct platform_device *ofdev)
2655 {
2656  struct qe_udc *udc = dev_get_drvdata(&ofdev->dev);
2657  struct qe_ep *ep;
2658  unsigned int size;
2659  DECLARE_COMPLETION(done);
2660 
2661  usb_del_gadget_udc(&udc->gadget);
2662 
2663  udc->done = &done;
2664  tasklet_disable(&udc->rx_tasklet);
2665 
2666  if (udc->nullmap) {
2667  dma_unmap_single(udc->gadget.dev.parent,
2668  udc->nullp, 256,
2669  DMA_TO_DEVICE);
2670  udc->nullp = DMA_ADDR_INVALID;
2671  } else {
2672  dma_sync_single_for_cpu(udc->gadget.dev.parent,
2673  udc->nullp, 256,
2674  DMA_TO_DEVICE);
2675  }
2676  kfree(udc->statusbuf);
2677  kfree(udc->nullbuf);
2678 
2679  ep = &udc->eps[0];
2680  cpm_muram_free(cpm_muram_offset(ep->rxbase));
2681  size = (ep->ep.maxpacket + USB_CRC_SIZE + 2) * (USB_BDRING_LEN + 1);
2682 
2683  kfree(ep->rxframe);
2684  if (ep->rxbufmap) {
2685  dma_unmap_single(udc->gadget.dev.parent,
2686  ep->rxbuf_d, size,
2687  DMA_FROM_DEVICE);
2688  ep->rxbuf_d = DMA_ADDR_INVALID;
2689  } else {
2690  dma_sync_single_for_cpu(udc->gadget.dev.parent,
2691  ep->rxbuf_d, size,
2692  DMA_FROM_DEVICE);
2693  }
2694 
2695  kfree(ep->rxbuffer);
2696  kfree(ep->txframe);
2697 
2698  free_irq(udc->usb_irq, udc);
2699  irq_dispose_mapping(udc->usb_irq);
2700 
2701  tasklet_kill(&udc->rx_tasklet);
2702 
2703  iounmap(udc->usb_regs);
2704 
2705  device_unregister(&udc->gadget.dev);
2706  /* wait for release() of gadget.dev to free udc */
2707  wait_for_completion(&done);
2708 
2709  return 0;
2710 }
2711 
2712 /*-------------------------------------------------------------------------*/
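 /* the .data field selects the controller flavour (PORT_QE or PORT_CPM);
  * qe_udc_probe() retrieves it through match->data as udc->soc_type */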
2713 static const struct of_device_id qe_udc_match[] __devinitconst = {
2714  {
2715  .compatible = "fsl,mpc8323-qe-usb",
2716  .data = (void *)PORT_QE,
2717  },
2718  {
2719  .compatible = "fsl,mpc8360-qe-usb",
2720  .data = (void *)PORT_QE,
2721  },
2722  {
2723  .compatible = "fsl,mpc8272-cpm-usb",
2724  .data = (void *)PORT_CPM,
2725  },
2726  {},
2727 };
2728 
2729 MODULE_DEVICE_TABLE(of, qe_udc_match);
2730 
2731 static struct platform_driver udc_driver = {
2732  .driver = {
2733  .name = (char *)driver_name,
2734  .owner = THIS_MODULE,
2735  .of_match_table = qe_udc_match,
2736  },
2737  .probe = qe_udc_probe,
2738  .remove = __devexit_p(qe_udc_remove),
2739 #ifdef CONFIG_PM
2740  .suspend = qe_udc_suspend,
2741  .resume = qe_udc_resume,
2742 #endif
2743 };
2744 
2745 module_platform_driver(udc_driver);
2746 
2747 MODULE_DESCRIPTION(DRIVER_DESC);
2748 MODULE_AUTHOR(DRIVER_AUTHOR);
2749 MODULE_LICENSE("GPL");
2750