/*
 * s3c-hsotg.c - Samsung S3C USB High-Speed/OtG device-mode controller driver
 * (from Linux kernel v3.7.1; recovered from a Doxygen source listing)
 */
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22 #include <linux/interrupt.h>
23 #include <linux/platform_device.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/debugfs.h>
26 #include <linux/seq_file.h>
27 #include <linux/delay.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30 #include <linux/clk.h>
32 
33 #include <linux/usb/ch9.h>
34 #include <linux/usb/gadget.h>
36 
37 #include <mach/map.h>
38 
39 #include "s3c-hsotg.h"
40 
/* marker for a usb_request whose buffer has no DMA mapping yet */
#define DMA_ADDR_INVALID (~((dma_addr_t)0))

/* regulator supplies required by the controller, see hsotg->supplies */
static const char * const s3c_hsotg_supply_names[] = {
	"vusb_d",		/* digital USB supply, 1.2V */
	"vusb_a",		/* analog USB supply, 1.1V */
};

/*
 * EP0_MPS_LIMIT
 *
 * Unfortunately there seems to be a limit of the amount of data that can
 * be transferred by IN transactions on EP0. This is either 127 bytes or 3
 * packets (which practically means 1 packet and 63 bytes of data) when the
 * MPS is set to 64.
 *
 * This means if we are wanting to move >127 bytes of data, we need to
 * split the transactions up, but just doing one packet at a time does
 * not work (this may be an implicit DATA0 PID on first packet of the
 * transaction) and doing 2 packets is outside the controller's limits.
 *
 * If we try to lower the MPS size for EP0, then no transfers work properly
 * for EP0, and the system will fail basic enumeration. As no cause for this
 * has currently been found, we cannot support any large IN transfers for
 * EP0.
 */
#define EP0_MPS_LIMIT	64

struct s3c_hsotg;
struct s3c_hsotg_req;
70 
108 struct s3c_hsotg_ep {
109  struct usb_ep ep;
110  struct list_head queue;
111  struct s3c_hsotg *parent;
113  struct dentry *debugfs;
114 
115 
116  unsigned long total_data;
117  unsigned int size_loaded;
118  unsigned int last_load;
119  unsigned int fifo_load;
120  unsigned short fifo_size;
121 
122  unsigned char dir_in;
123  unsigned char index;
124 
125  unsigned int halted:1;
126  unsigned int periodic:1;
127  unsigned int sent_zlp:1;
128 
129  char name[10];
130 };
131 
153 struct s3c_hsotg {
154  struct device *dev;
157 
159 
160  void __iomem *regs;
161  int irq;
162  struct clk *clk;
163 
164  struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsotg_supply_names)];
165 
166  unsigned int dedicated_fifos:1;
167  unsigned char num_of_eps;
168 
172 
177 
179  unsigned int setup;
180  unsigned long last_rst;
181  struct s3c_hsotg_ep *eps;
182 };
183 
192  struct usb_request req;
193  struct list_head queue;
194  unsigned char in_progress;
195  unsigned char mapped;
196 };
197 
198 /* conversion functions */
199 static inline struct s3c_hsotg_req *our_req(struct usb_request *req)
200 {
201  return container_of(req, struct s3c_hsotg_req, req);
202 }
203 
204 static inline struct s3c_hsotg_ep *our_ep(struct usb_ep *ep)
205 {
206  return container_of(ep, struct s3c_hsotg_ep, ep);
207 }
208 
209 static inline struct s3c_hsotg *to_hsotg(struct usb_gadget *gadget)
210 {
211  return container_of(gadget, struct s3c_hsotg, gadget);
212 }
213 
214 static inline void __orr32(void __iomem *ptr, u32 val)
215 {
216  writel(readl(ptr) | val, ptr);
217 }
218 
219 static inline void __bic32(void __iomem *ptr, u32 val)
220 {
221  writel(readl(ptr) & ~val, ptr);
222 }
223 
/* forward declaration of functions */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg);
226 
246 static inline bool using_dma(struct s3c_hsotg *hsotg)
247 {
248  return false; /* support is not complete */
249 }
250 
256 static void s3c_hsotg_en_gsint(struct s3c_hsotg *hsotg, u32 ints)
257 {
258  u32 gsintmsk = readl(hsotg->regs + GINTMSK);
259  u32 new_gsintmsk;
260 
261  new_gsintmsk = gsintmsk | ints;
262 
263  if (new_gsintmsk != gsintmsk) {
264  dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
265  writel(new_gsintmsk, hsotg->regs + GINTMSK);
266  }
267 }
268 
274 static void s3c_hsotg_disable_gsint(struct s3c_hsotg *hsotg, u32 ints)
275 {
276  u32 gsintmsk = readl(hsotg->regs + GINTMSK);
277  u32 new_gsintmsk;
278 
279  new_gsintmsk = gsintmsk & ~ints;
280 
281  if (new_gsintmsk != gsintmsk)
282  writel(new_gsintmsk, hsotg->regs + GINTMSK);
283 }
284 
295 static void s3c_hsotg_ctrl_epint(struct s3c_hsotg *hsotg,
296  unsigned int ep, unsigned int dir_in,
297  unsigned int en)
298 {
299  unsigned long flags;
300  u32 bit = 1 << ep;
301  u32 daint;
302 
303  if (!dir_in)
304  bit <<= 16;
305 
306  local_irq_save(flags);
307  daint = readl(hsotg->regs + DAINTMSK);
308  if (en)
309  daint |= bit;
310  else
311  daint &= ~bit;
312  writel(daint, hsotg->regs + DAINTMSK);
313  local_irq_restore(flags);
314 }
315 
320 static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
321 {
322  unsigned int ep;
323  unsigned int addr;
324  unsigned int size;
325  int timeout;
326  u32 val;
327 
328  /* set FIFO sizes to 2048/1024 */
329 
330  writel(2048, hsotg->regs + GRXFSIZ);
332  GNPTXFSIZ_NPTxFDep(1024),
333  hsotg->regs + GNPTXFSIZ);
334 
335  /*
336  * arange all the rest of the TX FIFOs, as some versions of this
337  * block have overlapping default addresses. This also ensures
338  * that if the settings have been changed, then they are set to
339  * known values.
340  */
341 
342  /* start at the end of the GNPTXFSIZ, rounded up */
343  addr = 2048 + 1024;
344  size = 768;
345 
346  /*
347  * currently we allocate TX FIFOs for all possible endpoints,
348  * and assume that they are all the same size.
349  */
350 
351  for (ep = 1; ep <= 15; ep++) {
352  val = addr;
353  val |= size << DPTXFSIZn_DPTxFSize_SHIFT;
354  addr += size;
355 
356  writel(val, hsotg->regs + DPTXFSIZn(ep));
357  }
358 
359  /*
360  * according to p428 of the design guide, we need to ensure that
361  * all fifos are flushed before continuing
362  */
363 
365  GRSTCTL_RxFFlsh, hsotg->regs + GRSTCTL);
366 
367  /* wait until the fifos are both flushed */
368  timeout = 100;
369  while (1) {
370  val = readl(hsotg->regs + GRSTCTL);
371 
372  if ((val & (GRSTCTL_TxFFlsh | GRSTCTL_RxFFlsh)) == 0)
373  break;
374 
375  if (--timeout == 0) {
376  dev_err(hsotg->dev,
377  "%s: timeout flushing fifos (GRSTCTL=%08x)\n",
378  __func__, val);
379  }
380 
381  udelay(1);
382  }
383 
384  dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
385 }
386 
393 static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
394  gfp_t flags)
395 {
396  struct s3c_hsotg_req *req;
397 
398  req = kzalloc(sizeof(struct s3c_hsotg_req), flags);
399  if (!req)
400  return NULL;
401 
402  INIT_LIST_HEAD(&req->queue);
403 
404  req->req.dma = DMA_ADDR_INVALID;
405  return &req->req;
406 }
407 
415 static inline int is_ep_periodic(struct s3c_hsotg_ep *hs_ep)
416 {
417  return hs_ep->periodic;
418 }
419 
429 static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
430  struct s3c_hsotg_ep *hs_ep,
431  struct s3c_hsotg_req *hs_req)
432 {
433  struct usb_request *req = &hs_req->req;
434  enum dma_data_direction dir;
435 
436  dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
437 
438  /* ignore this if we're not moving any data */
439  if (hs_req->req.length == 0)
440  return;
441 
442  if (hs_req->mapped) {
443  /* we mapped this, so unmap and remove the dma */
444 
445  dma_unmap_single(hsotg->dev, req->dma, req->length, dir);
446 
447  req->dma = DMA_ADDR_INVALID;
448  hs_req->mapped = 0;
449  } else {
450  dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
451  }
452 }
453 
470 static int s3c_hsotg_write_fifo(struct s3c_hsotg *hsotg,
471  struct s3c_hsotg_ep *hs_ep,
472  struct s3c_hsotg_req *hs_req)
473 {
474  bool periodic = is_ep_periodic(hs_ep);
475  u32 gnptxsts = readl(hsotg->regs + GNPTXSTS);
476  int buf_pos = hs_req->req.actual;
477  int to_write = hs_ep->size_loaded;
478  void *data;
479  int can_write;
480  int pkt_round;
481 
482  to_write -= (buf_pos - hs_ep->last_load);
483 
484  /* if there's nothing to write, get out early */
485  if (to_write == 0)
486  return 0;
487 
488  if (periodic && !hsotg->dedicated_fifos) {
489  u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
490  int size_left;
491  int size_done;
492 
493  /*
494  * work out how much data was loaded so we can calculate
495  * how much data is left in the fifo.
496  */
497 
498  size_left = DxEPTSIZ_XferSize_GET(epsize);
499 
500  /*
501  * if shared fifo, we cannot write anything until the
502  * previous data has been completely sent.
503  */
504  if (hs_ep->fifo_load != 0) {
505  s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
506  return -ENOSPC;
507  }
508 
509  dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
510  __func__, size_left,
511  hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
512 
513  /* how much of the data has moved */
514  size_done = hs_ep->size_loaded - size_left;
515 
516  /* how much data is left in the fifo */
517  can_write = hs_ep->fifo_load - size_done;
518  dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
519  __func__, can_write);
520 
521  can_write = hs_ep->fifo_size - can_write;
522  dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
523  __func__, can_write);
524 
525  if (can_write <= 0) {
526  s3c_hsotg_en_gsint(hsotg, GINTSTS_PTxFEmp);
527  return -ENOSPC;
528  }
529  } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
530  can_write = readl(hsotg->regs + DTXFSTS(hs_ep->index));
531 
532  can_write &= 0xffff;
533  can_write *= 4;
534  } else {
535  if (GNPTXSTS_NPTxQSpcAvail_GET(gnptxsts) == 0) {
536  dev_dbg(hsotg->dev,
537  "%s: no queue slots available (0x%08x)\n",
538  __func__, gnptxsts);
539 
540  s3c_hsotg_en_gsint(hsotg, GINTSTS_NPTxFEmp);
541  return -ENOSPC;
542  }
543 
544  can_write = GNPTXSTS_NPTxFSpcAvail_GET(gnptxsts);
545  can_write *= 4; /* fifo size is in 32bit quantities. */
546  }
547 
548  dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, mps %d\n",
549  __func__, gnptxsts, can_write, to_write, hs_ep->ep.maxpacket);
550 
551  /*
552  * limit to 512 bytes of data, it seems at least on the non-periodic
553  * FIFO, requests of >512 cause the endpoint to get stuck with a
554  * fragment of the end of the transfer in it.
555  */
556  if (can_write > 512)
557  can_write = 512;
558 
559  /*
560  * limit the write to one max-packet size worth of data, but allow
561  * the transfer to return that it did not run out of fifo space
562  * doing it.
563  */
564  if (to_write > hs_ep->ep.maxpacket) {
565  to_write = hs_ep->ep.maxpacket;
566 
567  s3c_hsotg_en_gsint(hsotg,
568  periodic ? GINTSTS_PTxFEmp :
570  }
571 
572  /* see if we can write data */
573 
574  if (to_write > can_write) {
575  to_write = can_write;
576  pkt_round = to_write % hs_ep->ep.maxpacket;
577 
578  /*
579  * Round the write down to an
580  * exact number of packets.
581  *
582  * Note, we do not currently check to see if we can ever
583  * write a full packet or not to the FIFO.
584  */
585 
586  if (pkt_round)
587  to_write -= pkt_round;
588 
589  /*
590  * enable correct FIFO interrupt to alert us when there
591  * is more room left.
592  */
593 
594  s3c_hsotg_en_gsint(hsotg,
595  periodic ? GINTSTS_PTxFEmp :
597  }
598 
599  dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
600  to_write, hs_req->req.length, can_write, buf_pos);
601 
602  if (to_write <= 0)
603  return -ENOSPC;
604 
605  hs_req->req.actual = buf_pos + to_write;
606  hs_ep->total_data += to_write;
607 
608  if (periodic)
609  hs_ep->fifo_load += to_write;
610 
611  to_write = DIV_ROUND_UP(to_write, 4);
612  data = hs_req->req.buf + buf_pos;
613 
614  writesl(hsotg->regs + EPFIFO(hs_ep->index), data, to_write);
615 
616  return (to_write >= can_write) ? -ENOSPC : 0;
617 }
618 
626 static unsigned get_ep_limit(struct s3c_hsotg_ep *hs_ep)
627 {
628  int index = hs_ep->index;
629  unsigned maxsize;
630  unsigned maxpkt;
631 
632  if (index != 0) {
633  maxsize = DxEPTSIZ_XferSize_LIMIT + 1;
634  maxpkt = DxEPTSIZ_PktCnt_LIMIT + 1;
635  } else {
636  maxsize = 64+64;
637  if (hs_ep->dir_in)
638  maxpkt = DIEPTSIZ0_PktCnt_LIMIT + 1;
639  else
640  maxpkt = 2;
641  }
642 
643  /* we made the constant loading easier above by using +1 */
644  maxpkt--;
645  maxsize--;
646 
647  /*
648  * constrain by packet count if maxpkts*pktsize is greater
649  * than the length register size.
650  */
651 
652  if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
653  maxsize = maxpkt * hs_ep->ep.maxpacket;
654 
655  return maxsize;
656 }
657 
668 static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
669  struct s3c_hsotg_ep *hs_ep,
670  struct s3c_hsotg_req *hs_req,
671  bool continuing)
672 {
673  struct usb_request *ureq = &hs_req->req;
674  int index = hs_ep->index;
675  int dir_in = hs_ep->dir_in;
676  u32 epctrl_reg;
677  u32 epsize_reg;
678  u32 epsize;
679  u32 ctrl;
680  unsigned length;
681  unsigned packets;
682  unsigned maxreq;
683 
684  if (index != 0) {
685  if (hs_ep->req && !continuing) {
686  dev_err(hsotg->dev, "%s: active request\n", __func__);
687  WARN_ON(1);
688  return;
689  } else if (hs_ep->req != hs_req && continuing) {
690  dev_err(hsotg->dev,
691  "%s: continue different req\n", __func__);
692  WARN_ON(1);
693  return;
694  }
695  }
696 
697  epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
698  epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
699 
700  dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
701  __func__, readl(hsotg->regs + epctrl_reg), index,
702  hs_ep->dir_in ? "in" : "out");
703 
704  /* If endpoint is stalled, we will restart request later */
705  ctrl = readl(hsotg->regs + epctrl_reg);
706 
707  if (ctrl & DxEPCTL_Stall) {
708  dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
709  return;
710  }
711 
712  length = ureq->length - ureq->actual;
713  dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
714  ureq->length, ureq->actual);
715  if (0)
716  dev_dbg(hsotg->dev,
717  "REQ buf %p len %d dma 0x%08x noi=%d zp=%d snok=%d\n",
718  ureq->buf, length, ureq->dma,
719  ureq->no_interrupt, ureq->zero, ureq->short_not_ok);
720 
721  maxreq = get_ep_limit(hs_ep);
722  if (length > maxreq) {
723  int round = maxreq % hs_ep->ep.maxpacket;
724 
725  dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
726  __func__, length, maxreq, round);
727 
728  /* round down to multiple of packets */
729  if (round)
730  maxreq -= round;
731 
732  length = maxreq;
733  }
734 
735  if (length)
736  packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
737  else
738  packets = 1; /* send one packet if length is zero. */
739 
740  if (dir_in && index != 0)
741  epsize = DxEPTSIZ_MC(1);
742  else
743  epsize = 0;
744 
745  if (index != 0 && ureq->zero) {
746  /*
747  * test for the packets being exactly right for the
748  * transfer
749  */
750 
751  if (length == (packets * hs_ep->ep.maxpacket))
752  packets++;
753  }
754 
755  epsize |= DxEPTSIZ_PktCnt(packets);
756  epsize |= DxEPTSIZ_XferSize(length);
757 
758  dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
759  __func__, packets, length, ureq->length, epsize, epsize_reg);
760 
761  /* store the request as the current one we're doing */
762  hs_ep->req = hs_req;
763 
764  /* write size / packets */
765  writel(epsize, hsotg->regs + epsize_reg);
766 
767  if (using_dma(hsotg) && !continuing) {
768  unsigned int dma_reg;
769 
770  /*
771  * write DMA address to control register, buffer already
772  * synced by s3c_hsotg_ep_queue().
773  */
774 
775  dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
776  writel(ureq->dma, hsotg->regs + dma_reg);
777 
778  dev_dbg(hsotg->dev, "%s: 0x%08x => 0x%08x\n",
779  __func__, ureq->dma, dma_reg);
780  }
781 
782  ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
783  ctrl |= DxEPCTL_USBActEp;
784 
785  dev_dbg(hsotg->dev, "setup req:%d\n", hsotg->setup);
786 
787  /* For Setup request do not clear NAK */
788  if (hsotg->setup && index == 0)
789  hsotg->setup = 0;
790  else
791  ctrl |= DxEPCTL_CNAK; /* clear NAK set by core */
792 
793 
794  dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
795  writel(ctrl, hsotg->regs + epctrl_reg);
796 
797  /*
798  * set these, it seems that DMA support increments past the end
799  * of the packet buffer so we need to calculate the length from
800  * this information.
801  */
802  hs_ep->size_loaded = length;
803  hs_ep->last_load = ureq->actual;
804 
805  if (dir_in && !using_dma(hsotg)) {
806  /* set these anyway, we may need them for non-periodic in */
807  hs_ep->fifo_load = 0;
808 
809  s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
810  }
811 
812  /*
813  * clear the INTknTXFEmpMsk when we start request, more as a aide
814  * to debugging to see what is going on.
815  */
816  if (dir_in)
818  hsotg->regs + DIEPINT(index));
819 
820  /*
821  * Note, trying to clear the NAK here causes problems with transmit
822  * on the S3C6400 ending up with the TXFIFO becoming full.
823  */
824 
825  /* check ep is enabled */
826  if (!(readl(hsotg->regs + epctrl_reg) & DxEPCTL_EPEna))
827  dev_warn(hsotg->dev,
828  "ep%d: failed to become enabled (DxEPCTL=0x%08x)?\n",
829  index, readl(hsotg->regs + epctrl_reg));
830 
831  dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n",
832  __func__, readl(hsotg->regs + epctrl_reg));
833 }
834 
847 static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
848  struct s3c_hsotg_ep *hs_ep,
849  struct usb_request *req)
850 {
851  enum dma_data_direction dir;
852  struct s3c_hsotg_req *hs_req = our_req(req);
853 
854  dir = hs_ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
855 
856  /* if the length is zero, ignore the DMA data */
857  if (hs_req->req.length == 0)
858  return 0;
859 
860  if (req->dma == DMA_ADDR_INVALID) {
861  dma_addr_t dma;
862 
863  dma = dma_map_single(hsotg->dev, req->buf, req->length, dir);
864 
865  if (unlikely(dma_mapping_error(hsotg->dev, dma)))
866  goto dma_error;
867 
868  if (dma & 3) {
869  dev_err(hsotg->dev, "%s: unaligned dma buffer\n",
870  __func__);
871 
872  dma_unmap_single(hsotg->dev, dma, req->length, dir);
873  return -EINVAL;
874  }
875 
876  hs_req->mapped = 1;
877  req->dma = dma;
878  } else {
879  dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
880  hs_req->mapped = 0;
881  }
882 
883  return 0;
884 
885 dma_error:
886  dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
887  __func__, req->buf, req->length);
888 
889  return -EIO;
890 }
891 
892 static int s3c_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
893  gfp_t gfp_flags)
894 {
895  struct s3c_hsotg_req *hs_req = our_req(req);
896  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
897  struct s3c_hsotg *hs = hs_ep->parent;
898  bool first;
899 
900  dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
901  ep->name, req, req->length, req->buf, req->no_interrupt,
902  req->zero, req->short_not_ok);
903 
904  /* initialise status of the request */
905  INIT_LIST_HEAD(&hs_req->queue);
906  req->actual = 0;
907  req->status = -EINPROGRESS;
908 
909  /* if we're using DMA, sync the buffers as necessary */
910  if (using_dma(hs)) {
911  int ret = s3c_hsotg_map_dma(hs, hs_ep, req);
912  if (ret)
913  return ret;
914  }
915 
916  first = list_empty(&hs_ep->queue);
917  list_add_tail(&hs_req->queue, &hs_ep->queue);
918 
919  if (first)
920  s3c_hsotg_start_req(hs, hs_ep, hs_req, false);
921 
922  return 0;
923 }
924 
925 static int s3c_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req,
926  gfp_t gfp_flags)
927 {
928  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
929  struct s3c_hsotg *hs = hs_ep->parent;
930  unsigned long flags = 0;
931  int ret = 0;
932 
933  spin_lock_irqsave(&hs->lock, flags);
934  ret = s3c_hsotg_ep_queue(ep, req, gfp_flags);
935  spin_unlock_irqrestore(&hs->lock, flags);
936 
937  return ret;
938 }
939 
/* free a request previously allocated by s3c_hsotg_ep_alloc_request() */
static void s3c_hsotg_ep_free_request(struct usb_ep *ep,
				      struct usb_request *req)
{
	struct s3c_hsotg_req *hs_req = our_req(req);

	kfree(hs_req);
}
947 
956 static void s3c_hsotg_complete_oursetup(struct usb_ep *ep,
957  struct usb_request *req)
958 {
959  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
960  struct s3c_hsotg *hsotg = hs_ep->parent;
961 
962  dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
963 
964  s3c_hsotg_ep_free_request(ep, req);
965 }
966 
975 static struct s3c_hsotg_ep *ep_from_windex(struct s3c_hsotg *hsotg,
976  u32 windex)
977 {
978  struct s3c_hsotg_ep *ep = &hsotg->eps[windex & 0x7F];
979  int dir = (windex & USB_DIR_IN) ? 1 : 0;
980  int idx = windex & 0x7F;
981 
982  if (windex >= 0x100)
983  return NULL;
984 
985  if (idx > hsotg->num_of_eps)
986  return NULL;
987 
988  if (idx && ep->dir_in != dir)
989  return NULL;
990 
991  return ep;
992 }
993 
1004 static int s3c_hsotg_send_reply(struct s3c_hsotg *hsotg,
1005  struct s3c_hsotg_ep *ep,
1006  void *buff,
1007  int length)
1008 {
1009  struct usb_request *req;
1010  int ret;
1011 
1012  dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1013 
1014  req = s3c_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1015  hsotg->ep0_reply = req;
1016  if (!req) {
1017  dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1018  return -ENOMEM;
1019  }
1020 
1021  req->buf = hsotg->ep0_buff;
1022  req->length = length;
1023  req->zero = 1; /* always do zero-length final transfer */
1024  req->complete = s3c_hsotg_complete_oursetup;
1025 
1026  if (length)
1027  memcpy(req->buf, buff, length);
1028  else
1029  ep->sent_zlp = 1;
1030 
1031  ret = s3c_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1032  if (ret) {
1033  dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1034  return ret;
1035  }
1036 
1037  return 0;
1038 }
1039 
1045 static int s3c_hsotg_process_req_status(struct s3c_hsotg *hsotg,
1046  struct usb_ctrlrequest *ctrl)
1047 {
1048  struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1049  struct s3c_hsotg_ep *ep;
1050  __le16 reply;
1051  int ret;
1052 
1053  dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1054 
1055  if (!ep0->dir_in) {
1056  dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1057  return -EINVAL;
1058  }
1059 
1060  switch (ctrl->bRequestType & USB_RECIP_MASK) {
1061  case USB_RECIP_DEVICE:
1062  reply = cpu_to_le16(0); /* bit 0 => self powered,
1063  * bit 1 => remote wakeup */
1064  break;
1065 
1066  case USB_RECIP_INTERFACE:
1067  /* currently, the data result should be zero */
1068  reply = cpu_to_le16(0);
1069  break;
1070 
1071  case USB_RECIP_ENDPOINT:
1072  ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1073  if (!ep)
1074  return -ENOENT;
1075 
1076  reply = cpu_to_le16(ep->halted ? 1 : 0);
1077  break;
1078 
1079  default:
1080  return 0;
1081  }
1082 
1083  if (le16_to_cpu(ctrl->wLength) != 2)
1084  return -EINVAL;
1085 
1086  ret = s3c_hsotg_send_reply(hsotg, ep0, &reply, 2);
1087  if (ret) {
1088  dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1089  return ret;
1090  }
1091 
1092  return 1;
1093 }
1094 
static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value);
1096 
1103 static struct s3c_hsotg_req *get_ep_head(struct s3c_hsotg_ep *hs_ep)
1104 {
1105  if (list_empty(&hs_ep->queue))
1106  return NULL;
1107 
1108  return list_first_entry(&hs_ep->queue, struct s3c_hsotg_req, queue);
1109 }
1110 
1116 static int s3c_hsotg_process_req_feature(struct s3c_hsotg *hsotg,
1117  struct usb_ctrlrequest *ctrl)
1118 {
1119  struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1120  struct s3c_hsotg_req *hs_req;
1121  bool restart;
1122  bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1123  struct s3c_hsotg_ep *ep;
1124  int ret;
1125 
1126  dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1127  __func__, set ? "SET" : "CLEAR");
1128 
1129  if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
1130  ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1131  if (!ep) {
1132  dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1133  __func__, le16_to_cpu(ctrl->wIndex));
1134  return -ENOENT;
1135  }
1136 
1137  switch (le16_to_cpu(ctrl->wValue)) {
1138  case USB_ENDPOINT_HALT:
1139  s3c_hsotg_ep_sethalt(&ep->ep, set);
1140 
1141  ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1142  if (ret) {
1143  dev_err(hsotg->dev,
1144  "%s: failed to send reply\n", __func__);
1145  return ret;
1146  }
1147 
1148  if (!set) {
1149  /*
1150  * If we have request in progress,
1151  * then complete it
1152  */
1153  if (ep->req) {
1154  hs_req = ep->req;
1155  ep->req = NULL;
1156  list_del_init(&hs_req->queue);
1157  hs_req->req.complete(&ep->ep,
1158  &hs_req->req);
1159  }
1160 
1161  /* If we have pending request, then start it */
1162  restart = !list_empty(&ep->queue);
1163  if (restart) {
1164  hs_req = get_ep_head(ep);
1165  s3c_hsotg_start_req(hsotg, ep,
1166  hs_req, false);
1167  }
1168  }
1169 
1170  break;
1171 
1172  default:
1173  return -ENOENT;
1174  }
1175  } else
1176  return -ENOENT; /* currently only deal with endpoint */
1177 
1178  return 1;
1179 }
1180 
1190 static void s3c_hsotg_process_control(struct s3c_hsotg *hsotg,
1191  struct usb_ctrlrequest *ctrl)
1192 {
1193  struct s3c_hsotg_ep *ep0 = &hsotg->eps[0];
1194  int ret = 0;
1195  u32 dcfg;
1196 
1197  ep0->sent_zlp = 0;
1198 
1199  dev_dbg(hsotg->dev, "ctrl Req=%02x, Type=%02x, V=%04x, L=%04x\n",
1200  ctrl->bRequest, ctrl->bRequestType,
1201  ctrl->wValue, ctrl->wLength);
1202 
1203  /*
1204  * record the direction of the request, for later use when enquing
1205  * packets onto EP0.
1206  */
1207 
1208  ep0->dir_in = (ctrl->bRequestType & USB_DIR_IN) ? 1 : 0;
1209  dev_dbg(hsotg->dev, "ctrl: dir_in=%d\n", ep0->dir_in);
1210 
1211  /*
1212  * if we've no data with this request, then the last part of the
1213  * transaction is going to implicitly be IN.
1214  */
1215  if (ctrl->wLength == 0)
1216  ep0->dir_in = 1;
1217 
1218  if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1219  switch (ctrl->bRequest) {
1220  case USB_REQ_SET_ADDRESS:
1221  dcfg = readl(hsotg->regs + DCFG);
1222  dcfg &= ~DCFG_DevAddr_MASK;
1223  dcfg |= ctrl->wValue << DCFG_DevAddr_SHIFT;
1224  writel(dcfg, hsotg->regs + DCFG);
1225 
1226  dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1227 
1228  ret = s3c_hsotg_send_reply(hsotg, ep0, NULL, 0);
1229  return;
1230 
1231  case USB_REQ_GET_STATUS:
1232  ret = s3c_hsotg_process_req_status(hsotg, ctrl);
1233  break;
1234 
1235  case USB_REQ_CLEAR_FEATURE:
1236  case USB_REQ_SET_FEATURE:
1237  ret = s3c_hsotg_process_req_feature(hsotg, ctrl);
1238  break;
1239  }
1240  }
1241 
1242  /* as a fallback, try delivering it to the driver to deal with */
1243 
1244  if (ret == 0 && hsotg->driver) {
1245  ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1246  if (ret < 0)
1247  dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1248  }
1249 
1250  /*
1251  * the request is either unhandlable, or is not formatted correctly
1252  * so respond with a STALL for the status stage to indicate failure.
1253  */
1254 
1255  if (ret < 0) {
1256  u32 reg;
1257  u32 ctrl;
1258 
1259  dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1260  reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1261 
1262  /*
1263  * DxEPCTL_Stall will be cleared by EP once it has
1264  * taken effect, so no need to clear later.
1265  */
1266 
1267  ctrl = readl(hsotg->regs + reg);
1268  ctrl |= DxEPCTL_Stall;
1269  ctrl |= DxEPCTL_CNAK;
1270  writel(ctrl, hsotg->regs + reg);
1271 
1272  dev_dbg(hsotg->dev,
1273  "written DxEPCTL=0x%08x to %08x (DxEPCTL=0x%08x)\n",
1274  ctrl, reg, readl(hsotg->regs + reg));
1275 
1276  /*
1277  * don't believe we need to anything more to get the EP
1278  * to reply with a STALL packet
1279  */
1280  }
1281 }
1282 
static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg);
1284 
1293 static void s3c_hsotg_complete_setup(struct usb_ep *ep,
1294  struct usb_request *req)
1295 {
1296  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
1297  struct s3c_hsotg *hsotg = hs_ep->parent;
1298 
1299  if (req->status < 0) {
1300  dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
1301  return;
1302  }
1303 
1304  if (req->actual == 0)
1305  s3c_hsotg_enqueue_setup(hsotg);
1306  else
1307  s3c_hsotg_process_control(hsotg, req->buf);
1308 }
1309 
1317 static void s3c_hsotg_enqueue_setup(struct s3c_hsotg *hsotg)
1318 {
1319  struct usb_request *req = hsotg->ctrl_req;
1320  struct s3c_hsotg_req *hs_req = our_req(req);
1321  int ret;
1322 
1323  dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
1324 
1325  req->zero = 0;
1326  req->length = 8;
1327  req->buf = hsotg->ctrl_buff;
1328  req->complete = s3c_hsotg_complete_setup;
1329 
1330  if (!list_empty(&hs_req->queue)) {
1331  dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
1332  return;
1333  }
1334 
1335  hsotg->eps[0].dir_in = 0;
1336 
1337  ret = s3c_hsotg_ep_queue(&hsotg->eps[0].ep, req, GFP_ATOMIC);
1338  if (ret < 0) {
1339  dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
1340  /*
1341  * Don't think there's much we can do other than watch the
1342  * driver fail.
1343  */
1344  }
1345 }
1346 
1360 static void s3c_hsotg_complete_request(struct s3c_hsotg *hsotg,
1361  struct s3c_hsotg_ep *hs_ep,
1362  struct s3c_hsotg_req *hs_req,
1363  int result)
1364 {
1365  bool restart;
1366 
1367  if (!hs_req) {
1368  dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
1369  return;
1370  }
1371 
1372  dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
1373  hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
1374 
1375  /*
1376  * only replace the status if we've not already set an error
1377  * from a previous transaction
1378  */
1379 
1380  if (hs_req->req.status == -EINPROGRESS)
1381  hs_req->req.status = result;
1382 
1383  hs_ep->req = NULL;
1384  list_del_init(&hs_req->queue);
1385 
1386  if (using_dma(hsotg))
1387  s3c_hsotg_unmap_dma(hsotg, hs_ep, hs_req);
1388 
1389  /*
1390  * call the complete request with the locks off, just in case the
1391  * request tries to queue more work for this endpoint.
1392  */
1393 
1394  if (hs_req->req.complete) {
1395  spin_unlock(&hsotg->lock);
1396  hs_req->req.complete(&hs_ep->ep, &hs_req->req);
1397  spin_lock(&hsotg->lock);
1398  }
1399 
1400  /*
1401  * Look to see if there is anything else to do. Note, the completion
1402  * of the previous request may have caused a new request to be started
1403  * so be careful when doing this.
1404  */
1405 
1406  if (!hs_ep->req && result >= 0) {
1407  restart = !list_empty(&hs_ep->queue);
1408  if (restart) {
1409  hs_req = get_ep_head(hs_ep);
1410  s3c_hsotg_start_req(hsotg, hs_ep, hs_req, false);
1411  }
1412  }
1413 }
1414 
/**
 * s3c_hsotg_rx_data - receive data from the RXFIFO
 * @hsotg: The device state.
 * @ep_idx: The endpoint index for the data.
 * @size: The size of data in the fifo, in bytes.
 *
 * The FIFO status shows there is data to read from the FIFO for a given
 * endpoint, so sort out whether we need to read the data into a request
 * that has been made for that endpoint.
 */
static void s3c_hsotg_rx_data(struct s3c_hsotg *hsotg, int ep_idx, int size)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep_idx];
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	void __iomem *fifo = hsotg->regs + EPFIFO(ep_idx);
	int to_read;
	int max_req;
	int read_ptr;


	if (!hs_req) {
		u32 epctl = readl(hsotg->regs + DOEPCTL(ep_idx));
		int ptr;

		dev_warn(hsotg->dev,
			 "%s: FIFO %d bytes on ep%d but no req (DxEPCTl=0x%08x)\n",
			 __func__, size, ep_idx, epctl);

		/* dump the data from the FIFO, we've nothing we can do */
		/* the FIFO must be drained even without a request, otherwise
		 * stale data stays ahead of future transfers */
		for (ptr = 0; ptr < size; ptr += 4)
			(void)readl(fifo);

		return;
	}

	to_read = size;
	read_ptr = hs_req->req.actual;
	max_req = hs_req->req.length - read_ptr;

	dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
		__func__, to_read, max_req, read_ptr, hs_req->req.length);

	if (to_read > max_req) {
		/*
		 * more data appeared than we where willing
		 * to deal with in this request.
		 */

		/* currently we don't deal this */
		WARN_ON_ONCE(1);
	}

	hs_ep->total_data += to_read;
	hs_req->req.actual += to_read;
	to_read = DIV_ROUND_UP(to_read, 4);

	/*
	 * note, we might over-write the buffer end by 3 bytes depending on
	 * alignment of the data.
	 */
	readsl(fifo, hs_req->req.buf + read_ptr, to_read);
}
1477 
1490 static void s3c_hsotg_send_zlp(struct s3c_hsotg *hsotg,
1491  struct s3c_hsotg_req *req)
1492 {
1493  u32 ctrl;
1494 
1495  if (!req) {
1496  dev_warn(hsotg->dev, "%s: no request?\n", __func__);
1497  return;
1498  }
1499 
1500  if (req->req.length == 0) {
1501  hsotg->eps[0].sent_zlp = 1;
1502  s3c_hsotg_enqueue_setup(hsotg);
1503  return;
1504  }
1505 
1506  hsotg->eps[0].dir_in = 1;
1507  hsotg->eps[0].sent_zlp = 1;
1508 
1509  dev_dbg(hsotg->dev, "sending zero-length packet\n");
1510 
1511  /* issue a zero-sized packet to terminate this */
1513  DxEPTSIZ_XferSize(0), hsotg->regs + DIEPTSIZ(0));
1514 
1515  ctrl = readl(hsotg->regs + DIEPCTL0);
1516  ctrl |= DxEPCTL_CNAK; /* clear NAK set by core */
1517  ctrl |= DxEPCTL_EPEna; /* ensure ep enabled */
1518  ctrl |= DxEPCTL_USBActEp;
1519  writel(ctrl, hsotg->regs + DIEPCTL0);
1520 }
1521 
1532 static void s3c_hsotg_handle_outdone(struct s3c_hsotg *hsotg,
1533  int epnum, bool was_setup)
1534 {
1535  u32 epsize = readl(hsotg->regs + DOEPTSIZ(epnum));
1536  struct s3c_hsotg_ep *hs_ep = &hsotg->eps[epnum];
1537  struct s3c_hsotg_req *hs_req = hs_ep->req;
1538  struct usb_request *req = &hs_req->req;
1539  unsigned size_left = DxEPTSIZ_XferSize_GET(epsize);
1540  int result = 0;
1541 
1542  if (!hs_req) {
1543  dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
1544  return;
1545  }
1546 
1547  if (using_dma(hsotg)) {
1548  unsigned size_done;
1549 
1550  /*
1551  * Calculate the size of the transfer by checking how much
1552  * is left in the endpoint size register and then working it
1553  * out from the amount we loaded for the transfer.
1554  *
1555  * We need to do this as DMA pointers are always 32bit aligned
1556  * so may overshoot/undershoot the transfer.
1557  */
1558 
1559  size_done = hs_ep->size_loaded - size_left;
1560  size_done += hs_ep->last_load;
1561 
1562  req->actual = size_done;
1563  }
1564 
1565  /* if there is more request to do, schedule new transfer */
1566  if (req->actual < req->length && size_left == 0) {
1567  s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
1568  return;
1569  } else if (epnum == 0) {
1570  /*
1571  * After was_setup = 1 =>
1572  * set CNAK for non Setup requests
1573  */
1574  hsotg->setup = was_setup ? 0 : 1;
1575  }
1576 
1577  if (req->actual < req->length && req->short_not_ok) {
1578  dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
1579  __func__, req->actual, req->length);
1580 
1581  /*
1582  * todo - what should we return here? there's no one else
1583  * even bothering to check the status.
1584  */
1585  }
1586 
1587  if (epnum == 0) {
1588  /*
1589  * Condition req->complete != s3c_hsotg_complete_setup says:
1590  * send ZLP when we have an asynchronous request from gadget
1591  */
1592  if (!was_setup && req->complete != s3c_hsotg_complete_setup)
1593  s3c_hsotg_send_zlp(hsotg, hs_req);
1594  }
1595 
1596  s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
1597 }
1598 
1605 static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
1606 {
1607  u32 dsts;
1608 
1609  dsts = readl(hsotg->regs + DSTS);
1610  dsts &= DSTS_SOFFN_MASK;
1611  dsts >>= DSTS_SOFFN_SHIFT;
1612 
1613  return dsts;
1614 }
1615 
1632 static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
1633 {
1634  u32 grxstsr = readl(hsotg->regs + GRXSTSP);
1635  u32 epnum, status, size;
1636 
1637  WARN_ON(using_dma(hsotg));
1638 
1639  epnum = grxstsr & GRXSTS_EPNum_MASK;
1640  status = grxstsr & GRXSTS_PktSts_MASK;
1641 
1642  size = grxstsr & GRXSTS_ByteCnt_MASK;
1643  size >>= GRXSTS_ByteCnt_SHIFT;
1644 
1645  if (1)
1646  dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
1647  __func__, grxstsr, size, epnum);
1648 
1649 #define __status(x) ((x) >> GRXSTS_PktSts_SHIFT)
1650 
1651  switch (status >> GRXSTS_PktSts_SHIFT) {
1653  dev_dbg(hsotg->dev, "GlobalOutNAK\n");
1654  break;
1655 
1657  dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
1658  s3c_hsotg_read_frameno(hsotg));
1659 
1660  if (!using_dma(hsotg))
1661  s3c_hsotg_handle_outdone(hsotg, epnum, false);
1662  break;
1663 
1665  dev_dbg(hsotg->dev,
1666  "SetupDone (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1667  s3c_hsotg_read_frameno(hsotg),
1668  readl(hsotg->regs + DOEPCTL(0)));
1669 
1670  s3c_hsotg_handle_outdone(hsotg, epnum, true);
1671  break;
1672 
1674  s3c_hsotg_rx_data(hsotg, epnum, size);
1675  break;
1676 
1678  dev_dbg(hsotg->dev,
1679  "SetupRX (Frame=0x%08x, DOPEPCTL=0x%08x)\n",
1680  s3c_hsotg_read_frameno(hsotg),
1681  readl(hsotg->regs + DOEPCTL(0)));
1682 
1683  s3c_hsotg_rx_data(hsotg, epnum, size);
1684  break;
1685 
1686  default:
1687  dev_warn(hsotg->dev, "%s: unknown status %08x\n",
1688  __func__, grxstsr);
1689 
1690  s3c_hsotg_dump(hsotg);
1691  break;
1692  }
1693 }
1694 
1699 static u32 s3c_hsotg_ep0_mps(unsigned int mps)
1700 {
1701  switch (mps) {
1702  case 64:
1703  return D0EPCTL_MPS_64;
1704  case 32:
1705  return D0EPCTL_MPS_32;
1706  case 16:
1707  return D0EPCTL_MPS_16;
1708  case 8:
1709  return D0EPCTL_MPS_8;
1710  }
1711 
1712  /* bad max packet size, warn and return invalid result */
1713  WARN_ON(1);
1714  return (u32)-1;
1715 }
1716 
1726 static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
1727  unsigned int ep, unsigned int mps)
1728 {
1729  struct s3c_hsotg_ep *hs_ep = &hsotg->eps[ep];
1730  void __iomem *regs = hsotg->regs;
1731  u32 mpsval;
1732  u32 reg;
1733 
1734  if (ep == 0) {
1735  /* EP0 is a special case */
1736  mpsval = s3c_hsotg_ep0_mps(mps);
1737  if (mpsval > 3)
1738  goto bad_mps;
1739  } else {
1740  if (mps >= DxEPCTL_MPS_LIMIT+1)
1741  goto bad_mps;
1742 
1743  mpsval = mps;
1744  }
1745 
1746  hs_ep->ep.maxpacket = mps;
1747 
1748  /*
1749  * update both the in and out endpoint controldir_ registers, even
1750  * if one of the directions may not be in use.
1751  */
1752 
1753  reg = readl(regs + DIEPCTL(ep));
1754  reg &= ~DxEPCTL_MPS_MASK;
1755  reg |= mpsval;
1756  writel(reg, regs + DIEPCTL(ep));
1757 
1758  if (ep) {
1759  reg = readl(regs + DOEPCTL(ep));
1760  reg &= ~DxEPCTL_MPS_MASK;
1761  reg |= mpsval;
1762  writel(reg, regs + DOEPCTL(ep));
1763  }
1764 
1765  return;
1766 
1767 bad_mps:
1768  dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
1769 }
1770 
1776 static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx)
1777 {
1778  int timeout;
1779  int val;
1780 
1782  hsotg->regs + GRSTCTL);
1783 
1784  /* wait until the fifo is flushed */
1785  timeout = 100;
1786 
1787  while (1) {
1788  val = readl(hsotg->regs + GRSTCTL);
1789 
1790  if ((val & (GRSTCTL_TxFFlsh)) == 0)
1791  break;
1792 
1793  if (--timeout == 0) {
1794  dev_err(hsotg->dev,
1795  "%s: timeout flushing fifo (GRSTCTL=%08x)\n",
1796  __func__, val);
1797  }
1798 
1799  udelay(1);
1800  }
1801 }
1802 
1811 static int s3c_hsotg_trytx(struct s3c_hsotg *hsotg,
1812  struct s3c_hsotg_ep *hs_ep)
1813 {
1814  struct s3c_hsotg_req *hs_req = hs_ep->req;
1815 
1816  if (!hs_ep->dir_in || !hs_req)
1817  return 0;
1818 
1819  if (hs_req->req.actual < hs_req->req.length) {
1820  dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
1821  hs_ep->index);
1822  return s3c_hsotg_write_fifo(hsotg, hs_ep, hs_req);
1823  }
1824 
1825  return 0;
1826 }
1827 
/**
 * s3c_hsotg_complete_in - complete IN transfer
 * @hsotg: The device state.
 * @hs_ep: The endpoint that has just completed.
 *
 * An IN transfer has been completed, update the transfer's state and then
 * call the relevant completion routines.
 */
static void s3c_hsotg_complete_in(struct s3c_hsotg *hsotg,
				  struct s3c_hsotg_ep *hs_ep)
{
	struct s3c_hsotg_req *hs_req = hs_ep->req;
	u32 epsize = readl(hsotg->regs + DIEPTSIZ(hs_ep->index));
	int size_left, size_done;

	if (!hs_req) {
		dev_dbg(hsotg->dev, "XferCompl but no req\n");
		return;
	}

	/* Finish ZLP handling for IN EP0 transactions */
	if (hsotg->eps[0].sent_zlp) {
		dev_dbg(hsotg->dev, "zlp packet received\n");
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
		return;
	}

	/*
	 * Calculate the size of the transfer by checking how much is left
	 * in the endpoint size register and then working it out from
	 * the amount we loaded for the transfer.
	 *
	 * We do this even for DMA, as the transfer may have incremented
	 * past the end of the buffer (DMA transfers are always 32bit
	 * aligned).
	 */

	size_left = DxEPTSIZ_XferSize_GET(epsize);

	size_done = hs_ep->size_loaded - size_left;
	size_done += hs_ep->last_load;

	if (hs_req->req.actual != size_done)
		dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
			__func__, hs_req->req.actual, size_done);

	hs_req->req.actual = size_done;
	dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
		hs_req->req.length, hs_req->req.actual, hs_req->req.zero);

	/*
	 * Check if dealing with Maximum Packet Size(MPS) IN transfer at EP0
	 * When sent data is a multiple MPS size (e.g. 64B ,128B ,192B
	 * ,256B ... ), after last MPS sized packet send IN ZLP packet to
	 * inform the host that no more data is available.
	 * The state of req.zero member is checked to be sure that the value to
	 * send is smaller than wValue expected from host.
	 * Check req.length to NOT send another ZLP when the current one is
	 * under completion (the one for which this completion has been called).
	 */
	if (hs_req->req.length && hs_ep->index == 0 && hs_req->req.zero &&
	    hs_req->req.length == hs_req->req.actual &&
	    !(hs_req->req.length % hs_ep->ep.maxpacket)) {

		dev_dbg(hsotg->dev, "ep0 zlp IN packet sent\n");
		s3c_hsotg_send_zlp(hsotg, hs_req);

		return;
	}

	/* transfer-size exhausted but request not satisfied: re-arm */
	if (!size_left && hs_req->req.actual < hs_req->req.length) {
		dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
		s3c_hsotg_start_req(hsotg, hs_ep, hs_req, true);
	} else
		s3c_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
1904 
/**
 * s3c_hsotg_epint - handle an in/out endpoint interrupt
 * @hsotg: The driver state
 * @idx: The index for the endpoint (0..15)
 * @dir_in: Set if this is an IN endpoint
 *
 * Process and clear any interrupt pending for an individual endpoint.
 */
static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
			    int dir_in)
{
	struct s3c_hsotg_ep *hs_ep = &hsotg->eps[idx];
	u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
	u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
	u32 epsiz_reg = dir_in ? DIEPTSIZ(idx) : DOEPTSIZ(idx);
	u32 ints;

	ints = readl(hsotg->regs + epint_reg);

	/* Clear endpoint interrupts */
	writel(ints, hsotg->regs + epint_reg);

	dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
		__func__, idx, dir_in ? "in" : "out", ints);

	if (ints & DxEPINT_XferCompl) {
		dev_dbg(hsotg->dev,
			"%s: XferCompl: DxEPCTL=0x%08x, DxEPTSIZ=%08x\n",
			__func__, readl(hsotg->regs + epctl_reg),
			readl(hsotg->regs + epsiz_reg));

		/*
		 * we get OutDone from the FIFO, so we only need to look
		 * at completing IN requests here
		 */
		if (dir_in) {
			s3c_hsotg_complete_in(hsotg, hs_ep);

			/* EP0 with no active request: re-arm for SETUP */
			if (idx == 0 && !hs_ep->req)
				s3c_hsotg_enqueue_setup(hsotg);
		} else if (using_dma(hsotg)) {
			/*
			 * We're using DMA, we need to fire an OutDone here
			 * as we ignore the RXFIFO.
			 */

			s3c_hsotg_handle_outdone(hsotg, idx, false);
		}
	}

	if (ints & DxEPINT_EPDisbld) {
		dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);

		if (dir_in) {
			int epctl = readl(hsotg->regs + epctl_reg);

			s3c_hsotg_txfifo_flush(hsotg, idx);

			/* a stalled bulk IN ep: clear the global NP-IN NAK */
			if ((epctl & DxEPCTL_Stall) &&
			    (epctl & DxEPCTL_EPType_Bulk)) {
				int dctl = readl(hsotg->regs + DCTL);

				dctl |= DCTL_CGNPInNAK;
				writel(dctl, hsotg->regs + DCTL);
			}
		}
	}

	if (ints & DxEPINT_AHBErr)
		dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);

	if (ints & DxEPINT_Setup) {	/* Setup or Timeout */
		dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);

		if (using_dma(hsotg) && idx == 0) {
			/*
			 * this is the notification we've received a
			 * setup packet. In non-DMA mode we'd get this
			 * from the RXFIFO, instead we need to process
			 * the setup here.
			 */

			if (dir_in)
				WARN_ON_ONCE(1);
			else
				s3c_hsotg_handle_outdone(hsotg, 0, true);
		}
	}

	if (ints & DxEPINT_Back2BackSetup)
		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);

	if (dir_in) {
		/* not sure if this is important, but we'll clear it anyway */
		if (ints & DIEPMSK_INTknTXFEmpMsk) {
			dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
				__func__, idx);
		}

		/* this probably means something bad is happening */
		if (ints & DIEPMSK_INTknEPMisMsk) {
			dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
				 __func__, idx);
		}

		/* FIFO has space or is empty (see GAHBCFG) */
		if (hsotg->dedicated_fifos &&
		    ints & DIEPMSK_TxFIFOEmpty) {
			dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
				__func__, idx);
			if (!using_dma(hsotg))
				s3c_hsotg_trytx(hsotg, hs_ep);
		}
	}
}
2020 
/**
 * s3c_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
 * @hsotg: The device instance
 *
 * The device has been connected, and the speed negotiated with the host
 * has been read from DSTS, so set up the endpoints' maximum packet sizes
 * accordingly and re-arm EP0 for the next SETUP.
 */
static void s3c_hsotg_irq_enumdone(struct s3c_hsotg *hsotg)
{
	u32 dsts = readl(hsotg->regs + DSTS);
	int ep0_mps = 0, ep_mps;

	/*
	 * This should signal the finish of the enumeration phase
	 * of the USB handshaking, so we should now know what rate
	 * we connected at.
	 */

	dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);

	/*
	 * note, since we're limited by the size of transfer on EP0, and
	 * it seems IN transfers must be a even number of packets we do
	 * not advertise a 64byte MPS on EP0.
	 */

	/* catch both EnumSpd_FS and EnumSpd_FS48 */
	switch (dsts & DSTS_EnumSpd_MASK) {
	case DSTS_EnumSpd_FS:
	case DSTS_EnumSpd_FS48:
		hsotg->gadget.speed = USB_SPEED_FULL;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 64;
		break;

	case DSTS_EnumSpd_HS:
		hsotg->gadget.speed = USB_SPEED_HIGH;
		ep0_mps = EP0_MPS_LIMIT;
		ep_mps = 512;
		break;

	case DSTS_EnumSpd_LS:
		hsotg->gadget.speed = USB_SPEED_LOW;
		/*
		 * note, we don't actually support LS in this driver at the
		 * moment, and the documentation seems to imply that it isn't
		 * supported by the PHYs on some of the devices.
		 */
		/* ep0_mps stays 0 here, so the MPS setup below is skipped
		 * (ep_mps is never read in that case) */
		break;
	}
	dev_info(hsotg->dev, "new device is %s\n",
		 usb_speed_string(hsotg->gadget.speed));

	/*
	 * we should now know the maximum packet size for an
	 * endpoint, so set the endpoints to a default value.
	 */

	if (ep0_mps) {
		int i;
		s3c_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps);
		for (i = 1; i < hsotg->num_of_eps; i++)
			s3c_hsotg_set_ep_maxpacket(hsotg, i, ep_mps);
	}

	/* ensure after enumeration our EP0 is active */

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));
}
2094 
2105 static void kill_all_requests(struct s3c_hsotg *hsotg,
2106  struct s3c_hsotg_ep *ep,
2107  int result, bool force)
2108 {
2109  struct s3c_hsotg_req *req, *treq;
2110 
2111  list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2112  /*
2113  * currently, we can't do much about an already
2114  * running request on an in endpoint
2115  */
2116 
2117  if (ep->req == req && ep->dir_in && !force)
2118  continue;
2119 
2120  s3c_hsotg_complete_request(hsotg, ep, req,
2121  result);
2122  }
2123 }
2124 
/*
 * call_gadget - invoke a gadget driver callback, dropping the device lock
 * around the call so the callback may re-enter the driver.
 *
 * Fix: the macro expanded to a bare if-statement, which is a dangling-else
 * hazard at call sites; it is now wrapped in do { } while (0). The _hs
 * argument is also parenthesized in the lock expressions.
 */
#define call_gadget(_hs, _entry) \
	do { \
		if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \
		    (_hs)->driver && (_hs)->driver->_entry) { \
			spin_unlock(&(_hs)->lock); \
			(_hs)->driver->_entry(&(_hs)->gadget); \
			spin_lock(&(_hs)->lock); \
		} \
	} while (0)
2132 
2141 static void s3c_hsotg_disconnect(struct s3c_hsotg *hsotg)
2142 {
2143  unsigned ep;
2144 
2145  for (ep = 0; ep < hsotg->num_of_eps; ep++)
2146  kill_all_requests(hsotg, &hsotg->eps[ep], -ESHUTDOWN, true);
2147 
2148  call_gadget(hsotg, disconnect);
2149 }
2150 
2156 static void s3c_hsotg_irq_fifoempty(struct s3c_hsotg *hsotg, bool periodic)
2157 {
2158  struct s3c_hsotg_ep *ep;
2159  int epno, ret;
2160 
2161  /* look through for any more data to transmit */
2162 
2163  for (epno = 0; epno < hsotg->num_of_eps; epno++) {
2164  ep = &hsotg->eps[epno];
2165 
2166  if (!ep->dir_in)
2167  continue;
2168 
2169  if ((periodic && !ep->periodic) ||
2170  (!periodic && ep->periodic))
2171  continue;
2172 
2173  ret = s3c_hsotg_trytx(hsotg, ep);
2174  if (ret < 0)
2175  break;
2176  }
2177 }
2178 
2179 /* IRQ flags which will trigger a retry around the IRQ loop */
2180 #define IRQ_RETRY_MASK (GINTSTS_NPTxFEmp | \
2181  GINTSTS_PTxFEmp | \
2182  GINTSTS_RxFLvl)
2183 
/**
 * s3c_hsotg_corereset - issue softreset to the core
 * @hsotg: The device state
 *
 * Issue a soft reset to the core, and await the core finishing it.
 * Returns 0 on success, -EINVAL if the reset bit never asserted, or
 * -ETIMEDOUT if the AHB did not return to idle.
 */
static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
{
	int timeout;
	u32 grstctl;

	dev_dbg(hsotg->dev, "resetting core\n");

	/* issue soft reset */
	writel(GRSTCTL_CSftRst, hsotg->regs + GRSTCTL);

	/* poll for the core clearing the self-clearing reset bit */
	timeout = 10000;
	do {
		grstctl = readl(hsotg->regs + GRSTCTL);
	} while ((grstctl & GRSTCTL_CSftRst) && timeout-- > 0);

	if (grstctl & GRSTCTL_CSftRst) {
		dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
		return -EINVAL;
	}

	timeout = 10000;

	/* now wait for the AHB master to go idle again */
	/* NOTE(review): this loop busy-spins without any delay between
	 * register reads — consider cpu_relax()/udelay(1) per iteration */
	while (1) {
		u32 grstctl = readl(hsotg->regs + GRSTCTL);

		if (timeout-- < 0) {
			dev_info(hsotg->dev,
				 "%s: reset failed, GRSTCTL=%08x\n",
				 __func__, grstctl);
			return -ETIMEDOUT;
		}

		if (!(grstctl & GRSTCTL_AHBIdle))
			continue;

		break;		/* reset done */
	}

	dev_dbg(hsotg->dev, "reset successful\n");
	return 0;
}
2231 
/**
 * s3c_hsotg_core_init - issue softreset to the core and reinitialise it
 * @hsotg: The device state
 *
 * Reset the core, program the global and endpoint-0 registers, and
 * remove the soft-disconnect so the host can (re-)enumerate us.
 *
 * NOTE(review): several register-write lines in this function were lost in
 * the document extraction (each elision is flagged below); the code as
 * shown here is NOT compilable — restore the missing lines from the
 * original source before building.
 */
static void s3c_hsotg_core_init(struct s3c_hsotg *hsotg)
{
	s3c_hsotg_corereset(hsotg);

	/*
	 * we must now enable ep0 ready for host detection and then
	 * set configuration.
	 */

	/* set the PLL on, remove the HNP/SRP and set the PHY */
	/* NOTE(review): the writel( ... GUSBCFG flags ... | opener of this
	 * call was lost in extraction */
	(0x5 << 10), hsotg->regs + GUSBCFG);

	s3c_hsotg_init_fifo(hsotg);

	/* hold the bus in soft-disconnect while we reprogram */
	__orr32(hsotg->regs + DCTL, DCTL_SftDiscon);

	writel(1 << 18 | DCFG_DevSpd_HS,  hsotg->regs + DCFG);

	/* Clear any pending OTG interrupts */
	writel(0xffffffff, hsotg->regs + GOTGINT);

	/* Clear any pending interrupts */
	writel(0xffffffff, hsotg->regs + GINTSTS);

	/* NOTE(review): the writel(...) with the GINTMSK interrupt-mask list
	 * was lost in extraction */
	hsotg->regs + GINTMSK);

	/* NOTE(review): both GAHBCFG writel() calls (DMA and slave variants)
	 * were lost in extraction */
	if (using_dma(hsotg))
	hsotg->regs + GAHBCFG);
	else

	/*
	 * Enabling INTknTXFEmpMsk here seems to be a big mistake, we end
	 * up being flooded with interrupts if the host is polling the
	 * endpoint to try and read data.
	 */

	/* NOTE(review): additional DIEPMSK flag lines were lost in
	 * extraction between these two lines */
	writel(((hsotg->dedicated_fifos) ? DIEPMSK_TxFIFOEmpty : 0) |
	       DIEPMSK_INTknEPMisMsk,
	       hsotg->regs + DIEPMSK);

	/*
	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
	 * DMA mode we may need this.
	 */
	/* NOTE(review): the DOEPMSK flag lines were lost in extraction */
	writel((using_dma(hsotg) ? (DIEPMSK_XferComplMsk |
				    DIEPMSK_TimeOUTMsk) : 0) |
	       hsotg->regs + DOEPMSK);

	writel(0, hsotg->regs + DAINTMSK);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* enable in and out endpoint interrupts */
	s3c_hsotg_en_gsint(hsotg, GINTSTS_OEPInt | GINTSTS_IEPInt);

	/*
	 * Enable the RXFIFO when in slave mode, as this is how we collect
	 * the data. In DMA mode, we get events from the FIFO but also
	 * things we cannot process, so do not use it.
	 */
	if (!using_dma(hsotg))
		s3c_hsotg_en_gsint(hsotg, GINTSTS_RxFLvl);

	/* Enable interrupts for EP0 in and out */
	s3c_hsotg_ctrl_epint(hsotg, 0, 0, 1);
	s3c_hsotg_ctrl_epint(hsotg, 0, 1, 1);

	__orr32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);
	udelay(10);	/* see openiboot */
	__bic32(hsotg->regs + DCTL, DCTL_PWROnPrgDone);

	dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + DCTL));

	/*
	 * DxEPCTL_USBActEp says RO in manual, but seems to be set by
	 * writing to the EPCTL register..
	 */

	/* set to read 1 8byte packet */
	/* NOTE(review): the writel( ... DxEPTSIZ_MC/PktCnt ... | opener was
	 * lost in extraction */
	DxEPTSIZ_XferSize(8), hsotg->regs + DOEPTSIZ0);

	/* NOTE(review): a flag line of this writel() was lost in extraction */
	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
	       DxEPCTL_CNAK | DxEPCTL_EPEna |
	       hsotg->regs + DOEPCTL0);

	/* enable, but don't activate EP0in */
	writel(s3c_hsotg_ep0_mps(hsotg->eps[0].ep.maxpacket) |
	       DxEPCTL_USBActEp, hsotg->regs + DIEPCTL0);

	s3c_hsotg_enqueue_setup(hsotg);

	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
		readl(hsotg->regs + DIEPCTL0),
		readl(hsotg->regs + DOEPCTL0));

	/* clear global NAKs */
	/* NOTE(review): the writel(DCTL_CGOUTNak | DCTL_CGNPInNAK, opener
	 * was lost in extraction */
	hsotg->regs + DCTL);

	/* must be at-least 3ms to allow bus to see disconnect */
	mdelay(3);

	/* remove the soft-disconnect and let's go */
	__bic32(hsotg->regs + DCTL, DCTL_SftDiscon);
}
2360 
/**
 * s3c_hsotg_irq - handle device interrupt
 * @irq: The IRQ number triggered
 * @pw: The pw value when registered the handler.
 *
 * Main interrupt handler: read GINTSTS, service every asserted source,
 * and loop (up to 8 times) while FIFO-related sources remain pending.
 */
static irqreturn_t s3c_hsotg_irq(int irq, void *pw)
{
	struct s3c_hsotg *hsotg = pw;
	int retry_count = 8;
	u32 gintsts;
	u32 gintmsk;

	spin_lock(&hsotg->lock);
irq_retry:
	gintsts = readl(hsotg->regs + GINTSTS);
	gintmsk = readl(hsotg->regs + GINTMSK);

	dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
		__func__, gintsts, gintsts & gintmsk, gintmsk, retry_count);

	/* only handle sources that are not masked off */
	gintsts &= gintmsk;

	if (gintsts & GINTSTS_OTGInt) {
		u32 otgint = readl(hsotg->regs + GOTGINT);

		dev_info(hsotg->dev, "OTGInt: %08x\n", otgint);

		writel(otgint, hsotg->regs + GOTGINT);
	}

	if (gintsts & GINTSTS_SessReqInt) {
		dev_dbg(hsotg->dev, "%s: SessReqInt\n", __func__);
		writel(GINTSTS_SessReqInt, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_EnumDone) {
		writel(GINTSTS_EnumDone, hsotg->regs + GINTSTS);

		s3c_hsotg_irq_enumdone(hsotg);
	}

	if (gintsts & GINTSTS_ConIDStsChng) {
		dev_dbg(hsotg->dev, "ConIDStsChg (DSTS=0x%08x, GOTCTL=%08x)\n",
			readl(hsotg->regs + DSTS),
			readl(hsotg->regs + GOTGCTL));

		writel(GINTSTS_ConIDStsChng, hsotg->regs + GINTSTS);
	}

	if (gintsts & (GINTSTS_OEPInt | GINTSTS_IEPInt)) {
		u32 daint = readl(hsotg->regs + DAINT);
		/* DAINT packs OUT endpoints in the high half, IN in the low */
		u32 daint_out = daint >> DAINT_OutEP_SHIFT;
		u32 daint_in = daint & ~(daint_out << DAINT_OutEP_SHIFT);
		int ep;

		dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);

		for (ep = 0; ep < 15 && daint_out; ep++, daint_out >>= 1) {
			if (daint_out & 1)
				s3c_hsotg_epint(hsotg, ep, 0);
		}

		for (ep = 0; ep < 15 && daint_in; ep++, daint_in >>= 1) {
			if (daint_in & 1)
				s3c_hsotg_epint(hsotg, ep, 1);
		}
	}

	if (gintsts & GINTSTS_USBRst) {

		u32 usb_status = readl(hsotg->regs + GOTGCTL);

		dev_info(hsotg->dev, "%s: USBRst\n", __func__);
		dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
			readl(hsotg->regs + GNPTXSTS));

		writel(GINTSTS_USBRst, hsotg->regs + GINTSTS);

		if (usb_status & GOTGCTL_BSESVLD) {
			/* debounce: only re-init if >200ms since last reset */
			if (time_after(jiffies, hsotg->last_rst +
				       msecs_to_jiffies(200))) {

				kill_all_requests(hsotg, &hsotg->eps[0],
						  -ECONNRESET, true);

				s3c_hsotg_core_init(hsotg);
				hsotg->last_rst = jiffies;
			}
		}
	}

	/* check both FIFOs */

	if (gintsts & GINTSTS_NPTxFEmp) {
		dev_dbg(hsotg->dev, "NPTxFEmp\n");

		/*
		 * Disable the interrupt to stop it happening again
		 * unless one of these endpoint routines decides that
		 * it needs re-enabling
		 */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_NPTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, false);
	}

	if (gintsts & GINTSTS_PTxFEmp) {
		dev_dbg(hsotg->dev, "PTxFEmp\n");

		/* See note in GINTSTS_NPTxFEmp */

		s3c_hsotg_disable_gsint(hsotg, GINTSTS_PTxFEmp);
		s3c_hsotg_irq_fifoempty(hsotg, true);
	}

	if (gintsts & GINTSTS_RxFLvl) {
		/*
		 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
		 * we need to retry s3c_hsotg_handle_rx if this is still
		 * set.
		 */

		s3c_hsotg_handle_rx(hsotg);
	}

	if (gintsts & GINTSTS_ModeMis) {
		dev_warn(hsotg->dev, "warning, mode mismatch triggered\n");
		writel(GINTSTS_ModeMis, hsotg->regs + GINTSTS);
	}

	if (gintsts & GINTSTS_USBSusp) {
		dev_info(hsotg->dev, "GINTSTS_USBSusp\n");
		writel(GINTSTS_USBSusp, hsotg->regs + GINTSTS);

		call_gadget(hsotg, suspend);
		s3c_hsotg_disconnect(hsotg);
	}

	if (gintsts & GINTSTS_WkUpInt) {
		dev_info(hsotg->dev, "GINTSTS_WkUpIn\n");
		writel(GINTSTS_WkUpInt, hsotg->regs + GINTSTS);

		call_gadget(hsotg, resume);
	}

	if (gintsts & GINTSTS_ErlySusp) {
		dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
		writel(GINTSTS_ErlySusp, hsotg->regs + GINTSTS);

		s3c_hsotg_disconnect(hsotg);
	}

	/*
	 * these next two seem to crop-up occasionally causing the core
	 * to shutdown the USB transfer, so try clearing them and logging
	 * the occurrence.
	 */

	if (gintsts & GINTSTS_GOUTNakEff) {
		dev_info(hsotg->dev, "GOUTNakEff triggered\n");

		writel(DCTL_CGOUTNak, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	if (gintsts & GINTSTS_GINNakEff) {
		dev_info(hsotg->dev, "GINNakEff triggered\n");

		writel(DCTL_CGNPInNAK, hsotg->regs + DCTL);

		s3c_hsotg_dump(hsotg);
	}

	/*
	 * if we've had fifo events, we should try and go around the
	 * loop again to see if there's any point in returning yet.
	 */

	if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
		goto irq_retry;

	spin_unlock(&hsotg->lock);

	return IRQ_HANDLED;
}
2547 
2555 static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2556  const struct usb_endpoint_descriptor *desc)
2557 {
2558  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2559  struct s3c_hsotg *hsotg = hs_ep->parent;
2560  unsigned long flags;
2561  int index = hs_ep->index;
2562  u32 epctrl_reg;
2563  u32 epctrl;
2564  u32 mps;
2565  int dir_in;
2566  int ret = 0;
2567 
2568  dev_dbg(hsotg->dev,
2569  "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
2570  __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
2571  desc->wMaxPacketSize, desc->bInterval);
2572 
2573  /* not to be called for EP0 */
2574  WARN_ON(index == 0);
2575 
2576  dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
2577  if (dir_in != hs_ep->dir_in) {
2578  dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
2579  return -EINVAL;
2580  }
2581 
2582  mps = usb_endpoint_maxp(desc);
2583 
2584  /* note, we handle this here instead of s3c_hsotg_set_ep_maxpacket */
2585 
2586  epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2587  epctrl = readl(hsotg->regs + epctrl_reg);
2588 
2589  dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
2590  __func__, epctrl, epctrl_reg);
2591 
2592  spin_lock_irqsave(&hsotg->lock, flags);
2593 
2594  epctrl &= ~(DxEPCTL_EPType_MASK | DxEPCTL_MPS_MASK);
2595  epctrl |= DxEPCTL_MPS(mps);
2596 
2597  /*
2598  * mark the endpoint as active, otherwise the core may ignore
2599  * transactions entirely for this endpoint
2600  */
2601  epctrl |= DxEPCTL_USBActEp;
2602 
2603  /*
2604  * set the NAK status on the endpoint, otherwise we might try and
2605  * do something with data that we've yet got a request to process
2606  * since the RXFIFO will take data for an endpoint even if the
2607  * size register hasn't been set.
2608  */
2609 
2610  epctrl |= DxEPCTL_SNAK;
2611 
2612  /* update the endpoint state */
2613  hs_ep->ep.maxpacket = mps;
2614 
2615  /* default, set to non-periodic */
2616  hs_ep->periodic = 0;
2617 
2618  switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
2620  dev_err(hsotg->dev, "no current ISOC support\n");
2621  ret = -EINVAL;
2622  goto out;
2623 
2625  epctrl |= DxEPCTL_EPType_Bulk;
2626  break;
2627 
2628  case USB_ENDPOINT_XFER_INT:
2629  if (dir_in) {
2630  /*
2631  * Allocate our TxFNum by simply using the index
2632  * of the endpoint for the moment. We could do
2633  * something better if the host indicates how
2634  * many FIFOs we are expecting to use.
2635  */
2636 
2637  hs_ep->periodic = 1;
2638  epctrl |= DxEPCTL_TxFNum(index);
2639  }
2640 
2641  epctrl |= DxEPCTL_EPType_Intterupt;
2642  break;
2643 
2645  epctrl |= DxEPCTL_EPType_Control;
2646  break;
2647  }
2648 
2649  /*
2650  * if the hardware has dedicated fifos, we must give each IN EP
2651  * a unique tx-fifo even if it is non-periodic.
2652  */
2653  if (dir_in && hsotg->dedicated_fifos)
2654  epctrl |= DxEPCTL_TxFNum(index);
2655 
2656  /* for non control endpoints, set PID to D0 */
2657  if (index)
2658  epctrl |= DxEPCTL_SetD0PID;
2659 
2660  dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
2661  __func__, epctrl);
2662 
2663  writel(epctrl, hsotg->regs + epctrl_reg);
2664  dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
2665  __func__, readl(hsotg->regs + epctrl_reg));
2666 
2667  /* enable the endpoint interrupt */
2668  s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2669 
2670 out:
2671  spin_unlock_irqrestore(&hsotg->lock, flags);
2672  return ret;
2673 }
2674 
2679 static int s3c_hsotg_ep_disable(struct usb_ep *ep)
2680 {
2681  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2682  struct s3c_hsotg *hsotg = hs_ep->parent;
2683  int dir_in = hs_ep->dir_in;
2684  int index = hs_ep->index;
2685  unsigned long flags;
2686  u32 epctrl_reg;
2687  u32 ctrl;
2688 
2689  dev_info(hsotg->dev, "%s(ep %p)\n", __func__, ep);
2690 
2691  if (ep == &hsotg->eps[0].ep) {
2692  dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
2693  return -EINVAL;
2694  }
2695 
2696  epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
2697 
2698  spin_lock_irqsave(&hsotg->lock, flags);
2699  /* terminate all requests with shutdown */
2700  kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
2701 
2702 
2703  ctrl = readl(hsotg->regs + epctrl_reg);
2704  ctrl &= ~DxEPCTL_EPEna;
2705  ctrl &= ~DxEPCTL_USBActEp;
2706  ctrl |= DxEPCTL_SNAK;
2707 
2708  dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
2709  writel(ctrl, hsotg->regs + epctrl_reg);
2710 
2711  /* disable endpoint interrupts */
2712  s3c_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
2713 
2714  spin_unlock_irqrestore(&hsotg->lock, flags);
2715  return 0;
2716 }
2717 
2723 static bool on_list(struct s3c_hsotg_ep *ep, struct s3c_hsotg_req *test)
2724 {
2725  struct s3c_hsotg_req *req, *treq;
2726 
2727  list_for_each_entry_safe(req, treq, &ep->queue, queue) {
2728  if (req == test)
2729  return true;
2730  }
2731 
2732  return false;
2733 }
2734 
2740 static int s3c_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2741 {
2742  struct s3c_hsotg_req *hs_req = our_req(req);
2743  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2744  struct s3c_hsotg *hs = hs_ep->parent;
2745  unsigned long flags;
2746 
2747  dev_info(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
2748 
2749  spin_lock_irqsave(&hs->lock, flags);
2750 
2751  if (!on_list(hs_ep, hs_req)) {
2752  spin_unlock_irqrestore(&hs->lock, flags);
2753  return -EINVAL;
2754  }
2755 
2756  s3c_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
2757  spin_unlock_irqrestore(&hs->lock, flags);
2758 
2759  return 0;
2760 }
2761 
2767 static int s3c_hsotg_ep_sethalt(struct usb_ep *ep, int value)
2768 {
2769  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2770  struct s3c_hsotg *hs = hs_ep->parent;
2771  int index = hs_ep->index;
2772  u32 epreg;
2773  u32 epctl;
2774  u32 xfertype;
2775 
2776  dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
2777 
2778  /* write both IN and OUT control registers */
2779 
2780  epreg = DIEPCTL(index);
2781  epctl = readl(hs->regs + epreg);
2782 
2783  if (value) {
2784  epctl |= DxEPCTL_Stall + DxEPCTL_SNAK;
2785  if (epctl & DxEPCTL_EPEna)
2786  epctl |= DxEPCTL_EPDis;
2787  } else {
2788  epctl &= ~DxEPCTL_Stall;
2789  xfertype = epctl & DxEPCTL_EPType_MASK;
2790  if (xfertype == DxEPCTL_EPType_Bulk ||
2791  xfertype == DxEPCTL_EPType_Intterupt)
2792  epctl |= DxEPCTL_SetD0PID;
2793  }
2794 
2795  writel(epctl, hs->regs + epreg);
2796 
2797  epreg = DOEPCTL(index);
2798  epctl = readl(hs->regs + epreg);
2799 
2800  if (value)
2801  epctl |= DxEPCTL_Stall;
2802  else {
2803  epctl &= ~DxEPCTL_Stall;
2804  xfertype = epctl & DxEPCTL_EPType_MASK;
2805  if (xfertype == DxEPCTL_EPType_Bulk ||
2806  xfertype == DxEPCTL_EPType_Intterupt)
2807  epctl |= DxEPCTL_SetD0PID;
2808  }
2809 
2810  writel(epctl, hs->regs + epreg);
2811 
2812  return 0;
2813 }
2814 
2820 static int s3c_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
2821 {
2822  struct s3c_hsotg_ep *hs_ep = our_ep(ep);
2823  struct s3c_hsotg *hs = hs_ep->parent;
2824  unsigned long flags = 0;
2825  int ret = 0;
2826 
2827  spin_lock_irqsave(&hs->lock, flags);
2828  ret = s3c_hsotg_ep_sethalt(ep, value);
2829  spin_unlock_irqrestore(&hs->lock, flags);
2830 
2831  return ret;
2832 }
2833 
2834 static struct usb_ep_ops s3c_hsotg_ep_ops = {
2835  .enable = s3c_hsotg_ep_enable,
2836  .disable = s3c_hsotg_ep_disable,
2837  .alloc_request = s3c_hsotg_ep_alloc_request,
2838  .free_request = s3c_hsotg_ep_free_request,
2839  .queue = s3c_hsotg_ep_queue_lock,
2840  .dequeue = s3c_hsotg_ep_dequeue,
2841  .set_halt = s3c_hsotg_ep_sethalt_lock,
2842  /* note, don't believe we have any call for the fifo routines */
2843 };
2844 
2852 static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg)
2853 {
2854  struct platform_device *pdev = to_platform_device(hsotg->dev);
2855 
2856  dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev);
2857  if (hsotg->plat->phy_init)
2858  hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
2859 }
2860 
2868 static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg)
2869 {
2870  struct platform_device *pdev = to_platform_device(hsotg->dev);
2871 
2872  if (hsotg->plat->phy_exit)
2873  hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
2874 }
2875 
2880 static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
2881 {
2882  /* unmask subset of endpoint interrupts */
2883 
2886  hsotg->regs + DIEPMSK);
2887 
2890  hsotg->regs + DOEPMSK);
2891 
2892  writel(0, hsotg->regs + DAINTMSK);
2893 
2894  /* Be in disconnected state until gadget is registered */
2895  __orr32(hsotg->regs + DCTL, DCTL_SftDiscon);
2896 
2897  if (0) {
2898  /* post global nak until we're ready */
2900  hsotg->regs + DCTL);
2901  }
2902 
2903  /* setup fifos */
2904 
2905  dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
2906  readl(hsotg->regs + GRXFSIZ),
2907  readl(hsotg->regs + GNPTXFSIZ));
2908 
2909  s3c_hsotg_init_fifo(hsotg);
2910 
2911  /* set the PLL on, remove the HNP/SRP and set the PHY */
2912  writel(GUSBCFG_PHYIf16 | GUSBCFG_TOutCal(7) | (0x5 << 10),
2913  hsotg->regs + GUSBCFG);
2914 
2915  writel(using_dma(hsotg) ? GAHBCFG_DMAEn : 0x0,
2916  hsotg->regs + GAHBCFG);
2917 }
2918 
2927 static int s3c_hsotg_udc_start(struct usb_gadget *gadget,
2928  struct usb_gadget_driver *driver)
2929 {
2930  struct s3c_hsotg *hsotg = to_hsotg(gadget);
2931  int ret;
2932 
2933  if (!hsotg) {
2934  printk(KERN_ERR "%s: called with no device\n", __func__);
2935  return -ENODEV;
2936  }
2937 
2938  if (!driver) {
2939  dev_err(hsotg->dev, "%s: no driver\n", __func__);
2940  return -EINVAL;
2941  }
2942 
2943  if (driver->max_speed < USB_SPEED_FULL)
2944  dev_err(hsotg->dev, "%s: bad speed\n", __func__);
2945 
2946  if (!driver->setup) {
2947  dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
2948  return -EINVAL;
2949  }
2950 
2951  WARN_ON(hsotg->driver);
2952 
2953  driver->driver.bus = NULL;
2954  hsotg->driver = driver;
2955  hsotg->gadget.dev.driver = &driver->driver;
2956  hsotg->gadget.dev.of_node = hsotg->dev->of_node;
2957  hsotg->gadget.dev.dma_mask = hsotg->dev->dma_mask;
2958  hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2959 
2961  hsotg->supplies);
2962  if (ret) {
2963  dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
2964  goto err;
2965  }
2966 
2967  hsotg->last_rst = jiffies;
2968  dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
2969  return 0;
2970 
2971 err:
2972  hsotg->driver = NULL;
2973  hsotg->gadget.dev.driver = NULL;
2974  return ret;
2975 }
2976 
2984 static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2985  struct usb_gadget_driver *driver)
2986 {
2987  struct s3c_hsotg *hsotg = to_hsotg(gadget);
2988  unsigned long flags = 0;
2989  int ep;
2990 
2991  if (!hsotg)
2992  return -ENODEV;
2993 
2994  if (!driver || driver != hsotg->driver || !driver->unbind)
2995  return -EINVAL;
2996 
2997  /* all endpoints should be shutdown */
2998  for (ep = 0; ep < hsotg->num_of_eps; ep++)
2999  s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
3000 
3001  spin_lock_irqsave(&hsotg->lock, flags);
3002 
3003  s3c_hsotg_phy_disable(hsotg);
3005 
3006  hsotg->driver = NULL;
3007  hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3008  hsotg->gadget.dev.driver = NULL;
3009 
3010  spin_unlock_irqrestore(&hsotg->lock, flags);
3011 
3012  dev_info(hsotg->dev, "unregistered gadget driver '%s'\n",
3013  driver->driver.name);
3014 
3015  return 0;
3016 }
3017 
/**
 * s3c_hsotg_gadget_getframe - read the frame number
 * @gadget: The usb gadget state
 *
 * Read the {micro} frame number from the controller's frame counter.
 */
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
	return s3c_hsotg_read_frameno(to_hsotg(gadget));
}
3028 
3036 static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on)
3037 {
3038  struct s3c_hsotg *hsotg = to_hsotg(gadget);
3039  unsigned long flags = 0;
3040 
3041  dev_dbg(hsotg->dev, "%s: is_in: %d\n", __func__, is_on);
3042 
3043  spin_lock_irqsave(&hsotg->lock, flags);
3044  if (is_on) {
3045  s3c_hsotg_phy_enable(hsotg);
3046  s3c_hsotg_core_init(hsotg);
3047  } else {
3048  s3c_hsotg_disconnect(hsotg);
3049  s3c_hsotg_phy_disable(hsotg);
3050  }
3051 
3052  hsotg->gadget.speed = USB_SPEED_UNKNOWN;
3053  spin_unlock_irqrestore(&hsotg->lock, flags);
3054 
3055  return 0;
3056 }
3057 
3058 static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
3059  .get_frame = s3c_hsotg_gadget_getframe,
3060  .udc_start = s3c_hsotg_udc_start,
3061  .udc_stop = s3c_hsotg_udc_stop,
3062  .pullup = s3c_hsotg_pullup,
3063 };
3064 
3075 static void __devinit s3c_hsotg_initep(struct s3c_hsotg *hsotg,
3076  struct s3c_hsotg_ep *hs_ep,
3077  int epnum)
3078 {
3079  u32 ptxfifo;
3080  char *dir;
3081 
3082  if (epnum == 0)
3083  dir = "";
3084  else if ((epnum % 2) == 0) {
3085  dir = "out";
3086  } else {
3087  dir = "in";
3088  hs_ep->dir_in = 1;
3089  }
3090 
3091  hs_ep->index = epnum;
3092 
3093  snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
3094 
3095  INIT_LIST_HEAD(&hs_ep->queue);
3096  INIT_LIST_HEAD(&hs_ep->ep.ep_list);
3097 
3098  /* add to the list of endpoints known by the gadget driver */
3099  if (epnum)
3100  list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
3101 
3102  hs_ep->parent = hsotg;
3103  hs_ep->ep.name = hs_ep->name;
3104  hs_ep->ep.maxpacket = epnum ? 512 : EP0_MPS_LIMIT;
3105  hs_ep->ep.ops = &s3c_hsotg_ep_ops;
3106 
3107  /*
3108  * Read the FIFO size for the Periodic TX FIFO, even if we're
3109  * an OUT endpoint, we may as well do this if in future the
3110  * code is changed to make each endpoint's direction changeable.
3111  */
3112 
3113  ptxfifo = readl(hsotg->regs + DPTXFSIZn(epnum));
3114  hs_ep->fifo_size = DPTXFSIZn_DPTxFSize_GET(ptxfifo) * 4;
3115 
3116  /*
3117  * if we're using dma, we need to set the next-endpoint pointer
3118  * to be something valid.
3119  */
3120 
3121  if (using_dma(hsotg)) {
3122  u32 next = DxEPCTL_NextEp((epnum + 1) % 15);
3123  writel(next, hsotg->regs + DIEPCTL(epnum));
3124  writel(next, hsotg->regs + DOEPCTL(epnum));
3125  }
3126 }
3127 
/**
 * s3c_hsotg_hw_cfg - read HW configuration registers
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Discover the number of endpoints and whether dedicated per-endpoint
 * TX FIFOs are fitted, from the core's hardware-configuration
 * registers.
 */
static void s3c_hsotg_hw_cfg(struct s3c_hsotg *hsotg)
{
	u32 cfg2, cfg4;
	/* check hardware configuration */

	/* 0x48 is presumably the GHWCFG2 register (bits 13:10 = endpoint
	 * count) - TODO: replace the magic offset with a named constant */
	cfg2 = readl(hsotg->regs + 0x48);
	hsotg->num_of_eps = (cfg2 >> 10) & 0xF;

	dev_info(hsotg->dev, "EPs:%d\n", hsotg->num_of_eps);

	/* 0x50 is presumably GHWCFG4 (bit 25 = dedicated FIFO mode) -
	 * TODO: replace the magic offset with a named constant */
	cfg4 = readl(hsotg->regs + 0x50);
	hsotg->dedicated_fifos = (cfg4 >> 25) & 1;

	dev_info(hsotg->dev, "%s fifos\n",
		 hsotg->dedicated_fifos ? "dedicated" : "shared");
}
3150 
/**
 * s3c_hsotg_dump - dump the controller's register state
 * @hsotg: Programming view of the DWC_otg controller
 *
 * Debug-build-only register dump.  (Restores the FIFO start-address
 * mask argument lost from the periodic-FIFO loop.)
 */
static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
{
#ifdef DEBUG
	struct device *dev = hsotg->dev;
	void __iomem *regs = hsotg->regs;
	u32 val;
	int idx;

	dev_info(dev, "DCFG=0x%08x, DCTL=0x%08x, DIEPMSK=%08x\n",
		 readl(regs + DCFG), readl(regs + DCTL),
		 readl(regs + DIEPMSK));

	dev_info(dev, "GAHBCFG=0x%08x, 0x44=0x%08x\n",
		 readl(regs + GAHBCFG), readl(regs + 0x44));

	dev_info(dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
		 readl(regs + GRXFSIZ), readl(regs + GNPTXFSIZ));

	/* show periodic fifo settings */

	for (idx = 1; idx <= 15; idx++) {
		val = readl(regs + DPTXFSIZn(idx));
		dev_info(dev, "DPTx[%d] FSize=%d, StAddr=0x%08x\n", idx,
			 val >> DPTXFSIZn_DPTxFSize_SHIFT,
			 val & DPTXFSIZn_DPTxFStAddr_MASK);
	}

	for (idx = 0; idx < 15; idx++) {
		dev_info(dev,
			 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
			 readl(regs + DIEPCTL(idx)),
			 readl(regs + DIEPTSIZ(idx)),
			 readl(regs + DIEPDMA(idx)));

		val = readl(regs + DOEPCTL(idx));
		dev_info(dev,
			 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
			 idx, readl(regs + DOEPCTL(idx)),
			 readl(regs + DOEPTSIZ(idx)),
			 readl(regs + DOEPDMA(idx)));

	}

	dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
		 readl(regs + DVBUSDIS), readl(regs + DVBUSPULSE));
#endif
}
3202 
/**
 * state_show - debugfs: show overall driver and device state.
 * @seq: The seq file to write to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the overall state of the hardware and
 * some general information about each of the endpoints available
 * to the system.  Registers are read without taking the device lock;
 * the output is a best-effort snapshot.
 */
static int state_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg *hsotg = seq->private;
	void __iomem *regs = hsotg->regs;
	int idx;

	seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n",
		 readl(regs + DCFG),
		 readl(regs + DCTL),
		 readl(regs + DSTS));

	seq_printf(seq, "DIEPMSK=0x%08x, DOEPMASK=0x%08x\n",
		   readl(regs + DIEPMSK), readl(regs + DOEPMSK));

	seq_printf(seq, "GINTMSK=0x%08x, GINTSTS=0x%08x\n",
		   readl(regs + GINTMSK),
		   readl(regs + GINTSTS));

	seq_printf(seq, "DAINTMSK=0x%08x, DAINT=0x%08x\n",
		   readl(regs + DAINTMSK),
		   readl(regs + DAINT));

	seq_printf(seq, "GNPTXSTS=0x%08x, GRXSTSR=%08x\n",
		   readl(regs + GNPTXSTS),
		   readl(regs + GRXSTSR));

	seq_printf(seq, "\nEndpoint status:\n");

	/* per-endpoint control and transfer-size registers */
	for (idx = 0; idx < 15; idx++) {
		u32 in, out;

		in = readl(regs + DIEPCTL(idx));
		out = readl(regs + DOEPCTL(idx));

		seq_printf(seq, "ep%d: DIEPCTL=0x%08x, DOEPCTL=0x%08x",
			   idx, in, out);

		in = readl(regs + DIEPTSIZ(idx));
		out = readl(regs + DOEPTSIZ(idx));

		seq_printf(seq, ", DIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x",
			   in, out);

		seq_printf(seq, "\n");
	}

	return 0;
}
3260 
/* debugfs open callback: bind state_show to the seq_file machinery */
static int state_open(struct inode *inode, struct file *file)
{
	return single_open(file, state_show, inode->i_private);
}
3265 
/* file operations for the debugfs "state" file */
static const struct file_operations state_fops = {
	.owner		= THIS_MODULE,
	.open		= state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3273 
3282 static int fifo_show(struct seq_file *seq, void *v)
3283 {
3284  struct s3c_hsotg *hsotg = seq->private;
3285  void __iomem *regs = hsotg->regs;
3286  u32 val;
3287  int idx;
3288 
3289  seq_printf(seq, "Non-periodic FIFOs:\n");
3290  seq_printf(seq, "RXFIFO: Size %d\n", readl(regs + GRXFSIZ));
3291 
3292  val = readl(regs + GNPTXFSIZ);
3293  seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
3294  val >> GNPTXFSIZ_NPTxFDep_SHIFT,
3296 
3297  seq_printf(seq, "\nPeriodic TXFIFOs:\n");
3298 
3299  for (idx = 1; idx <= 15; idx++) {
3300  val = readl(regs + DPTXFSIZn(idx));
3301 
3302  seq_printf(seq, "\tDPTXFIFO%2d: Size %d, Start 0x%08x\n", idx,
3303  val >> DPTXFSIZn_DPTxFSize_SHIFT,
3305  }
3306 
3307  return 0;
3308 }
3309 
/* debugfs open callback: bind fifo_show to the seq_file machinery */
static int fifo_open(struct inode *inode, struct file *file)
{
	return single_open(file, fifo_show, inode->i_private);
}
3314 
/* file operations for the debugfs "fifo" file */
static const struct file_operations fifo_fops = {
	.owner		= THIS_MODULE,
	.open		= fifo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3322 
3323 
/* map an endpoint direction flag to its human-readable name */
static const char *decode_direction(int is_in)
{
	if (is_in)
		return "in";

	return "out";
}
3328 
/**
 * ep_show - debugfs: show the state of an endpoint.
 * @seq: The seq_file to write data to.
 * @v: Unused parameter.
 *
 * This debugfs entry shows the state of the given endpoint (one is
 * registered for each available endpoint in the system): register
 * state first, then up to 15 of the queued requests.  The device lock
 * is held only while walking the request queue.
 */
static int ep_show(struct seq_file *seq, void *v)
{
	struct s3c_hsotg_ep *ep = seq->private;
	struct s3c_hsotg *hsotg = ep->parent;
	struct s3c_hsotg_req *req;
	void __iomem *regs = hsotg->regs;
	int index = ep->index;
	int show_limit = 15;	/* cap the request dump to avoid huge output */
	unsigned long flags;

	seq_printf(seq, "Endpoint index %d, named %s, dir %s:\n",
		   ep->index, ep->ep.name, decode_direction(ep->dir_in));

	/* first show the register state */

	seq_printf(seq, "\tDIEPCTL=0x%08x, DOEPCTL=0x%08x\n",
		   readl(regs + DIEPCTL(index)),
		   readl(regs + DOEPCTL(index)));

	seq_printf(seq, "\tDIEPDMA=0x%08x, DOEPDMA=0x%08x\n",
		   readl(regs + DIEPDMA(index)),
		   readl(regs + DOEPDMA(index)));

	seq_printf(seq, "\tDIEPINT=0x%08x, DOEPINT=0x%08x\n",
		   readl(regs + DIEPINT(index)),
		   readl(regs + DOEPINT(index)));

	seq_printf(seq, "\tDIEPTSIZ=0x%08x, DOEPTSIZ=0x%08x\n",
		   readl(regs + DIEPTSIZ(index)),
		   readl(regs + DOEPTSIZ(index)));

	seq_printf(seq, "\n");
	seq_printf(seq, "mps %d\n", ep->ep.maxpacket);
	seq_printf(seq, "total_data=%ld\n", ep->total_data);

	seq_printf(seq, "request list (%p,%p):\n",
		   ep->queue.next, ep->queue.prev);

	spin_lock_irqsave(&hsotg->lock, flags);

	list_for_each_entry(req, &ep->queue, queue) {
		if (--show_limit < 0) {
			seq_printf(seq, "not showing more requests...\n");
			break;
		}

		/* '*' marks the request currently being processed */
		seq_printf(seq, "%c req %p: %d bytes @%p, ",
			   req == ep->req ? '*' : ' ',
			   req, req->req.length, req->req.buf);
		seq_printf(seq, "%d done, res %d\n",
			   req->req.actual, req->req.status);
	}

	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;
}
3394 
/* debugfs open callback: bind ep_show to the seq_file machinery */
static int ep_open(struct inode *inode, struct file *file)
{
	return single_open(file, ep_show, inode->i_private);
}
3399 
/* file operations for the per-endpoint debugfs files */
static const struct file_operations ep_fops = {
	.owner		= THIS_MODULE,
	.open		= ep_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3407 
/**
 * s3c_hsotg_create_debug - create debugfs directory and files
 * @hsotg: The driver state
 *
 * Create the debugfs files to allow the user to get information
 * about the state of the system.  The directory name is based on
 * the controller's device name.  Failures are reported with dev_err
 * but are not fatal - the driver works without debugfs.
 */
static void __devinit s3c_hsotg_create_debug(struct s3c_hsotg *hsotg)
{
	struct dentry *root;
	unsigned epidx;

	root = debugfs_create_dir(dev_name(hsotg->dev), NULL);
	hsotg->debug_root = root;
	if (IS_ERR(root)) {
		dev_err(hsotg->dev, "cannot create debug root\n");
		return;
	}

	/* create general state file */

	hsotg->debug_file = debugfs_create_file("state", 0444, root,
						hsotg, &state_fops);

	if (IS_ERR(hsotg->debug_file))
		dev_err(hsotg->dev, "%s: failed to create state\n", __func__);

	hsotg->debug_fifo = debugfs_create_file("fifo", 0444, root,
						hsotg, &fifo_fops);

	if (IS_ERR(hsotg->debug_fifo))
		dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__);

	/* create one file for each endpoint */

	for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
		struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];

		ep->debugfs = debugfs_create_file(ep->name, 0444,
						  root, ep, &ep_fops);

		if (IS_ERR(ep->debugfs))
			dev_err(hsotg->dev, "failed to create %s debug file\n",
				ep->name);
	}
}
3456 
3463 static void __devexit s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
3464 {
3465  unsigned epidx;
3466 
3467  for (epidx = 0; epidx < hsotg->num_of_eps; epidx++) {
3468  struct s3c_hsotg_ep *ep = &hsotg->eps[epidx];
3469  debugfs_remove(ep->debugfs);
3470  }
3471 
3472  debugfs_remove(hsotg->debug_file);
3473  debugfs_remove(hsotg->debug_fifo);
3474  debugfs_remove(hsotg->debug_root);
3475 }
3476 
/**
 * s3c_hsotg_release - release callback for gadget device
 * @dev: Device to which release is called
 *
 * Called by the driver core when the last reference to the gadget
 * device is dropped; frees the driver state.
 * NOTE(review): the state is allocated with devm_kzalloc() in probe,
 * so this kfree() looks like a double-free risk - confirm ownership.
 */
static void s3c_hsotg_release(struct device *dev)
{
	struct s3c_hsotg *hsotg = dev_get_drvdata(dev);

	kfree(hsotg);
}
3487 
3493 static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
3494 {
3495  struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
3496  struct device *dev = &pdev->dev;
3497  struct s3c_hsotg_ep *eps;
3498  struct s3c_hsotg *hsotg;
3499  struct resource *res;
3500  int epnum;
3501  int ret;
3502  int i;
3503 
3504  plat = pdev->dev.platform_data;
3505  if (!plat) {
3506  dev_err(&pdev->dev, "no platform data defined\n");
3507  return -EINVAL;
3508  }
3509 
3510  hsotg = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsotg), GFP_KERNEL);
3511  if (!hsotg) {
3512  dev_err(dev, "cannot get memory\n");
3513  return -ENOMEM;
3514  }
3515 
3516  hsotg->dev = dev;
3517  hsotg->plat = plat;
3518 
3519  hsotg->clk = devm_clk_get(&pdev->dev, "otg");
3520  if (IS_ERR(hsotg->clk)) {
3521  dev_err(dev, "cannot get otg clock\n");
3522  return PTR_ERR(hsotg->clk);
3523  }
3524 
3525  platform_set_drvdata(pdev, hsotg);
3526 
3527  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3528 
3529  hsotg->regs = devm_request_and_ioremap(&pdev->dev, res);
3530  if (!hsotg->regs) {
3531  dev_err(dev, "cannot map registers\n");
3532  ret = -ENXIO;
3533  goto err_clk;
3534  }
3535 
3536  ret = platform_get_irq(pdev, 0);
3537  if (ret < 0) {
3538  dev_err(dev, "cannot find IRQ\n");
3539  goto err_clk;
3540  }
3541 
3542  spin_lock_init(&hsotg->lock);
3543 
3544  hsotg->irq = ret;
3545 
3546  ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0,
3547  dev_name(dev), hsotg);
3548  if (ret < 0) {
3549  dev_err(dev, "cannot claim IRQ\n");
3550  goto err_clk;
3551  }
3552 
3553  dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq);
3554 
3555  device_initialize(&hsotg->gadget.dev);
3556 
3557  dev_set_name(&hsotg->gadget.dev, "gadget");
3558 
3559  hsotg->gadget.max_speed = USB_SPEED_HIGH;
3560  hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
3561  hsotg->gadget.name = dev_name(dev);
3562 
3563  hsotg->gadget.dev.parent = dev;
3564  hsotg->gadget.dev.dma_mask = dev->dma_mask;
3565  hsotg->gadget.dev.release = s3c_hsotg_release;
3566 
3567  /* reset the system */
3568 
3569  clk_prepare_enable(hsotg->clk);
3570 
3571  /* regulators */
3572 
3573  for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
3574  hsotg->supplies[i].supply = s3c_hsotg_supply_names[i];
3575 
3576  ret = regulator_bulk_get(dev, ARRAY_SIZE(hsotg->supplies),
3577  hsotg->supplies);
3578  if (ret) {
3579  dev_err(dev, "failed to request supplies: %d\n", ret);
3580  goto err_clk;
3581  }
3582 
3584  hsotg->supplies);
3585 
3586  if (ret) {
3587  dev_err(hsotg->dev, "failed to enable supplies: %d\n", ret);
3588  goto err_supplies;
3589  }
3590 
3591  /* usb phy enable */
3592  s3c_hsotg_phy_enable(hsotg);
3593 
3594  s3c_hsotg_corereset(hsotg);
3595  s3c_hsotg_init(hsotg);
3596  s3c_hsotg_hw_cfg(hsotg);
3597 
3598  /* hsotg->num_of_eps holds number of EPs other than ep0 */
3599 
3600  if (hsotg->num_of_eps == 0) {
3601  dev_err(dev, "wrong number of EPs (zero)\n");
3602  ret = -EINVAL;
3603  goto err_supplies;
3604  }
3605 
3606  eps = kcalloc(hsotg->num_of_eps + 1, sizeof(struct s3c_hsotg_ep),
3607  GFP_KERNEL);
3608  if (!eps) {
3609  dev_err(dev, "cannot get memory\n");
3610  ret = -ENOMEM;
3611  goto err_supplies;
3612  }
3613 
3614  hsotg->eps = eps;
3615 
3616  /* setup endpoint information */
3617 
3618  INIT_LIST_HEAD(&hsotg->gadget.ep_list);
3619  hsotg->gadget.ep0 = &hsotg->eps[0].ep;
3620 
3621  /* allocate EP0 request */
3622 
3623  hsotg->ctrl_req = s3c_hsotg_ep_alloc_request(&hsotg->eps[0].ep,
3624  GFP_KERNEL);
3625  if (!hsotg->ctrl_req) {
3626  dev_err(dev, "failed to allocate ctrl req\n");
3627  ret = -ENOMEM;
3628  goto err_ep_mem;
3629  }
3630 
3631  /* initialise the endpoints now the core has been initialised */
3632  for (epnum = 0; epnum < hsotg->num_of_eps; epnum++)
3633  s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3634 
3635  /* disable power and clock */
3636 
3638  hsotg->supplies);
3639  if (ret) {
3640  dev_err(hsotg->dev, "failed to disable supplies: %d\n", ret);
3641  goto err_ep_mem;
3642  }
3643 
3644  s3c_hsotg_phy_disable(hsotg);
3645 
3646  ret = device_add(&hsotg->gadget.dev);
3647  if (ret) {
3648  put_device(&hsotg->gadget.dev);
3649  goto err_ep_mem;
3650  }
3651 
3652  ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
3653  if (ret)
3654  goto err_ep_mem;
3655 
3656  s3c_hsotg_create_debug(hsotg);
3657 
3658  s3c_hsotg_dump(hsotg);
3659 
3660  return 0;
3661 
3662 err_ep_mem:
3663  kfree(eps);
3664 err_supplies:
3665  s3c_hsotg_phy_disable(hsotg);
3667 
3668 err_clk:
3669  clk_disable_unprepare(hsotg->clk);
3670 
3671  return ret;
3672 }
3673 
3678 static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
3679 {
3680  struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
3681 
3682  usb_del_gadget_udc(&hsotg->gadget);
3683 
3684  s3c_hsotg_delete_debug(hsotg);
3685 
3686  if (hsotg->driver) {
3687  /* should have been done already by driver model core */
3689  }
3690 
3691  s3c_hsotg_phy_disable(hsotg);
3693 
3694  clk_disable_unprepare(hsotg->clk);
3695 
3696  device_unregister(&hsotg->gadget.dev);
3697  return 0;
3698 }
3699 
/*
 * No power-management support yet: stub out the suspend/resume hooks.
 * NOTE(review): "#if 1" looks like a leftover development toggle -
 * presumably this should be conditional on CONFIG_PM; confirm before
 * implementing real suspend/resume.
 */
#if 1
#define s3c_hsotg_suspend NULL
#define s3c_hsotg_resume NULL
#endif
3704 
/* platform driver glue: probe/remove plus (currently stubbed) PM hooks */
static struct platform_driver s3c_hsotg_driver = {
	.driver		= {
		.name	= "s3c-hsotg",
		.owner	= THIS_MODULE,
	},
	.probe		= s3c_hsotg_probe,
	.remove		= __devexit_p(s3c_hsotg_remove),
	.suspend	= s3c_hsotg_suspend,
	.resume		= s3c_hsotg_resume,
};
3715 
3716 module_platform_driver(s3c_hsotg_driver);
3717 
3718 MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
3719 MODULE_AUTHOR("Ben Dooks <[email protected]>");
3720 MODULE_LICENSE("GPL");
3721 MODULE_ALIAS("platform:s3c-hsotg");