Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bcm63xx_udc.c
Go to the documentation of this file.
1 /*
2  * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
3  *
4  * Copyright (C) 2012 Kevin Cernekee <[email protected]>
5  * Copyright (C) 2012 Broadcom Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/kconfig.h>
26 #include <linux/kernel.h>
27 #include <linux/list.h>
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/platform_device.h>
31 #include <linux/sched.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/timer.h>
35 #include <linux/usb/ch9.h>
36 #include <linux/usb/gadget.h>
37 #include <linux/workqueue.h>
38 
39 #include <bcm63xx_cpu.h>
40 #include <bcm63xx_iudma.h>
41 #include <bcm63xx_dev_usb_usbd.h>
42 #include <bcm63xx_io.h>
43 #include <bcm63xx_regs.h>
44 
45 #define DRV_MODULE_NAME "bcm63xx_udc"
46 
/* ep0's name is shared so it can be recognized by pointer comparison
 * (see the ep->name == bcm63xx_ep0name test in bcm63xx_ep_enable()). */
static const char bcm63xx_ep0name[] = "ep0";

/* Endpoint names, indexed by endpoint number; the set is fixed by the
 * hardware's CDC/RNDIS-oriented channel layout (see iudma_defaults[]). */
static const char *const bcm63xx_ep_name[] = {
	bcm63xx_ep0name,
	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
};
52 
/* Module parameter: when true, advertise full speed only (no high speed).
 * Read-only at runtime (S_IRUGO); defaults to false. */
static bool use_fullspeed;
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
56 
/*
 * RX IRQ coalescing options:
 *
 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
 * driver is able to pass the "testusb" suite and recover from conditions like:
 *
 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
 * 2) Host sends 512 bytes of data
 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
 * 4) Device shuts down the endpoint and cancels the RX transaction
 *
 * true - one IRQ per transfer, for transfers <= 2048B. Generates
 * considerably fewer IRQs, but error recovery is less robust. Does not
 * reliably pass "testusb".
 *
 * TX always uses coalescing, because we can cancel partially complete TX
 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
 * this on RX.
 */
static bool irq_coalesce;	/* default: false = one IRQ per packet */
module_param(irq_coalesce, bool, S_IRUGO);
MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer")
79 
#define BCM63XX_NUM_EP 5		/* ep0 + 4 data endpoints (see bcm63xx_ep_name) */
#define BCM63XX_NUM_IUDMA 6		/* one RX + one TX channel per FIFO pair */
#define BCM63XX_NUM_FIFO_PAIRS 3	/* BCM63XX_NUM_IUDMA / 2 */

#define IUDMA_RESET_TIMEOUT_US 10000	/* poll budget in iudma_reset_channel() */

/* fixed IUDMA channel assignments for the control endpoint */
#define IUDMA_EP0_RXCHAN 0
#define IUDMA_EP0_TXCHAN 1

#define IUDMA_MAX_FRAGMENT 2048		/* max bytes per buffer descriptor */
#define BCM63XX_MAX_CTRL_PKT 64		/* ep0 wMaxPacketSize */

/* endpoint type codes written to the hardware (USBD_EPNUM_TYPEMAP/CSR) */
#define BCMEP_CTRL 0x00
#define BCMEP_ISOC 0x01
#define BCMEP_BULK 0x02
#define BCMEP_INTR 0x03

/* endpoint direction codes */
#define BCMEP_OUT 0x00
#define BCMEP_IN 0x01

/* speed selector values; note full=1, high=0 — presumably the straps
 * register encoding, confirm against bcm63xx_regs.h */
#define BCM63XX_SPD_FULL 1
#define BCM63XX_SPD_HIGH 0

/* offsets of the DMA channel-config and state-RAM blocks within iudma_regs */
#define IUDMA_DMAC_OFFSET 0x200
#define IUDMA_DMAS_OFFSET 0x400
116 };
117 
/*
 * Human-readable names for the ep0 state machine (debug output only);
 * the order must track the EP0_* enum values stored in udc->ep0state.
 */
static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
	"REQUEUE",
	"IDLE",
	"IN_DATA_PHASE_SETUP",
	"IN_DATA_PHASE_COMPLETE",
	"OUT_DATA_PHASE_SETUP",
	"OUT_DATA_PHASE_COMPLETE",
	"OUT_STATUS_PHASE",
	"IN_FAKE_STATUS_PHASE",
	"SHUTDOWN",
};
129 
/*
 * Static configuration of one IUDMA channel.
 *
 * NOTE(review): this scraped view shows only four members, but the
 * iudma_defaults[] initializers below supply seven values per entry and
 * their header comment names n_fifo_slots, max_pkt_hs and max_pkt_fs —
 * those fields were evidently lost in extraction; confirm against the
 * original source.
 */
struct iudma_ch_cfg {
	int ep_num;	/* USB endpoint number, or -1 = not bound to an ep */
	int n_bds;	/* number of buffer descriptors in the ring */
	int ep_type;	/* BCMEP_{CTRL,ISOC,BULK,INTR} */
	int dir;	/* BCMEP_OUT or BCMEP_IN */
};
149 
/* Per-channel defaults, indexed by IUDMA channel (even = RX, odd = TX). */
static const struct iudma_ch_cfg iudma_defaults[] = {

	/* This controller was designed to support a CDC/RNDIS application.
	   It may be possible to reconfigure some of the endpoints, but
	   the hardware limitations (FIFO sizing and number of DMA channels)
	   may significantly impact flexibility and/or stability. Change
	   these values at your own risk.

	      ep_num       ep_type           n_fifo_slots    max_pkt_fs
	idx    |  n_bds     |         dir    |  max_pkt_hs    |
	 |     |    |       |          |     |      |         |       */
	[0] = { -1,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,   64 },
	[1] = {  0,   4, BCMEP_CTRL, BCMEP_OUT,  32,    64,   64 },
	[2] = {  2,  16, BCMEP_BULK, BCMEP_OUT, 128,   512,   64 },
	[3] = {  1,  16, BCMEP_BULK, BCMEP_IN,  128,   512,   64 },
	[4] = {  4,   4, BCMEP_INTR, BCMEP_OUT,  32,    64,   64 },
	[5] = {  3,   4, BCMEP_INTR, BCMEP_IN,   32,    64,   64 },
};
168 
169 struct bcm63xx_udc;
170 
/*
 * Live state of one IUDMA channel.
 *
 * NOTE(review): several members are missing from this scraped view —
 * judging by uses in iudma_write()/iudma_read(), at least bd_ring,
 * bd_ring_dma, read_bd, write_bd, end_bd and n_bds_used were dropped.
 */
struct iudma_ch {
	unsigned int ch_idx;		/* index into udc->iudma[] / HW channel */
	int ep_num;			/* bound endpoint number, or -1 */
	bool enabled;			/* channel active (ep enabled, or ep0) */
	int max_pkt;			/* wMaxPacketSize for the current speed */
	bool is_tx;			/* true for odd-numbered (TX) channels */
	struct bcm63xx_ep *bep;		/* owning endpoint, or NULL */
	struct bcm63xx_udc *udc;	/* owning device */

	unsigned int n_bds;		/* ring size, from iudma_ch_cfg */
};
213 
/* One logical USB endpoint as exposed to the gadget layer. */
struct bcm63xx_ep {
	unsigned int ep_num;		/* USB endpoint number (0..4) */
	struct iudma_ch *iudma;		/* backing DMA channel */
	struct usb_ep ep;		/* embedded gadget-layer endpoint */
	struct bcm63xx_udc *udc;	/* owning controller */
	struct list_head queue;		/* pending bcm63xx_req requests */
	unsigned halted:1;		/* endpoint currently stalled */
};
231 
/* Driver-private wrapper around a gadget usb_request. */
struct bcm63xx_req {
	struct list_head queue;		/* ep's requests */
	struct usb_request req;		/* embedded gadget request */
	unsigned int offset;		/* bytes of req.length queued to HW so far */
	unsigned int bd_bytes;		/* bytes covered by the current BD batch */
	struct iudma_ch *iudma;		/* channel this request was written to */
};
247 
/*
 * Main per-device driver state.
 *
 * NOTE(review): many members were lost in extraction (register base
 * pointers, the spinlock, the gadget/driver objects, the bep[]/iudma[]
 * arrays, ep0 request bookkeeping, the ep0 workqueue, ...); only the
 * surviving fields are commented here.
 */
struct bcm63xx_udc {

	struct device *dev;		/* platform device handle */
	struct clk *usbd_clk;		/* USB device-block clock */
	struct clk *usbh_clk;		/* USB host-block clock; also enabled
					 * for device mode (see set_clocks) */

	int cfg;			/* last SET_CONFIGURATION value seen */
	int iface;			/* last SET_INTERFACE interface */

	int ep0state;			/* current EP0_* state machine value */

	unsigned long wedgemap;		/* bitmap of wedged endpoints */

	/* latched ep0 events, consumed by the ep0 state machine */
	unsigned ep0_req_reset:1;
	unsigned ep0_req_set_cfg:1;
	unsigned ep0_req_set_iface:1;
	unsigned ep0_req_shutdown:1;

	unsigned ep0_req_completed:1;	/* HW finished the queued ep0 request */
};
322 
323 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
324 
325 /***********************************************************************
326  * Convenience functions
327  ***********************************************************************/
328 
329 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
330 {
331  return container_of(g, struct bcm63xx_udc, gadget);
332 }
333 
334 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
335 {
336  return container_of(ep, struct bcm63xx_ep, ep);
337 }
338 
339 static inline struct bcm63xx_req *our_req(struct usb_request *req)
340 {
341  return container_of(req, struct bcm63xx_req, req);
342 }
343 
/* Read a 32-bit register in the USBD core block. */
static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->usbd_regs + off);
}

/* Write a 32-bit register in the USBD core block. */
static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->usbd_regs + off);
}

/* Read a 32-bit register in the IUDMA global block. */
static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + off);
}

/* Write a 32-bit register in the IUDMA global block. */
static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + off);
}

/* Read a 32-bit register in the IUDMA channel-config (DMAC) block. */
static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

/* Write a 32-bit register in the IUDMA channel-config (DMAC) block. */
static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
}

/* Read a 32-bit register in the IUDMA state-RAM (DMAS) block. */
static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
{
	return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}

/* Write a 32-bit register in the IUDMA state-RAM (DMAS) block. */
static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
{
	bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
}
383 
384 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
385 {
386  if (is_enabled) {
387  clk_enable(udc->usbh_clk);
388  clk_enable(udc->usbd_clk);
389  udelay(10);
390  } else {
391  clk_disable(udc->usbd_clk);
392  clk_disable(udc->usbh_clk);
393  }
394 }
395 
396 /***********************************************************************
397  * Low-level IUDMA / FIFO operations
398  ***********************************************************************/
399 
409 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
410 {
411  u32 val = usbd_readl(udc, USBD_CONTROL_REG);
412 
414  val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
415  usbd_writel(udc, val, USBD_CONTROL_REG);
416 }
417 
427 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
428  bool is_stalled)
429 {
430  u32 val;
431 
432  val = USBD_STALL_UPDATE_MASK |
433  (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
434  (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
435  usbd_writel(udc, val, USBD_STALL_REG);
436 }
437 
/*
 * bcm63xx_fifo_setup - Configure USBD FIFO boundaries and packet sizes.
 * @udc: Reference to the device controller.
 *
 * FIFO pairs (one RX + one TX channel) are configured together from the
 * iudma_defaults[] table; packet sizes depend on the negotiated speed.
 *
 * NOTE(review): several source lines were lost in extraction here — the
 * END_SHIFT halves of the rx/tx config expressions and the EPSIZE register
 * names are truncated. The code below preserves the surviving text.
 */
static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
{
	int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
	u32 i, val, rx_fifo_slot, tx_fifo_slot;

	/* set up FIFO boundaries and packet sizes; this is done in pairs */
	rx_fifo_slot = tx_fifo_slot = 0;
	for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
		const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
		const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];

		bcm63xx_ep_dma_select(udc, i >> 1);

		/* NOTE(review): truncated expression — END_SHIFT term missing */
		val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
			((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
		rx_fifo_slot += rx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
		/* NOTE(review): truncated call — destination register missing */
		usbd_writel(udc,
			is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,

		/* NOTE(review): truncated expression — END_SHIFT term missing */
		val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
			((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
		tx_fifo_slot += tx_cfg->n_fifo_slots;
		usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
		/* NOTE(review): truncated call — destination register missing */
		usbd_writel(udc,
			is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,

		/* read back — presumably to flush posted writes; confirm */
		usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
	}
}
479 
485 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
486 {
487  u32 val;
488 
489  bcm63xx_ep_dma_select(udc, ep_num);
490 
491  val = usbd_readl(udc, USBD_CONTROL_REG);
493  usbd_writel(udc, val, USBD_CONTROL_REG);
494  usbd_readl(udc, USBD_CONTROL_REG);
495 }
496 
501 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
502 {
503  int i;
504 
505  for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
506  bcm63xx_fifo_reset_ep(udc, i);
507 }
508 
513 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
514 {
515  u32 i, val;
516 
517  for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
518  const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
519 
520  if (cfg->ep_num < 0)
521  continue;
522 
523  bcm63xx_ep_dma_select(udc, cfg->ep_num);
524  val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
525  ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
526  usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
527  }
528 }
529 
/*
 * bcm63xx_ep_setup - Program the per-endpoint CSR registers.
 * @udc: Reference to the device controller.
 *
 * Caches each channel's max packet size for the current speed, then
 * writes the endpoint CSR word (log ep number, direction, type, current
 * config/interface, max packet size).
 *
 * NOTE(review): two lines were lost in extraction — an initial register
 * write before the loop, and an (udc->alt_iface << ..._ALTIFACE_SHIFT)
 * term in the CSR word; confirm against the original source.
 */
static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
{
	u32 val, i;

	for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
		const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
		int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
			cfg->max_pkt_hs : cfg->max_pkt_fs;
		int idx = cfg->ep_num;

		/* cache the active max packet size on the channel */
		udc->iudma[i].max_pkt = max_pkt;

		if (idx < 0)
			continue;
		udc->bep[idx].ep.maxpacket = max_pkt;

		val = (idx << USBD_CSR_EP_LOG_SHIFT) |
			(cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
			(cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
			(udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
			(udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
			(max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
		usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
	}
}
564 
/*
 * iudma_write - Queue a (portion of a) request on an IUDMA channel.
 * @udc: Reference to the device controller.
 * @iudma: Channel to use.
 * @breq: Request to queue.
 *
 * Splits the remaining bytes of the request across buffer descriptors
 * (up to max_pkt per BD for uncoalesced RX, IUDMA_MAX_FRAGMENT otherwise),
 * marks SOP/EOP/WRAP/OWNER flags, optionally appends a zero-length packet
 * when the transfer is an exact multiple of max_pkt and req.zero is set,
 * then kicks the channel.
 *
 * NOTE(review): the continuation of the zero-length-BD flags expression
 * was lost in extraction (a dangling '|' remains below); confirm against
 * the original source.
 */
static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
	struct bcm63xx_req *breq)
{
	int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
	unsigned int bytes_left = breq->req.length - breq->offset;
	const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
		iudma->max_pkt : IUDMA_MAX_FRAGMENT;

	iudma->n_bds_used = 0;
	breq->bd_bytes = 0;
	breq->iudma = iudma;

	/* exact multiple of max_pkt + req.zero => send a trailing ZLP */
	if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
		extra_zero_pkt = 1;

	do {
		struct bcm_enet_desc *d = iudma->write_bd;
		u32 dmaflags = 0;
		unsigned int n_bytes;

		/* wrap the write pointer at the end of the BD ring */
		if (d == iudma->end_bd) {
			dmaflags |= DMADESC_WRAP_MASK;
			iudma->write_bd = iudma->bd_ring;
		} else {
			iudma->write_bd++;
		}
		iudma->n_bds_used++;

		n_bytes = min_t(int, bytes_left, max_bd_bytes);
		if (n_bytes)
			dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
		else
			/* NOTE(review): truncated — continuation line lost */
			dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |

		dmaflags |= DMADESC_OWNER_MASK;
		if (first_bd) {
			dmaflags |= DMADESC_SOP_MASK;
			first_bd = 0;
		}

		/*
		 * extra_zero_pkt forces one more iteration through the loop
		 * after all data is queued up, to send the zero packet
		 */
		if (extra_zero_pkt && !bytes_left)
			extra_zero_pkt = 0;

		/* RX: every BD ends a packet; TX: end on ring-full or done */
		if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
			(n_bytes == bytes_left && !extra_zero_pkt)) {
			last_bd = 1;
			dmaflags |= DMADESC_EOP_MASK;
		}

		d->address = breq->req.dma + breq->offset;
		mb();	/* address must be visible before OWNER is set */
		d->len_stat = dmaflags;

		breq->offset += n_bytes;
		breq->bd_bytes += n_bytes;
		bytes_left -= n_bytes;
	} while (!last_bd);

	/* start (or restart) the channel */
	usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
		ENETDMAC_CHANCFG_REG(iudma->ch_idx));
}
644 
654 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
655 {
656  int i, actual_len = 0;
657  struct bcm_enet_desc *d = iudma->read_bd;
658 
659  if (!iudma->n_bds_used)
660  return -EINVAL;
661 
662  for (i = 0; i < iudma->n_bds_used; i++) {
663  u32 dmaflags;
664 
665  dmaflags = d->len_stat;
666 
667  if (dmaflags & DMADESC_OWNER_MASK)
668  return -EBUSY;
669 
670  actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
671  DMADESC_LENGTH_SHIFT;
672  if (d == iudma->end_bd)
673  d = iudma->bd_ring;
674  else
675  d++;
676  }
677 
678  iudma->read_bd = d;
679  iudma->n_bds_used = 0;
680  return actual_len;
681 }
682 
/*
 * iudma_reset_channel - Stop and reinitialize one IUDMA channel.
 * @udc: Reference to the device controller.
 * @iudma: Channel to reset.
 *
 * Disables the channel, waits (bounded by IUDMA_RESET_TIMEOUT_US) for the
 * hardware to quiesce — flushing the TX FIFO repeatedly if needed — then
 * clears all BDs and reprograms IRQs, burst size and the ring base.
 *
 * NOTE(review): the timeout variable declaration and the busy-wait loop
 * header were lost in extraction, which is why a bare `if (!timeout--)`
 * and an extra closing brace appear below; the surviving text is kept
 * verbatim — confirm against the original source.
 */
static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
{
	struct bcm_enet_desc *d;
	int ch_idx = iudma->ch_idx;

	/* RX channels: flush the endpoint FIFO up front */
	if (!iudma->is_tx)
		bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));

	/* stop DMA, then wait for the hardware to wrap up */
	usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));

	/* NOTE(review): truncated loop condition — continuation lost */
	while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
		udelay(1);

	/* repeatedly flush the FIFO data until the BD completes */
	if (iudma->is_tx && iudma->ep_num >= 0)
		bcm63xx_fifo_reset_ep(udc, iudma->ep_num);

	if (!timeout--) {
		dev_err(udc->dev, "can't reset IUDMA channel %d\n",
			ch_idx);
		break;
	}
	if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
		dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
			ch_idx);
		usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
			ENETDMAC_CHANCFG_REG(ch_idx));
	}
	}
	/* acknowledge any pending channel interrupts */
	usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));

	/* don't leave "live" HW-owned entries for the next guy to step on */
	for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
		d->len_stat = 0;
	mb();

	iudma->read_bd = iudma->write_bd = iudma->bd_ring;
	iudma->n_bds_used = 0;

	/* set up IRQs, UBUS burst size, and BD base for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
		ENETDMAC_IRMASK_REG(ch_idx));
	usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));

	usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
	usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
}
738 
744 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
745 {
746  struct iudma_ch *iudma = &udc->iudma[ch_idx];
747  const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
748  unsigned int n_bds = cfg->n_bds;
749  struct bcm63xx_ep *bep = NULL;
750 
751  iudma->ep_num = cfg->ep_num;
752  iudma->ch_idx = ch_idx;
753  iudma->is_tx = !!(ch_idx & 0x01);
754  if (iudma->ep_num >= 0) {
755  bep = &udc->bep[iudma->ep_num];
756  bep->iudma = iudma;
757  INIT_LIST_HEAD(&bep->queue);
758  }
759 
760  iudma->bep = bep;
761  iudma->udc = udc;
762 
763  /* ep0 is always active; others are controlled by the gadget driver */
764  if (iudma->ep_num <= 0)
765  iudma->enabled = true;
766 
767  iudma->n_bds = n_bds;
768  iudma->bd_ring = dmam_alloc_coherent(udc->dev,
769  n_bds * sizeof(struct bcm_enet_desc),
770  &iudma->bd_ring_dma, GFP_KERNEL);
771  if (!iudma->bd_ring)
772  return -ENOMEM;
773  iudma->end_bd = &iudma->bd_ring[n_bds - 1];
774 
775  return 0;
776 }
777 
784 static int iudma_init(struct bcm63xx_udc *udc)
785 {
786  int i, rc;
787 
788  usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
789 
790  for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
791  rc = iudma_init_channel(udc, i);
792  if (rc)
793  return rc;
794  iudma_reset_channel(udc, &udc->iudma[i]);
795  }
796 
797  usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
798  return 0;
799 }
800 
807 static void iudma_uninit(struct bcm63xx_udc *udc)
808 {
809  int i;
810 
811  usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
812 
813  for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
814  iudma_reset_channel(udc, &udc->iudma[i]);
815 
816  usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
817 }
818 
819 /***********************************************************************
820  * Other low-level USBD operations
821  ***********************************************************************/
822 
/*
 * bcm63xx_set_ctrl_irqs - Mask/unmask the USBD control-event interrupts.
 * @udc: Reference to the device controller.
 * @enable_irqs: true to enable, false to mask.
 *
 * NOTE(review): the lines computing `val` (the event bitmask) were lost
 * in extraction — as shown, `val` is used uninitialized; confirm against
 * the original source.
 */
static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
{
	u32 val;

	usbd_writel(udc, 0, USBD_STATUS_REG);

	usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
	/* clear any stale pending events */
	usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
}
842 
/*
 * bcm63xx_select_phy_mode - Switch the shared PHY between host/device mode.
 * @udc: Reference to the device controller.
 * @is_device: true for device mode, false for host.
 *
 * NOTE(review): several register read/write lines were lost in extraction
 * (the pinmux read/write on 6328, the USBH UTMI control read/write, and
 * the SWAP register read, set and write). The surviving text is kept
 * verbatim — confirm against the original source.
 */
static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (BCMCPU_IS_6328()) {
		/* configure pinmux to sense VBUS signal */
		val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
	}

	if (is_device) {
		/* device mode: mask the host and tri-state the driver */
		val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	} else {
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	}

	if (is_device)
	else
		val &= ~USBH_PRIV_SWAP_USBD_MASK;
}
884 
/*
 * bcm63xx_select_pullup - Enable/disable the D+ pullup via UTMI NODRIV.
 * @udc: Reference to the device controller.
 * @is_on: true to present the device to the host, false to disconnect.
 *
 * NOTE(review): the UTMI control register read and write-back lines were
 * lost in extraction; only the bit manipulation survives — confirm
 * against the original source.
 */
static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
{
	u32 val, portmask = BIT(udc->pd->port_no);

	if (is_on)
		val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
	else
		val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
}
905 
/*
 * bcm63xx_uninit_udc_hw - Tear down what bcm63xx_init_udc_hw() set up.
 * @udc: Reference to the device controller.
 *
 * Clocks are briefly re-enabled so the DMA registers can be safely
 * accessed during the IUDMA shutdown, then released for good.
 */
static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
{
	set_clocks(udc, true);
	iudma_uninit(udc);
	set_clocks(udc, false);

	clk_put(udc->usbd_clk);
	clk_put(udc->usbh_clk);
}
922 
/*
 * bcm63xx_init_udc_hw - One-time hardware/software initialization.
 * @udc: Reference to the device controller.
 *
 * Allocates the ep0 control buffer, initializes the gadget endpoint list,
 * acquires clocks, programs the core control/straps registers and brings
 * up IUDMA.  Returns 0 or a negative errno on clock/alloc failure.
 *
 * NOTE(review): several lines were lost in extraction — the devm_kzalloc
 * call for ep0_ctrl_buf (its GFP_KERNEL tail survives), parts of the
 * USBD_CONTROL/STRAPS value computation, and the EVENT_IRQ_CFG_HI value.
 * Also note the final `return 0` ignores the rc from iudma_init() even
 * though the error path runs — confirm whether the caller should see rc.
 */
static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
{
	int i, rc = 0;
	u32 val;

	/* NOTE(review): truncated allocation — call head lost */
		GFP_KERNEL);
	if (!udc->ep0_ctrl_buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&udc->gadget.ep_list);
	for (i = 0; i < BCM63XX_NUM_EP; i++) {
		struct bcm63xx_ep *bep = &udc->bep[i];

		bep->ep.name = bcm63xx_ep_name[i];
		bep->ep_num = i;
		bep->ep.ops = &bcm63xx_udc_ep_ops;
		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
		bep->halted = 0;
		bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
		bep->udc = udc;
		bep->ep.desc = NULL;
		INIT_LIST_HEAD(&bep->queue);
	}

	/* ep0 is special: exposed via gadget.ep0, not on the ep_list */
	udc->gadget.ep0 = &udc->bep[0].ep;
	list_del(&udc->bep[0].ep.ep_list);

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->ep0state = EP0_SHUTDOWN;

	udc->usbh_clk = clk_get(udc->dev, "usbh");
	if (IS_ERR(udc->usbh_clk))
		return -EIO;

	udc->usbd_clk = clk_get(udc->dev, "usbd");
	if (IS_ERR(udc->usbd_clk)) {
		clk_put(udc->usbh_clk);
		return -EIO;
	}

	set_clocks(udc, true);

	/* NOTE(review): truncated expression — value head lost */
		(irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
	usbd_writel(udc, val, USBD_CONTROL_REG);

	if (udc->gadget.max_speed == USB_SPEED_HIGH)
	else
	usbd_writel(udc, val, USBD_STRAPS_REG);

	bcm63xx_set_ctrl_irqs(udc, false);

	usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);

	usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);

	rc = iudma_init(udc);
	set_clocks(udc, false);
	if (rc)
		bcm63xx_uninit_udc_hw(udc);

	return 0;
}
1002 
1003 /***********************************************************************
1004  * Standard EP gadget operations
1005  ***********************************************************************/
1006 
1015 static int bcm63xx_ep_enable(struct usb_ep *ep,
1016  const struct usb_endpoint_descriptor *desc)
1017 {
1018  struct bcm63xx_ep *bep = our_ep(ep);
1019  struct bcm63xx_udc *udc = bep->udc;
1020  struct iudma_ch *iudma = bep->iudma;
1021  unsigned long flags;
1022 
1023  if (!ep || !desc || ep->name == bcm63xx_ep0name)
1024  return -EINVAL;
1025 
1026  if (!udc->driver)
1027  return -ESHUTDOWN;
1028 
1029  spin_lock_irqsave(&udc->lock, flags);
1030  if (iudma->enabled) {
1031  spin_unlock_irqrestore(&udc->lock, flags);
1032  return -EINVAL;
1033  }
1034 
1035  iudma->enabled = true;
1036  BUG_ON(!list_empty(&bep->queue));
1037 
1038  iudma_reset_channel(udc, iudma);
1039 
1040  bep->halted = 0;
1041  bcm63xx_set_stall(udc, bep, false);
1042  clear_bit(bep->ep_num, &udc->wedgemap);
1043 
1044  ep->desc = desc;
1045  ep->maxpacket = usb_endpoint_maxp(desc);
1046 
1047  spin_unlock_irqrestore(&udc->lock, flags);
1048  return 0;
1049 }
1050 
/*
 * bcm63xx_ep_disable - Disable one endpoint and flush its queue.
 * @ep: Endpoint to disable.
 *
 * Resets the backing IUDMA channel, then completes every queued request
 * with -ESHUTDOWN.  The lock is dropped around each completion callback
 * (gadget completion handlers may requeue or sleep), so the statement
 * order here is deliberate and must not be rearranged.
 *
 * Returns 0, or -EINVAL if the endpoint was not enabled.
 */
static int bcm63xx_ep_disable(struct usb_ep *ep)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct iudma_ch *iudma = bep->iudma;
	struct list_head *pos, *n;
	unsigned long flags;

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (!iudma->enabled) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EINVAL;
	}
	iudma->enabled = false;

	iudma_reset_channel(udc, iudma);

	if (!list_empty(&bep->queue)) {
		/* _safe iteration: entries are deleted as we walk */
		list_for_each_safe(pos, n, &bep->queue) {
			struct bcm63xx_req *breq =
				list_entry(pos, struct bcm63xx_req, queue);

			usb_gadget_unmap_request(&udc->gadget, &breq->req,
						 iudma->is_tx);
			list_del(&breq->queue);
			breq->req.status = -ESHUTDOWN;

			/* drop the lock across the completion callback */
			spin_unlock_irqrestore(&udc->lock, flags);
			breq->req.complete(&iudma->bep->ep, &breq->req);
			spin_lock_irqsave(&udc->lock, flags);
		}
	}
	ep->desc = NULL;

	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
1095 
1101 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1102  gfp_t mem_flags)
1103 {
1104  struct bcm63xx_req *breq;
1105 
1106  breq = kzalloc(sizeof(*breq), mem_flags);
1107  if (!breq)
1108  return NULL;
1109  return &breq->req;
1110 }
1111 
/*
 * bcm63xx_udc_free_request - Free a request allocated by alloc_request.
 * @ep: Endpoint associated with the request (unused here).
 * @req: Request to free.
 */
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	kfree(our_req(req));
}
1123 
1138 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
1139  gfp_t mem_flags)
1140 {
1141  struct bcm63xx_ep *bep = our_ep(ep);
1142  struct bcm63xx_udc *udc = bep->udc;
1143  struct bcm63xx_req *breq = our_req(req);
1144  unsigned long flags;
1145  int rc = 0;
1146 
1147  if (unlikely(!req || !req->complete || !req->buf || !ep))
1148  return -EINVAL;
1149 
1150  req->actual = 0;
1151  req->status = 0;
1152  breq->offset = 0;
1153 
1154  if (bep == &udc->bep[0]) {
1155  /* only one reply per request, please */
1156  if (udc->ep0_reply)
1157  return -EINVAL;
1158 
1159  udc->ep0_reply = req;
1160  schedule_work(&udc->ep0_wq);
1161  return 0;
1162  }
1163 
1164  spin_lock_irqsave(&udc->lock, flags);
1165  if (!bep->iudma->enabled) {
1166  rc = -ESHUTDOWN;
1167  goto out;
1168  }
1169 
1170  rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1171  if (rc == 0) {
1172  list_add_tail(&breq->queue, &bep->queue);
1173  if (list_is_singular(&bep->queue))
1174  iudma_write(udc, bep->iudma, breq);
1175  }
1176 
1177 out:
1178  spin_unlock_irqrestore(&udc->lock, flags);
1179  return rc;
1180 }
1181 
/*
 * bcm63xx_udc_dequeue - Remove a pending request from an endpoint.
 * @ep: Endpoint owning the request.
 * @req: Request to cancel.
 *
 * If @req is the one currently in flight, the whole channel is reset and
 * the next queued request (if any) is restarted.
 *
 * NOTE(review): @req is unmapped and completed with -ESHUTDOWN even when
 * the queue is empty (rc = -EINVAL) or @req was never found on the
 * queue; confirm whether completing an un-dequeued request is intended.
 */
static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	struct bcm63xx_ep *bep = our_ep(ep);
	struct bcm63xx_udc *udc = bep->udc;
	struct bcm63xx_req *breq = our_req(req), *cur;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&udc->lock, flags);
	if (list_empty(&bep->queue)) {
		rc = -EINVAL;
		goto out;
	}

	cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
	usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);

	if (breq == cur) {
		/* cancelling the in-flight request: reset the channel */
		iudma_reset_channel(udc, bep->iudma);
		list_del(&breq->queue);

		/* restart the next queued request, if any */
		if (!list_empty(&bep->queue)) {
			struct bcm63xx_req *next;

			next = list_first_entry(&bep->queue,
				struct bcm63xx_req, queue);
			iudma_write(udc, bep->iudma, next);
		}
	} else {
		list_del(&breq->queue);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);

	/* complete outside the lock; see NOTE above re: unconditional call */
	req->status = -ESHUTDOWN;
	req->complete(ep, req);

	return rc;
}
1231 
1239 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1240 {
1241  struct bcm63xx_ep *bep = our_ep(ep);
1242  struct bcm63xx_udc *udc = bep->udc;
1243  unsigned long flags;
1244 
1245  spin_lock_irqsave(&udc->lock, flags);
1246  bcm63xx_set_stall(udc, bep, !!value);
1247  bep->halted = value;
1248  spin_unlock_irqrestore(&udc->lock, flags);
1249 
1250  return 0;
1251 }
1252 
1259 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1260 {
1261  struct bcm63xx_ep *bep = our_ep(ep);
1262  struct bcm63xx_udc *udc = bep->udc;
1263  unsigned long flags;
1264 
1265  spin_lock_irqsave(&udc->lock, flags);
1266  set_bit(bep->ep_num, &udc->wedgemap);
1267  bcm63xx_set_stall(udc, bep, true);
1268  spin_unlock_irqrestore(&udc->lock, flags);
1269 
1270  return 0;
1271 }
1272 
/* Endpoint operations exposed to the gadget framework. */
static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
	.enable = bcm63xx_ep_enable,
	.disable = bcm63xx_ep_disable,

	.alloc_request = bcm63xx_udc_alloc_request,
	.free_request = bcm63xx_udc_free_request,

	.queue = bcm63xx_udc_queue,
	.dequeue = bcm63xx_udc_dequeue,

	.set_halt = bcm63xx_udc_set_halt,
	.set_wedge = bcm63xx_udc_set_wedge,
};
1286 
1287 /***********************************************************************
1288  * EP0 handling
1289  ***********************************************************************/
1290 
1296 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1297  struct usb_ctrlrequest *ctrl)
1298 {
1299  int rc;
1300 
1301  spin_unlock_irq(&udc->lock);
1302  rc = udc->driver->setup(&udc->gadget, ctrl);
1303  spin_lock_irq(&udc->lock);
1304  return rc;
1305 }
1306 
/*
 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
 * @udc: Reference to the device controller.
 *
 * The hardware auto-acks SET_CONFIGURATION, so the gadget driver never
 * sees the real packet; rebuild one from udc->cfg and deliver it.
 *
 * NOTE(review): the ctrl.bRequestType/bRequest assignments and the
 * dev_warn() call head were lost in extraction; the surviving text is
 * kept verbatim — confirm against the original source.
 */
static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.wValue = cpu_to_le16(udc->cfg);
	ctrl.wIndex = 0;
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		"hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
			udc->cfg);
	}
	return rc;
}
1338 
/*
 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
 * @udc: Reference to the device controller.
 *
 * As with SET_CONFIGURATION, the hardware auto-acks SET_INTERFACE;
 * rebuild the packet from udc->iface/alt_iface and deliver it.
 *
 * NOTE(review): the ctrl.bRequestType/bRequest assignments and the
 * dev_warn() call head were lost in extraction; the surviving text is
 * kept verbatim — confirm against the original source.
 */
static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
{
	struct usb_ctrlrequest ctrl;
	int rc;

	ctrl.wValue = cpu_to_le16(udc->alt_iface);
	ctrl.wIndex = cpu_to_le16(udc->iface);
	ctrl.wLength = 0;

	rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
	if (rc < 0) {
		"hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
			udc->iface, udc->alt_iface);
	}
	return rc;
}
1362 
1369 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1370  struct usb_request *req)
1371 {
1372  struct bcm63xx_req *breq = our_req(req);
1373  struct iudma_ch *iudma = &udc->iudma[ch_idx];
1374 
1375  BUG_ON(udc->ep0_request);
1376  udc->ep0_request = req;
1377 
1378  req->actual = 0;
1379  breq->offset = 0;
1380  usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1381  iudma_write(udc, iudma, breq);
1382 }
1383 
1390 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1391  struct usb_request *req, int status)
1392 {
1393  req->status = status;
1394  if (status)
1395  req->actual = 0;
1396  if (req->complete) {
1397  spin_unlock_irq(&udc->lock);
1398  req->complete(&udc->bep[0].ep, req);
1399  spin_lock_irq(&udc->lock);
1400  }
1401 }
1402 
1409 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1410 {
1411  struct usb_request *req = udc->ep0_reply;
1412 
1413  udc->ep0_reply = NULL;
1414  usb_gadget_unmap_request(&udc->gadget, req, is_tx);
1415  if (udc->ep0_request == req) {
1416  udc->ep0_req_completed = 0;
1417  udc->ep0_request = NULL;
1418  }
1419  bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1420 }
1421 
1427 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1428 {
1429  struct usb_request *req = udc->ep0_request;
1430 
1431  udc->ep0_req_completed = 0;
1432  udc->ep0_request = NULL;
1433 
1434  return req->actual;
1435 }
1436 
1446 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1447  int length)
1448 {
1449  struct usb_request *req = &udc->ep0_ctrl_req.req;
1450 
1451  req->buf = udc->ep0_ctrl_buf;
1452  req->length = length;
1453  req->complete = NULL;
1454 
1455  bcm63xx_ep0_map_write(udc, ch_idx, req);
1456 }
1457 
/*
 * bcm63xx_ep0_do_setup - Parse a SETUP packet received on ep0.
 * @udc: Reference to the device controller.
 *
 * Returns the next ep0 state: REQUEUE for stray/short/rejected packets,
 * or the IN/OUT data-phase setup state for packets with a data stage.
 *
 * NOTE(review): the dev_warn() call head for the "malformed SETUP"
 * message was lost in extraction; the surviving text is kept verbatim.
 */
static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
{
	int rc;
	struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;

	rc = bcm63xx_ep0_read_complete(udc);

	if (rc < 0) {
		dev_err(udc->dev, "missing SETUP packet\n");
		return EP0_IDLE;
	}

	/*
	 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
	 * ALWAYS deliver these 100% of the time, so if we happen to see one,
	 * just throw it away.
	 */
	if (rc == 0)
		return EP0_REQUEUE;

	/* Drop malformed SETUP packets */
	if (rc != sizeof(*ctrl)) {
		"malformed SETUP packet (%d bytes)\n", rc);
		return EP0_REQUEUE;
	}

	/* Process new SETUP packet arriving on ep0 */
	rc = bcm63xx_ep0_setup_callback(udc, ctrl);
	if (rc < 0) {
		/* gadget driver rejected the request: stall ep0 */
		bcm63xx_set_stall(udc, &udc->bep[0], true);
		return EP0_REQUEUE;
	}

	if (!ctrl->wLength)
		return EP0_REQUEUE;
	else if (ctrl->bRequestType & USB_DIR_IN)
		return EP0_IN_DATA_PHASE_SETUP;
	else
		return EP0_OUT_DATA_PHASE_SETUP;
}
1507 
/*
 * bcm63xx_ep0_do_idle - Dispatch one latched ep0 event from the IDLE state.
 * @udc: Reference to the device controller.
 *
 * Handles, in priority order: reset, spoofed SET_CONFIGURATION/
 * SET_INTERFACE, a completed SETUP read, shutdown, and a stray reply.
 * Returns 0 if an event was consumed, -EAGAIN if there is nothing to do.
 *
 * NOTE(review): the ep0state assignments following the spoof_set_cfg/
 * spoof_set_iface calls, and the call head paired with the dangling
 * "&udc->ep0_ctrl_req.req, 0);" arguments, were lost in extraction;
 * the surviving text is kept verbatim.
 */
static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
{
	if (udc->ep0_req_reset) {
		udc->ep0_req_reset = 0;
	} else if (udc->ep0_req_set_cfg) {
		udc->ep0_req_set_cfg = 0;
		if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
	} else if (udc->ep0_req_set_iface) {
		udc->ep0_req_set_iface = 0;
		if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
	} else if (udc->ep0_req_completed) {
		/* a SETUP packet finished arriving: parse it */
		udc->ep0state = bcm63xx_ep0_do_setup(udc);
		return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
	} else if (udc->ep0_req_shutdown) {
		udc->ep0_req_shutdown = 0;
		udc->ep0_req_completed = 0;
		udc->ep0_request = NULL;
		iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
			&udc->ep0_ctrl_req.req, 0);

		/* bcm63xx_udc_pullup() is waiting for this */
		mb();
		udc->ep0state = EP0_SHUTDOWN;
	} else if (udc->ep0_reply) {
		/*
		 * This could happen if a USB RESET shows up during an ep0
		 * transaction (especially if a laggy driver like gadgetfs
		 * is in use).
		 */
		dev_warn(udc->dev, "nuking unexpected reply\n");
		bcm63xx_ep0_nuke_reply(udc, 0);
	} else {
		return -EAGAIN;
	}

	return 0;
}
1559 
1566 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1567 {
1568  enum bcm63xx_ep0_state ep0state = udc->ep0state;
1569  bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1570 
1571  switch (udc->ep0state) {
1572  case EP0_REQUEUE:
1573  /* set up descriptor to receive SETUP packet */
1574  bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1576  ep0state = EP0_IDLE;
1577  break;
1578  case EP0_IDLE:
1579  return bcm63xx_ep0_do_idle(udc);
1581  /*
1582  * Normal case: TX request is in ep0_reply (queued by the
1583  * callback), or will be queued shortly. When it's here,
1584  * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1585  *
1586  * Shutdown case: Stop waiting for the reply. Just
1587  * REQUEUE->IDLE. The gadget driver is NOT expected to
1588  * queue anything else now.
1589  */
1590  if (udc->ep0_reply) {
1591  bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1592  udc->ep0_reply);
1593  ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1594  } else if (shutdown) {
1595  ep0state = EP0_REQUEUE;
1596  }
1597  break;
1599  /*
1600  * Normal case: TX packet (ep0_reply) is in flight; wait for
1601  * it to finish, then go back to REQUEUE->IDLE.
1602  *
1603  * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1604  * completion to the gadget driver, then REQUEUE->IDLE.
1605  */
1606  if (udc->ep0_req_completed) {
1607  udc->ep0_reply = NULL;
1608  bcm63xx_ep0_read_complete(udc);
1609  /*
1610  * the "ack" sometimes gets eaten (see
1611  * bcm63xx_ep0_do_idle)
1612  */
1613  ep0state = EP0_REQUEUE;
1614  } else if (shutdown) {
1615  iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1616  bcm63xx_ep0_nuke_reply(udc, 1);
1617  ep0state = EP0_REQUEUE;
1618  }
1619  break;
1620  }
1622  /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1623  if (udc->ep0_reply) {
1624  bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1625  udc->ep0_reply);
1626  ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1627  } else if (shutdown) {
1628  ep0state = EP0_REQUEUE;
1629  }
1630  break;
1632  /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1633  if (udc->ep0_req_completed) {
1634  udc->ep0_reply = NULL;
1635  bcm63xx_ep0_read_complete(udc);
1636 
1637  /* send 0-byte ack to host */
1638  bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1639  ep0state = EP0_OUT_STATUS_PHASE;
1640  } else if (shutdown) {
1641  iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1642  bcm63xx_ep0_nuke_reply(udc, 0);
1643  ep0state = EP0_REQUEUE;
1644  }
1645  break;
1646  }
1647  case EP0_OUT_STATUS_PHASE:
1648  /*
1649  * Normal case: 0-byte OUT ack packet is in flight; wait
1650  * for it to finish, then go back to REQUEUE->IDLE.
1651  *
1652  * Shutdown case: just cancel the transmission. Don't bother
1653  * calling the completion, because it originated from this
1654  * function anyway. Then go back to REQUEUE->IDLE.
1655  */
1656  if (udc->ep0_req_completed) {
1657  bcm63xx_ep0_read_complete(udc);
1658  ep0state = EP0_REQUEUE;
1659  } else if (shutdown) {
1660  iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1661  udc->ep0_request = NULL;
1662  ep0state = EP0_REQUEUE;
1663  }
1664  break;
1665  case EP0_IN_FAKE_STATUS_PHASE: {
1666  /*
1667  * Normal case: we spoofed a SETUP packet and are now
1668  * waiting for the gadget driver to send a 0-byte reply.
1669  * This doesn't actually get sent to the HW because the
1670  * HW has already sent its own reply. Once we get the
1671  * response, return to IDLE.
1672  *
1673  * Shutdown case: return to IDLE immediately.
1674  *
1675  * Note that the ep0 RX descriptor has remained queued
1676  * (and possibly unfilled) during this entire transaction.
1677  * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1678  * or SET_INTERFACE transactions.
1679  */
1680  struct usb_request *r = udc->ep0_reply;
1681 
1682  if (!r) {
1683  if (shutdown)
1684  ep0state = EP0_IDLE;
1685  break;
1686  }
1687 
1688  bcm63xx_ep0_complete(udc, r, 0);
1689  udc->ep0_reply = NULL;
1690  ep0state = EP0_IDLE;
1691  break;
1692  }
1693  case EP0_SHUTDOWN:
1694  break;
1695  }
1696 
1697  if (udc->ep0state == ep0state)
1698  return -EAGAIN;
1699 
1700  udc->ep0state = ep0state;
1701  return 0;
1702 }
1703 
1718 static void bcm63xx_ep0_process(struct work_struct *w)
1719 {
1720  struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1721  spin_lock_irq(&udc->lock);
1722  while (bcm63xx_ep0_one_round(udc) == 0)
1723  ;
1724  spin_unlock_irq(&udc->lock);
1725 }
1726 
1727 /***********************************************************************
1728  * Standard UDC gadget operations
1729  ***********************************************************************/
1730 
1735 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1736 {
1737  struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1738 
1739  return (usbd_readl(udc, USBD_STATUS_REG) &
1741 }
1742 
/**
 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
 * @gadget: USB slave device.
 * @is_on: nonzero to enable the pullup (connect), 0 to disable (disconnect).
 *
 * Connect: only valid from the EP0_SHUTDOWN state; reprograms the FIFOs
 * and endpoints, clears all stalls/wedges, unmasks control IRQs, and
 * asserts the pullup.
 *
 * Disconnect: drops the pullup, then asks the ep0 worker to shut down and
 * polls (outside the lock, since the worker needs it) until the state
 * machine reaches EP0_SHUTDOWN before masking IRQs.
 *
 * Returns 0 on success, -EINVAL if the requested transition is not legal
 * from the current ep0 state.
 */
static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct bcm63xx_udc *udc = gadget_to_udc(gadget);
	unsigned long flags;
	int i, rc = -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (is_on && udc->ep0state == EP0_SHUTDOWN) {
		udc->gadget.speed = USB_SPEED_UNKNOWN;
		udc->ep0state = EP0_REQUEUE;
		bcm63xx_fifo_setup(udc);
		bcm63xx_fifo_reset(udc);
		bcm63xx_ep_setup(udc);

		/* start from a clean slate: no wedged/stalled endpoints */
		bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
		for (i = 0; i < BCM63XX_NUM_EP; i++)
			bcm63xx_set_stall(udc, &udc->bep[i], false);

		bcm63xx_set_ctrl_irqs(udc, true);
		bcm63xx_select_pullup(gadget_to_udc(gadget), true);
		rc = 0;
	} else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
		bcm63xx_select_pullup(gadget_to_udc(gadget), false);

		udc->ep0_req_shutdown = 1;
		spin_unlock_irqrestore(&udc->lock, flags);

		/*
		 * The worker takes udc->lock itself, so we must poll
		 * unlocked; ep0_do_idle() issues an mb() before setting
		 * EP0_SHUTDOWN so this read sees the final state.
		 */
		while (1) {
			schedule_work(&udc->ep0_wq);
			if (udc->ep0state == EP0_SHUTDOWN)
				break;
			msleep(50);
		}
		bcm63xx_set_ctrl_irqs(udc, false);
		cancel_work_sync(&udc->ep0_wq);
		return 0;
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return rc;
}
1791 
1797 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1798  struct usb_gadget_driver *driver)
1799 {
1800  struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1801  unsigned long flags;
1802 
1803  if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1804  !driver->setup)
1805  return -EINVAL;
1806  if (!udc)
1807  return -ENODEV;
1808  if (udc->driver)
1809  return -EBUSY;
1810 
1811  spin_lock_irqsave(&udc->lock, flags);
1812 
1813  set_clocks(udc, true);
1814  bcm63xx_fifo_setup(udc);
1815  bcm63xx_ep_init(udc);
1816  bcm63xx_ep_setup(udc);
1817  bcm63xx_fifo_reset(udc);
1818  bcm63xx_select_phy_mode(udc, true);
1819 
1820  udc->driver = driver;
1821  driver->driver.bus = NULL;
1822  udc->gadget.dev.driver = &driver->driver;
1823  udc->gadget.dev.of_node = udc->dev->of_node;
1824 
1825  spin_unlock_irqrestore(&udc->lock, flags);
1826 
1827  return 0;
1828 }
1829 
1835 static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1836  struct usb_gadget_driver *driver)
1837 {
1838  struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1839  unsigned long flags;
1840 
1841  spin_lock_irqsave(&udc->lock, flags);
1842 
1843  udc->driver = NULL;
1844  udc->gadget.dev.driver = NULL;
1845 
1846  /*
1847  * If we switch the PHY too abruptly after dropping D+, the host
1848  * will often complain:
1849  *
1850  * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
1851  */
1852  msleep(100);
1853 
1854  bcm63xx_select_phy_mode(udc, false);
1855  set_clocks(udc, false);
1856 
1857  spin_unlock_irqrestore(&udc->lock, flags);
1858 
1859  return 0;
1860 }
1861 
/* usb_gadget_ops dispatch table registered with the UDC core */
static const struct usb_gadget_ops bcm63xx_udc_ops = {
	.get_frame = bcm63xx_udc_get_frame,
	.pullup = bcm63xx_udc_pullup,
	.udc_start = bcm63xx_udc_start,
	.udc_stop = bcm63xx_udc_stop,
};
1868 
1869 /***********************************************************************
1870  * IRQ handling
1871  ***********************************************************************/
1872 
1882 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1883 {
1884  u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1885 
1888  udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1890  bcm63xx_ep_setup(udc);
1891 }
1892 
1900 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1901 {
1902  u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1903  enum usb_device_speed oldspeed = udc->gadget.speed;
1904 
1905  switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1906  case BCM63XX_SPD_HIGH:
1907  udc->gadget.speed = USB_SPEED_HIGH;
1908  break;
1909  case BCM63XX_SPD_FULL:
1910  udc->gadget.speed = USB_SPEED_FULL;
1911  break;
1912  default:
1913  /* this should never happen */
1914  udc->gadget.speed = USB_SPEED_UNKNOWN;
1915  dev_err(udc->dev,
1916  "received SETUP packet with invalid link speed\n");
1917  return 0;
1918  }
1919 
1920  if (udc->gadget.speed != oldspeed) {
1921  dev_info(udc->dev, "link up, %s-speed mode\n",
1922  udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1923  return 1;
1924  } else {
1925  return 0;
1926  }
1927 }
1928 
1940 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1941 {
1942  int i;
1943 
1944  for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1945  bcm63xx_set_stall(udc, &udc->bep[i], new_status);
1946  if (!new_status)
1947  clear_bit(i, &udc->wedgemap);
1948  }
1949 }
1950 
1959 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1960 {
1961  struct bcm63xx_udc *udc = dev_id;
1962  u32 stat;
1963  bool disconnected = false;
1964 
1965  stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1966  usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1967 
1968  usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1969 
1970  spin_lock(&udc->lock);
1971  if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
1972  /* VBUS toggled */
1973 
1974  if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1976  udc->gadget.speed != USB_SPEED_UNKNOWN)
1977  dev_info(udc->dev, "link down\n");
1978 
1979  udc->gadget.speed = USB_SPEED_UNKNOWN;
1980  disconnected = true;
1981  }
1982  if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1983  bcm63xx_fifo_setup(udc);
1984  bcm63xx_fifo_reset(udc);
1985  bcm63xx_ep_setup(udc);
1986 
1987  bcm63xx_update_wedge(udc, false);
1988 
1989  udc->ep0_req_reset = 1;
1990  schedule_work(&udc->ep0_wq);
1991  disconnected = true;
1992  }
1993  if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
1994  if (bcm63xx_update_link_speed(udc)) {
1995  bcm63xx_fifo_setup(udc);
1996  bcm63xx_ep_setup(udc);
1997  }
1998  bcm63xx_update_wedge(udc, true);
1999  }
2000  if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2001  bcm63xx_update_cfg_iface(udc);
2002  udc->ep0_req_set_cfg = 1;
2003  schedule_work(&udc->ep0_wq);
2004  }
2005  if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2006  bcm63xx_update_cfg_iface(udc);
2007  udc->ep0_req_set_iface = 1;
2008  schedule_work(&udc->ep0_wq);
2009  }
2010  spin_unlock(&udc->lock);
2011 
2012  if (disconnected && udc->driver)
2013  udc->driver->disconnect(&udc->gadget);
2014 
2015  return IRQ_HANDLED;
2016 }
2017 
/**
 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
 * @irq: IRQ number (unused).
 * @dev_id: Reference to the IUDMA channel that generated this interrupt.
 *
 * For the two ep0 channels this advances the single pending
 * udc->ep0_request (possibly resubmitting further BDs for the same
 * request) and kicks the ep0 workqueue when it finishes.  For all other
 * channels it advances the request at the head of the endpoint queue and
 * starts the next queued request, if any.  Completion callbacks are
 * invoked with the UDC lock released.
 */
static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
{
	struct iudma_ch *iudma = dev_id;
	struct bcm63xx_udc *udc = iudma->udc;
	struct bcm63xx_ep *bep;
	struct usb_request *req = NULL;
	struct bcm63xx_req *breq = NULL;
	int rc;
	bool is_done = false;

	spin_lock(&udc->lock);

	/* ack the BUFDONE interrupt for this channel */
	usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
			ENETDMAC_IR_REG(iudma->ch_idx));
	bep = iudma->bep;
	rc = iudma_read(udc, iudma);

	/* special handling for EP0 RX (0) and TX (1) */
	if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
	    iudma->ch_idx == IUDMA_EP0_TXCHAN) {
		req = udc->ep0_request;
		breq = our_req(req);

		/* a single request could require multiple submissions */
		if (rc >= 0) {
			req->actual += rc;

			/*
			 * done when the full length has transferred, or
			 * when the BD moved fewer bytes than it asked for
			 * (presumably a short packet — NOTE(review):
			 * confirm bd_bytes semantics against iudma_read())
			 */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				udc->ep0_req_completed = 1;
				is_done = true;
				schedule_work(&udc->ep0_wq);

				/* "actual" on a ZLP is 1 byte */
				req->actual = min(req->actual, req->length);
			} else {
				/* queue up the next BD (same request) */
				iudma_write(udc, iudma, breq);
			}
		}
	} else if (!list_empty(&bep->queue)) {
		breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
		req = &breq->req;

		if (rc >= 0) {
			req->actual += rc;

			/* same completion criteria as the ep0 case above */
			if (req->actual >= req->length || breq->bd_bytes > rc) {
				is_done = true;
				list_del(&breq->queue);

				req->actual = min(req->actual, req->length);

				/* start the next queued request, if any */
				if (!list_empty(&bep->queue)) {
					struct bcm63xx_req *next;

					next = list_first_entry(&bep->queue,
						struct bcm63xx_req, queue);
					iudma_write(udc, iudma, next);
				}
			} else {
				/* same request, next BD */
				iudma_write(udc, iudma, breq);
			}
		}
	}
	spin_unlock(&udc->lock);

	/* run the completion callback outside the lock */
	if (is_done) {
		usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
		if (req->complete)
			req->complete(&bep->ep, req);
	}

	return IRQ_HANDLED;
}
2102 
2103 /***********************************************************************
2104  * Debug filesystem
2105  ***********************************************************************/
2106 
2107 /*
2108  * bcm63xx_usbd_dbg_show - Show USBD controller state.
2109  * @s: seq_file to which the information will be written.
2110  * @p: Unused.
2111  *
2112  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2113  */
2114 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2115 {
2116  struct bcm63xx_udc *udc = s->private;
2117 
2118  if (!udc->driver)
2119  return -ENODEV;
2120 
2121  seq_printf(s, "ep0 state: %s\n",
2122  bcm63xx_ep0_state_names[udc->ep0state]);
2123  seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2124  udc->ep0_req_reset ? "reset " : "",
2125  udc->ep0_req_set_cfg ? "set_cfg " : "",
2126  udc->ep0_req_set_iface ? "set_iface " : "",
2127  udc->ep0_req_shutdown ? "shutdown " : "",
2128  udc->ep0_request ? "pending " : "",
2129  udc->ep0_req_completed ? "completed " : "",
2130  udc->ep0_reply ? "reply " : "");
2131  seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2132  udc->cfg, udc->iface, udc->alt_iface);
2133  seq_printf(s, "regs:\n");
2134  seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2135  usbd_readl(udc, USBD_CONTROL_REG),
2136  usbd_readl(udc, USBD_STRAPS_REG),
2137  usbd_readl(udc, USBD_STATUS_REG));
2138  seq_printf(s, " events: %08x; stall: %08x\n",
2139  usbd_readl(udc, USBD_EVENTS_REG),
2140  usbd_readl(udc, USBD_STALL_REG));
2141 
2142  return 0;
2143 }
2144 
2145 /*
2146  * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2147  * @s: seq_file to which the information will be written.
2148  * @p: Unused.
2149  *
2150  * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2151  */
2152 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2153 {
2154  struct bcm63xx_udc *udc = s->private;
2155  int ch_idx, i;
2156  u32 sram2, sram3;
2157 
2158  if (!udc->driver)
2159  return -ENODEV;
2160 
2161  for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2162  struct iudma_ch *iudma = &udc->iudma[ch_idx];
2163  struct list_head *pos;
2164 
2165  seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2166  switch (iudma_defaults[ch_idx].ep_type) {
2167  case BCMEP_CTRL:
2168  seq_printf(s, "control");
2169  break;
2170  case BCMEP_BULK:
2171  seq_printf(s, "bulk");
2172  break;
2173  case BCMEP_INTR:
2174  seq_printf(s, "interrupt");
2175  break;
2176  }
2177  seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2178  seq_printf(s, " [ep%d]:\n",
2179  max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2180  seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2181  usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2182  usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2183  usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2184  usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2185 
2186  sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2187  sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2188  seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2189  usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2190  sram2 >> 16, sram2 & 0xffff,
2191  sram3 >> 16, sram3 & 0xffff,
2192  usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2193  seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2194  iudma->n_bds);
2195 
2196  if (iudma->bep) {
2197  i = 0;
2198  list_for_each(pos, &iudma->bep->queue)
2199  i++;
2200  seq_printf(s, "; %d queued\n", i);
2201  } else {
2202  seq_printf(s, "\n");
2203  }
2204 
2205  for (i = 0; i < iudma->n_bds; i++) {
2206  struct bcm_enet_desc *d = &iudma->bd_ring[i];
2207 
2208  seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2209  i * sizeof(*d), i,
2210  d->len_stat >> 16, d->len_stat & 0xffff,
2211  d->address);
2212  if (d == iudma->read_bd)
2213  seq_printf(s, " <<RD");
2214  if (d == iudma->write_bd)
2215  seq_printf(s, " <<WR");
2216  seq_printf(s, "\n");
2217  }
2218 
2219  seq_printf(s, "\n");
2220  }
2221 
2222  return 0;
2223 }
2224 
/* debugfs open hook: bind the single-record seq_file to the usbd dump */
static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
}
2229 
/* debugfs open hook: bind the single-record seq_file to the iudma dump */
static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
}
2234 
/* file_operations for /sys/kernel/debug/<gadget>/usbd (read-only) */
static const struct file_operations usbd_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_usbd_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2242 
/* file_operations for /sys/kernel/debug/<gadget>/iudma (read-only) */
static const struct file_operations iudma_dbg_fops = {
	.owner = THIS_MODULE,
	.open = bcm63xx_iudma_dbg_open,
	.llseek = seq_lseek,
	.read = seq_read,
	.release = single_release,
};
2250 
2251 
2256 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2257 {
2258  struct dentry *root, *usbd, *iudma;
2259 
2260  if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2261  return;
2262 
2263  root = debugfs_create_dir(udc->gadget.name, NULL);
2264  if (IS_ERR(root) || !root)
2265  goto err_root;
2266 
2267  usbd = debugfs_create_file("usbd", 0400, root, udc,
2268  &usbd_dbg_fops);
2269  if (!usbd)
2270  goto err_usbd;
2271  iudma = debugfs_create_file("iudma", 0400, root, udc,
2272  &iudma_dbg_fops);
2273  if (!iudma)
2274  goto err_iudma;
2275 
2276  udc->debugfs_root = root;
2277  udc->debugfs_usbd = usbd;
2278  udc->debugfs_iudma = iudma;
2279  return;
2280 err_iudma:
2281  debugfs_remove(usbd);
2282 err_usbd:
2283  debugfs_remove(root);
2284 err_root:
2285  dev_err(udc->dev, "debugfs is not available\n");
2286 }
2287 
2294 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2295 {
2299  udc->debugfs_iudma = NULL;
2300  udc->debugfs_usbd = NULL;
2301  udc->debugfs_root = NULL;
2302 }
2303 
2304 /***********************************************************************
2305  * Driver init/exit
2306  ***********************************************************************/
2307 
/*
 * bcm63xx_udc_gadget_release - Called from device_unregister().
 * @dev: Unused.
 *
 * Intentionally empty: the udc struct is devm-allocated against the
 * platform device, so there is nothing to free here, but the device core
 * requires a release callback.
 */
static void bcm63xx_udc_gadget_release(struct device *dev)
{
}
2318 
2326 static int __devinit bcm63xx_udc_probe(struct platform_device *pdev)
2327 {
2328  struct device *dev = &pdev->dev;
2330  struct bcm63xx_udc *udc;
2331  struct resource *res;
2332  int rc = -ENOMEM, i, irq;
2333 
2334  udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2335  if (!udc) {
2336  dev_err(dev, "cannot allocate memory\n");
2337  return -ENOMEM;
2338  }
2339 
2340  platform_set_drvdata(pdev, udc);
2341  udc->dev = dev;
2342  udc->pd = pd;
2343 
2344  if (!pd) {
2345  dev_err(dev, "missing platform data\n");
2346  return -EINVAL;
2347  }
2348 
2349  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2350  if (!res) {
2351  dev_err(dev, "error finding USBD resource\n");
2352  return -ENXIO;
2353  }
2354  udc->usbd_regs = devm_request_and_ioremap(dev, res);
2355 
2356  res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2357  if (!res) {
2358  dev_err(dev, "error finding IUDMA resource\n");
2359  return -ENXIO;
2360  }
2361  udc->iudma_regs = devm_request_and_ioremap(dev, res);
2362 
2363  if (!udc->usbd_regs || !udc->iudma_regs) {
2364  dev_err(dev, "error requesting resources\n");
2365  return -ENXIO;
2366  }
2367 
2368  spin_lock_init(&udc->lock);
2369  INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2370  dev_set_name(&udc->gadget.dev, "gadget");
2371 
2372  udc->gadget.ops = &bcm63xx_udc_ops;
2373  udc->gadget.name = dev_name(dev);
2374  udc->gadget.dev.parent = dev;
2375  udc->gadget.dev.release = bcm63xx_udc_gadget_release;
2376  udc->gadget.dev.dma_mask = dev->dma_mask;
2377 
2378  if (!pd->use_fullspeed && !use_fullspeed)
2379  udc->gadget.max_speed = USB_SPEED_HIGH;
2380  else
2381  udc->gadget.max_speed = USB_SPEED_FULL;
2382 
2383  /* request clocks, allocate buffers, and clear any pending IRQs */
2384  rc = bcm63xx_init_udc_hw(udc);
2385  if (rc)
2386  return rc;
2387 
2388  rc = -ENXIO;
2389 
2390  /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2391  irq = platform_get_irq(pdev, 0);
2392  if (irq < 0) {
2393  dev_err(dev, "missing IRQ resource #0\n");
2394  goto out_uninit;
2395  }
2396  if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2397  dev_name(dev), udc) < 0) {
2398  dev_err(dev, "error requesting IRQ #%d\n", irq);
2399  goto out_uninit;
2400  }
2401 
2402  /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2403  for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2404  irq = platform_get_irq(pdev, i + 1);
2405  if (irq < 0) {
2406  dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2407  goto out_uninit;
2408  }
2409  if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2410  dev_name(dev), &udc->iudma[i]) < 0) {
2411  dev_err(dev, "error requesting IRQ #%d\n", irq);
2412  goto out_uninit;
2413  }
2414  }
2415 
2416  rc = device_register(&udc->gadget.dev);
2417  if (rc)
2418  goto out_uninit;
2419 
2420  bcm63xx_udc_init_debugfs(udc);
2421  rc = usb_add_gadget_udc(dev, &udc->gadget);
2422  if (!rc)
2423  return 0;
2424 
2425  bcm63xx_udc_cleanup_debugfs(udc);
2426  device_unregister(&udc->gadget.dev);
2427 out_uninit:
2428  bcm63xx_uninit_udc_hw(udc);
2429  return rc;
2430 }
2431 
/*
 * bcm63xx_udc_remove - Remove the device from the system.
 * @pdev: Platform device struct from the bcm63xx BSP code.
 *
 * Tears down debugfs, unregisters from the UDC core, and releases the
 * hardware.  By this point the UDC core should have unbound any gadget
 * driver, hence the BUG_ON.
 */
static int __devexit bcm63xx_udc_remove(struct platform_device *pdev)
{
	struct bcm63xx_udc *udc = platform_get_drvdata(pdev);

	bcm63xx_udc_cleanup_debugfs(udc);
	usb_del_gadget_udc(&udc->gadget);
	device_unregister(&udc->gadget.dev);
	BUG_ON(udc->driver);

	platform_set_drvdata(pdev, NULL);
	bcm63xx_uninit_udc_hw(udc);

	return 0;
}
2450 
/* platform driver glue: matched by name against the bcm63xx BSP device */
static struct platform_driver bcm63xx_udc_driver = {
	.probe = bcm63xx_udc_probe,
	.remove = __devexit_p(bcm63xx_udc_remove),
	.driver = {
		.name = DRV_MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
/* registers the driver and generates module init/exit boilerplate */
module_platform_driver(bcm63xx_udc_driver);

MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
MODULE_AUTHOR("Kevin Cernekee <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_MODULE_NAME);