Linux Kernel  3.7.1
lpc32xx_udc.c
1 /*
2  * USB Gadget driver for LPC32xx
3  *
4  * Authors:
5  * Kevin Wells <[email protected]>
6  * Mike James
7  * Roland Stigge <[email protected]>
8  *
9  * Copyright (C) 2006 Philips Semiconductors
10  * Copyright (C) 2009 NXP Semiconductors
11  * Copyright (C) 2012 Roland Stigge
12  *
13  * Note: This driver is based on original work done by Mike James for
14  * the LPC3180.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, write to the Free Software
28  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29  */
30 
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/platform_device.h>
34 #include <linux/delay.h>
35 #include <linux/ioport.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/interrupt.h>
41 #include <linux/proc_fs.h>
42 #include <linux/clk.h>
43 #include <linux/usb/ch9.h>
44 #include <linux/usb/gadget.h>
45 #include <linux/i2c.h>
46 #include <linux/kthread.h>
47 #include <linux/freezer.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/dmapool.h>
50 #include <linux/workqueue.h>
51 #include <linux/of.h>
52 #include <linux/usb/isp1301.h>
53 
54 #include <asm/byteorder.h>
55 #include <mach/hardware.h>
56 #include <linux/io.h>
57 #include <asm/irq.h>
58 #include <asm/system.h>
59 
60 #include <mach/platform.h>
61 #include <mach/irqs.h>
62 #include <mach/board.h>
63 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
64 #include <linux/debugfs.h>
65 #include <linux/seq_file.h>
66 #endif
67 
68 /*
69  * USB device configuration structure
70  */
71 typedef void (*usc_chg_event)(int);
72 struct lpc32xx_usbd_cfg {
73  int vbus_drv_pol; /* 0=active low drive for VBUS via ISP1301 */
74  usc_chg_event conn_chgb; /* Connection change event (optional) */
75  usc_chg_event susp_chgb; /* Suspend/resume event (optional) */
76  usc_chg_event rmwk_chgb; /* Enable/disable remote wakeup */
77 };
78 
79 /*
80  * controller driver data structures
81  */
82 
83 /* 16 endpoints (not to be confused with 32 hardware endpoints) */
84 #define NUM_ENDPOINTS 16
85 
86 /*
87  * IRQ indices make reading the code a little easier
88  */
89 #define IRQ_USB_LP 0
90 #define IRQ_USB_HP 1
91 #define IRQ_USB_DEVDMA 2
92 #define IRQ_USB_ATX 3
93 
94 #define EP_OUT 0 /* RX (from host) */
95 #define EP_IN 1 /* TX (to host) */
96 
97 /* Returns the interrupt mask for the selected hardware endpoint */
98 #define EP_MASK_SEL(ep, dir) (1 << (((ep) * 2) + dir))
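/*
 * Worked example (editor's sketch, not part of the driver source): logical
 * endpoint n maps to hardware endpoints 2n (OUT) and 2n + 1 (IN), so
 * EP_MASK_SEL() yields the per-endpoint bit used in the USBD_EPINTST/
 * USBD_EPINTEN register family:
 *
 *   EP_MASK_SEL(0, EP_OUT) == 0x00000001   hardware EP 0 (EP0 OUT)
 *   EP_MASK_SEL(0, EP_IN)  == 0x00000002   hardware EP 1 (EP0 IN)
 *   EP_MASK_SEL(2, EP_IN)  == 0x00000020   hardware EP 5
 */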
99 
100 #define EP_INT_TYPE 0
101 #define EP_ISO_TYPE 1
102 #define EP_BLK_TYPE 2
103 #define EP_CTL_TYPE 3
104 
105 /* EP0 states */
106 #define WAIT_FOR_SETUP 0 /* Wait for setup packet */
107 #define DATA_IN 1 /* Expect dev->host transfer */
108 #define DATA_OUT 2 /* Expect host->dev transfer */
109 
110 /* DD (DMA Descriptor) structure, requires word alignment, this is already
111  * defined in the LPC32XX USB device header file, but this version is slightly
112  * modified to tag some work data with each DMA descriptor. */
113 struct lpc32xx_usbd_dd_gad {
114  u32 dd_next_phy;
115  u32 dd_setup;
116  u32 dd_buffer_addr;
117  u32 dd_status;
118  u32 dd_iso_ps_mem_addr;
119  u32 this_dma;
120  u32 iso_status[6]; /* 5 spare */
121  u32 dd_next_v;
122 };
123 
124 /*
125  * Logical endpoint structure
126  */
127 struct lpc32xx_ep {
128  struct usb_ep ep;
129  struct list_head queue;
130  struct lpc32xx_udc *udc;
131 
132  u32 hwep_num_base; /* Physical hardware EP */
133  u32 hwep_num; /* Maps to hardware endpoint */
134  u32 maxpacket; /* Max physical packet size */
135  u32 lep; /* Set when the EP uses DMA buffers */
136 
137  bool is_in;
138  bool req_pending;
139  u32 eptype;
140 
141  u32 totalints;
142 
143  bool wedge;
144 };
145 
146 /*
147  * Common UDC structure
148  */
149 struct lpc32xx_udc {
150  struct usb_gadget gadget;
151  struct usb_gadget_driver *driver;
152  struct platform_device *pdev;
153  struct device *dev;
154  struct dentry *pde;
155  spinlock_t lock;
156  struct i2c_client *isp1301_i2c_client;
157 
158  /* Board and device specific */
159  struct lpc32xx_usbd_cfg *board;
160  u32 io_p_start;
161  u32 io_p_size;
162  void __iomem *udp_baseaddr;
163  int udp_irq[4];
164  struct clk *usb_pll_clk;
165  struct clk *usb_slv_clk;
166  struct clk *usb_otg_clk;
167 
168  /* DMA support */
169  u32 *udca_v_base;
170  u32 udca_p_base;
171  struct dma_pool *dd_cache;
172 
173  /* Common EP and control data */
174  u32 enabled_devints;
175  u32 enabled_hwepints;
176  u32 dev_status;
177  u32 realized_eps;
178 
179  /* VBUS detection, pullup, and power flags */
180  u8 vbus;
181  u8 last_vbus;
182  int pullup;
183  int poweron;
184 
185  /* Work queues related to I2C support */
186  struct work_struct pullup_job;
187  struct work_struct vbus_job;
188  struct work_struct power_job;
189 
190  /* USB device peripheral - various */
191  struct lpc32xx_ep ep[NUM_ENDPOINTS];
192  bool enabled;
193  bool clocked;
194  bool suspended;
195  bool selfpowered;
196  int ep0state;
197  atomic_t enabled_ep_cnt;
198  wait_queue_head_t ep_disable_wait_queue;
199 };
200 
201 /*
202  * Endpoint request
203  */
204 struct lpc32xx_request {
205  struct usb_request req;
206  struct list_head queue;
207  struct lpc32xx_usbd_dd_gad *dd_desc_ptr;
208  bool mapped;
209  bool send_zlp;
210 };
211 
212 static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
213 {
214  return container_of(g, struct lpc32xx_udc, gadget);
215 }
216 
217 #define ep_dbg(epp, fmt, arg...) \
218  dev_dbg(epp->udc->dev, "%s: " fmt, __func__, ## arg)
219 #define ep_err(epp, fmt, arg...) \
220  dev_err(epp->udc->dev, "%s: " fmt, __func__, ## arg)
221 #define ep_info(epp, fmt, arg...) \
222  dev_info(epp->udc->dev, "%s: " fmt, __func__, ## arg)
223 #define ep_warn(epp, fmt, arg...) \
224  dev_warn(epp->udc->dev, "%s: " fmt, __func__, ## arg)
225 
226 #define UDCA_BUFF_SIZE (128)
227 
228 /* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
229  * be replaced with an ioremap()ed pointer
230  */
231 #define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)
232 
233 /* USB_CTRL bit defines */
234 #define USB_SLAVE_HCLK_EN (1 << 24)
235 #define USB_HOST_NEED_CLK_EN (1 << 21)
236 #define USB_DEV_NEED_CLK_EN (1 << 22)
237 
238 /**********************************************************************
239  * USB device controller register offsets
240  **********************************************************************/
241 
242 #define USBD_DEVINTST(x) ((x) + 0x200)
243 #define USBD_DEVINTEN(x) ((x) + 0x204)
244 #define USBD_DEVINTCLR(x) ((x) + 0x208)
245 #define USBD_DEVINTSET(x) ((x) + 0x20C)
246 #define USBD_CMDCODE(x) ((x) + 0x210)
247 #define USBD_CMDDATA(x) ((x) + 0x214)
248 #define USBD_RXDATA(x) ((x) + 0x218)
249 #define USBD_TXDATA(x) ((x) + 0x21C)
250 #define USBD_RXPLEN(x) ((x) + 0x220)
251 #define USBD_TXPLEN(x) ((x) + 0x224)
252 #define USBD_CTRL(x) ((x) + 0x228)
253 #define USBD_DEVINTPRI(x) ((x) + 0x22C)
254 #define USBD_EPINTST(x) ((x) + 0x230)
255 #define USBD_EPINTEN(x) ((x) + 0x234)
256 #define USBD_EPINTCLR(x) ((x) + 0x238)
257 #define USBD_EPINTSET(x) ((x) + 0x23C)
258 #define USBD_EPINTPRI(x) ((x) + 0x240)
259 #define USBD_REEP(x) ((x) + 0x244)
260 #define USBD_EPIND(x) ((x) + 0x248)
261 #define USBD_EPMAXPSIZE(x) ((x) + 0x24C)
262 /* DMA support registers only below */
263 /* Set, clear, or get enabled state of the DMA request status. If
264  * enabled, an IN or OUT token will start a DMA transfer for the EP */
265 #define USBD_DMARST(x) ((x) + 0x250)
266 #define USBD_DMARCLR(x) ((x) + 0x254)
267 #define USBD_DMARSET(x) ((x) + 0x258)
268 /* DMA UDCA head pointer */
269 #define USBD_UDCAH(x) ((x) + 0x280)
270 /* EP DMA status, enable, and disable. This is used to specifically
271  * enabled or disable DMA for a specific EP */
272 #define USBD_EPDMAST(x) ((x) + 0x284)
273 #define USBD_EPDMAEN(x) ((x) + 0x288)
274 #define USBD_EPDMADIS(x) ((x) + 0x28C)
275 /* DMA master interrupts enable and pending interrupts */
276 #define USBD_DMAINTST(x) ((x) + 0x290)
277 #define USBD_DMAINTEN(x) ((x) + 0x294)
278 /* DMA end of transfer interrupt enable, disable, status */
279 #define USBD_EOTINTST(x) ((x) + 0x2A0)
280 #define USBD_EOTINTCLR(x) ((x) + 0x2A4)
281 #define USBD_EOTINTSET(x) ((x) + 0x2A8)
282 /* New DD request interrupt enable, disable, status */
283 #define USBD_NDDRTINTST(x) ((x) + 0x2AC)
284 #define USBD_NDDRTINTCLR(x) ((x) + 0x2B0)
285 #define USBD_NDDRTINTSET(x) ((x) + 0x2B4)
286 /* DMA error interrupt enable, disable, status */
287 #define USBD_SYSERRTINTST(x) ((x) + 0x2B8)
288 #define USBD_SYSERRTINTCLR(x) ((x) + 0x2BC)
289 #define USBD_SYSERRTINTSET(x) ((x) + 0x2C0)
290 
291 /**********************************************************************
292  * USBD_DEVINTST/USBD_DEVINTEN/USBD_DEVINTCLR/USBD_DEVINTSET/
293  * USBD_DEVINTPRI register definitions
294  **********************************************************************/
295 #define USBD_ERR_INT (1 << 9)
296 #define USBD_EP_RLZED (1 << 8)
297 #define USBD_TXENDPKT (1 << 7)
298 #define USBD_RXENDPKT (1 << 6)
299 #define USBD_CDFULL (1 << 5)
300 #define USBD_CCEMPTY (1 << 4)
301 #define USBD_DEV_STAT (1 << 3)
302 #define USBD_EP_SLOW (1 << 2)
303 #define USBD_EP_FAST (1 << 1)
304 #define USBD_FRAME (1 << 0)
305 
306 /**********************************************************************
307  * USBD_EPINTST/USBD_EPINTEN/USBD_EPINTCLR/USBD_EPINTSET/
308  * USBD_EPINTPRI register definitions
309  **********************************************************************/
310 /* End point selection macro (RX) */
311 #define USBD_RX_EP_SEL(e) (1 << ((e) << 1))
312 
313 /* End point selection macro (TX) */
314 #define USBD_TX_EP_SEL(e) (1 << (((e) << 1) + 1))
315 
316 /**********************************************************************
317  * USBD_REEP/USBD_DMARST/USBD_DMARCLR/USBD_DMARSET/USBD_EPDMAST/
318  * USBD_EPDMAEN/USBD_EPDMADIS/
319  * USBD_NDDRTINTST/USBD_NDDRTINTCLR/USBD_NDDRTINTSET/
320  * USBD_EOTINTST/USBD_EOTINTCLR/USBD_EOTINTSET/
321  * USBD_SYSERRTINTST/USBD_SYSERRTINTCLR/USBD_SYSERRTINTSET
322  * register definitions
323  **********************************************************************/
324 /* Endpoint selection macro */
325 #define USBD_EP_SEL(e) (1 << (e))
326 
327 /**********************************************************************
328  * USBD_DMAINTST/USBD_DMAINTEN
329  **********************************************************************/
330 #define USBD_SYS_ERR_INT (1 << 2)
331 #define USBD_NEW_DD_INT (1 << 1)
332 #define USBD_EOT_INT (1 << 0)
333 
334 /**********************************************************************
335  * USBD_RXPLEN register definitions
336  **********************************************************************/
337 #define USBD_PKT_RDY (1 << 11)
338 #define USBD_DV (1 << 10)
339 #define USBD_PK_LEN_MASK 0x3FF
340 
341 /**********************************************************************
342  * USBD_CTRL register definitions
343  **********************************************************************/
344 #define USBD_LOG_ENDPOINT(e) ((e) << 2)
345 #define USBD_WR_EN (1 << 1)
346 #define USBD_RD_EN (1 << 0)
347 
348 /**********************************************************************
349  * USBD_CMDCODE register definitions
350  **********************************************************************/
351 #define USBD_CMD_CODE(c) ((c) << 16)
352 #define USBD_CMD_PHASE(p) ((p) << 8)
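/*
 * Sketch (not part of the driver source): every protocol engine command
 * word defined below decomposes into USBD_CMD_CODE() | USBD_CMD_PHASE().
 * The phase values used in this file are 0x05 (command), 0x02 (data read)
 * and 0x01 (data write), for example:
 *
 *   CMD_SET_ADDR   (0x00D00500) == USBD_CMD_CODE(0xD0) | USBD_CMD_PHASE(0x05)
 *   DAT_RD_FRAME   (0x00F50200) == USBD_CMD_CODE(0xF5) | USBD_CMD_PHASE(0x02)
 *   DAT_WR_BYTE(x)              == USBD_CMD_CODE(x)    | USBD_CMD_PHASE(0x01)
 */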
353 
354 /**********************************************************************
355  * USBD_DMARST/USBD_DMARCLR/USBD_DMARSET register definitions
356  **********************************************************************/
357 #define USBD_DMAEP(e) (1 << (e))
358 
359 /* DD (DMA Descriptor) structure, requires word alignment */
366 };
367 
368 /* dd_setup bit defines */
369 #define DD_SETUP_ATLE_DMA_MODE 0x01
370 #define DD_SETUP_NEXT_DD_VALID 0x04
371 #define DD_SETUP_ISO_EP 0x10
372 #define DD_SETUP_PACKETLEN(n) (((n) & 0x7FF) << 5)
373 #define DD_SETUP_DMALENBYTES(n) (((n) & 0xFFFF) << 16)
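/*
 * Sketch (not part of the driver source): lpc32xx_ep_queue() below builds
 * dd_setup from these fields. For a non-ISO request of 512 bytes on a
 * 64-byte bulk endpoint:
 *
 *   dd_setup = DD_SETUP_PACKETLEN(64) | DD_SETUP_DMALENBYTES(512)
 *            = (64 << 5) | (512 << 16) = 0x02000800
 */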
374 
375 /* dd_status bit defines */
376 #define DD_STATUS_DD_RETIRED 0x01
377 #define DD_STATUS_STS_MASK 0x1E
378 #define DD_STATUS_STS_NS 0x00 /* Not serviced */
379 #define DD_STATUS_STS_BS 0x02 /* Being serviced */
380 #define DD_STATUS_STS_NC 0x04 /* Normal completion */
381 #define DD_STATUS_STS_DUR 0x06 /* Data underrun (short packet) */
382 #define DD_STATUS_STS_DOR 0x08 /* Data overrun */
383 #define DD_STATUS_STS_SE 0x12 /* System error */
384 #define DD_STATUS_PKT_VAL 0x20 /* Packet valid */
385 #define DD_STATUS_LSB_EX 0x40 /* LS byte extracted (ATLE) */
386 #define DD_STATUS_MSB_EX 0x80 /* MS byte extracted (ATLE) */
387 #define DD_STATUS_MLEN(n) (((n) >> 8) & 0x3F)
388 #define DD_STATUS_CURDMACNT(n) (((n) >> 16) & 0xFFFF)
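/*
 * Worked example (editor's sketch): a descriptor retired with
 * dd_status == 0x00400025 has DD_STATUS_DD_RETIRED and DD_STATUS_PKT_VAL
 * set, a completion code of DD_STATUS_STS_NC (normal completion) and
 * DD_STATUS_CURDMACNT(0x00400025) == 64 bytes transferred.
 */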
389 
390 /*
391  *
392  * Protocol engine bits below
393  *
394  */
395 /* Device Interrupt Bit Definitions */
396 #define FRAME_INT 0x00000001
397 #define EP_FAST_INT 0x00000002
398 #define EP_SLOW_INT 0x00000004
399 #define DEV_STAT_INT 0x00000008
400 #define CCEMTY_INT 0x00000010
401 #define CDFULL_INT 0x00000020
402 #define RxENDPKT_INT 0x00000040
403 #define TxENDPKT_INT 0x00000080
404 #define EP_RLZED_INT 0x00000100
405 #define ERR_INT 0x00000200
406 
407 /* Rx & Tx Packet Length Definitions */
408 #define PKT_LNGTH_MASK 0x000003FF
409 #define PKT_DV 0x00000400
410 #define PKT_RDY 0x00000800
411 
412 /* USB Control Definitions */
413 #define CTRL_RD_EN 0x00000001
414 #define CTRL_WR_EN 0x00000002
415 
416 /* Command Codes */
417 #define CMD_SET_ADDR 0x00D00500
418 #define CMD_CFG_DEV 0x00D80500
419 #define CMD_SET_MODE 0x00F30500
420 #define CMD_RD_FRAME 0x00F50500
421 #define DAT_RD_FRAME 0x00F50200
422 #define CMD_RD_TEST 0x00FD0500
423 #define DAT_RD_TEST 0x00FD0200
424 #define CMD_SET_DEV_STAT 0x00FE0500
425 #define CMD_GET_DEV_STAT 0x00FE0500
426 #define DAT_GET_DEV_STAT 0x00FE0200
427 #define CMD_GET_ERR_CODE 0x00FF0500
428 #define DAT_GET_ERR_CODE 0x00FF0200
429 #define CMD_RD_ERR_STAT 0x00FB0500
430 #define DAT_RD_ERR_STAT 0x00FB0200
431 #define DAT_WR_BYTE(x) (0x00000100 | ((x) << 16))
432 #define CMD_SEL_EP(x) (0x00000500 | ((x) << 16))
433 #define DAT_SEL_EP(x) (0x00000200 | ((x) << 16))
434 #define CMD_SEL_EP_CLRI(x) (0x00400500 | ((x) << 16))
435 #define DAT_SEL_EP_CLRI(x) (0x00400200 | ((x) << 16))
436 #define CMD_SET_EP_STAT(x) (0x00400500 | ((x) << 16))
437 #define CMD_CLR_BUF 0x00F20500
438 #define DAT_CLR_BUF 0x00F20200
439 #define CMD_VALID_BUF 0x00FA0500
440 
441 /* Device Address Register Definitions */
442 #define DEV_ADDR_MASK 0x7F
443 #define DEV_EN 0x80
444 
445 /* Device Configure Register Definitions */
446 #define CONF_DVICE 0x01
447 
448 /* Device Mode Register Definitions */
449 #define AP_CLK 0x01
450 #define INAK_CI 0x02
451 #define INAK_CO 0x04
452 #define INAK_II 0x08
453 #define INAK_IO 0x10
454 #define INAK_BI 0x20
455 #define INAK_BO 0x40
456 
457 /* Device Status Register Definitions */
458 #define DEV_CON 0x01
459 #define DEV_CON_CH 0x02
460 #define DEV_SUS 0x04
461 #define DEV_SUS_CH 0x08
462 #define DEV_RST 0x10
463 
464 /* Error Code Register Definitions */
465 #define ERR_EC_MASK 0x0F
466 #define ERR_EA 0x10
467 
468 /* Error Status Register Definitions */
469 #define ERR_PID 0x01
470 #define ERR_UEPKT 0x02
471 #define ERR_DCRC 0x04
472 #define ERR_TIMOUT 0x08
473 #define ERR_EOP 0x10
474 #define ERR_B_OVRN 0x20
475 #define ERR_BTSTF 0x40
476 #define ERR_TGL 0x80
477 
478 /* Endpoint Select Register Definitions */
479 #define EP_SEL_F 0x01
480 #define EP_SEL_ST 0x02
481 #define EP_SEL_STP 0x04
482 #define EP_SEL_PO 0x08
483 #define EP_SEL_EPN 0x10
484 #define EP_SEL_B_1_FULL 0x20
485 #define EP_SEL_B_2_FULL 0x40
486 
487 /* Endpoint Status Register Definitions */
488 #define EP_STAT_ST 0x01
489 #define EP_STAT_DA 0x20
490 #define EP_STAT_RF_MO 0x40
491 #define EP_STAT_CND_ST 0x80
492 
493 /* Clear Buffer Register Definitions */
494 #define CLR_BUF_PO 0x01
495 
496 /* DMA Interrupt Bit Definitions */
497 #define EOT_INT 0x01
498 #define NDD_REQ_INT 0x02
499 #define SYS_ERR_INT 0x04
500 
501 #define DRIVER_VERSION "1.03"
502 static const char driver_name[] = "lpc32xx_udc";
503 
504 /*
505  *
506  * proc interface support
507  *
508  */
509 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
510 static char *epnames[] = {"INT", "ISO", "BULK", "CTRL"};
511 static const char debug_filename[] = "driver/udc";
512 
513 static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep)
514 {
515  struct lpc32xx_request *req;
516 
517  seq_printf(s, "\n");
518  seq_printf(s, "%12s, maxpacket %4d %3s",
519  ep->ep.name, ep->ep.maxpacket,
520  ep->is_in ? "in" : "out");
521  seq_printf(s, " type %4s", epnames[ep->eptype]);
522  seq_printf(s, " ints: %12d", ep->totalints);
523 
524  if (list_empty(&ep->queue))
525  seq_printf(s, "\t(queue empty)\n");
526  else {
527  list_for_each_entry(req, &ep->queue, queue) {
528  u32 length = req->req.actual;
529 
530  seq_printf(s, "\treq %p len %d/%d buf %p\n",
531  &req->req, length,
532  req->req.length, req->req.buf);
533  }
534  }
535 }
536 
537 static int proc_udc_show(struct seq_file *s, void *unused)
538 {
539  struct lpc32xx_udc *udc = s->private;
540  struct lpc32xx_ep *ep;
541  unsigned long flags;
542 
543  seq_printf(s, "%s: version %s\n", driver_name, DRIVER_VERSION);
544 
545  spin_lock_irqsave(&udc->lock, flags);
546 
547  seq_printf(s, "vbus %s, pullup %s, %s powered%s, gadget %s\n\n",
548  udc->vbus ? "present" : "off",
549  udc->enabled ? (udc->vbus ? "active" : "enabled") :
550  "disabled",
551  udc->selfpowered ? "self" : "VBUS",
552  udc->suspended ? ", suspended" : "",
553  udc->driver ? udc->driver->driver.name : "(none)");
554 
555  if (udc->enabled && udc->vbus) {
556  proc_ep_show(s, &udc->ep[0]);
557  list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list)
558  proc_ep_show(s, ep);
559  }
560 
561  spin_unlock_irqrestore(&udc->lock, flags);
562 
563  return 0;
564 }
565 
566 static int proc_udc_open(struct inode *inode, struct file *file)
567 {
568  return single_open(file, proc_udc_show, PDE(inode)->data);
569 }
570 
571 static const struct file_operations proc_ops = {
572  .owner = THIS_MODULE,
573  .open = proc_udc_open,
574  .read = seq_read,
575  .llseek = seq_lseek,
576  .release = single_release,
577 };
578 
579 static void create_debug_file(struct lpc32xx_udc *udc)
580 {
581  udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops);
582 }
583 
584 static void remove_debug_file(struct lpc32xx_udc *udc)
585 {
586  if (udc->pde)
587  debugfs_remove(udc->pde);
588 }
589 
590 #else
591 static inline void create_debug_file(struct lpc32xx_udc *udc) {}
592 static inline void remove_debug_file(struct lpc32xx_udc *udc) {}
593 #endif
594 
595 /* Primary initialization sequence for the ISP1301 transceiver */
596 static void isp1301_udc_configure(struct lpc32xx_udc *udc)
597 {
598  /* LPC32XX only supports DAT_SE0 USB mode */
599  /* This sequence is important */
600 
601  /* Disable transparent UART mode first */
604  MC1_UART_EN);
605 
606  /* Set full speed and SE0 mode */
611 
612  /*
613  * The PSW_OE enable bit state is reversed in the ISP1301 User's Guide
614  */
619 
620  /* Driver VBUS_DRV high or low depending on board setup */
621  if (udc->board->vbus_drv_pol != 0)
624  else
627  OTG1_VBUS_DRV);
628 
629  /* Bi-directional mode with suspend control
630  * Enable both pulldowns for now - the pullup will be enabled when VBUS
631  * is detected */
637 
638  /* Discharge VBUS (just in case) */
641  msleep(1);
645 
646  /* Clear and enable VBUS high edge interrupt */
657 
658  /* Enable usb_need_clk clock after transceiver is initialized */
660 
661  dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
663  dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
665  dev_info(udc->dev, "ISP1301 Version ID : 0x%04x\n",
667 }
668 
669 /* Enables or disables the USB device pullup via the ISP1301 transceiver */
670 static void isp1301_pullup_set(struct lpc32xx_udc *udc)
671 {
672  if (udc->pullup)
673  /* Enable pullup for bus signalling */
676  else
677  /* Disable pullup */
681 }
682 
683 static void pullup_work(struct work_struct *work)
684 {
685  struct lpc32xx_udc *udc =
686  container_of(work, struct lpc32xx_udc, pullup_job);
687 
688  isp1301_pullup_set(udc);
689 }
690 
691 static void isp1301_pullup_enable(struct lpc32xx_udc *udc, int en_pullup,
692  int block)
693 {
694  if (en_pullup == udc->pullup)
695  return;
696 
697  udc->pullup = en_pullup;
698  if (block)
699  isp1301_pullup_set(udc);
700  else
701  /* defer slow i2c pull up setting */
702  schedule_work(&udc->pullup_job);
703 }
704 
705 #ifdef CONFIG_PM
706 /* Powers up or down the ISP1301 transceiver */
707 static void isp1301_set_powerstate(struct lpc32xx_udc *udc, int enable)
708 {
709  if (enable != 0)
710  /* Power up ISP1301 - the ISP1301 will automatically wake up
711  when VBUS is detected */
715  else
716  /* Power down ISP1301 */
719 }
720 
721 static void power_work(struct work_struct *work)
722 {
723  struct lpc32xx_udc *udc =
724  container_of(work, struct lpc32xx_udc, power_job);
725 
726  isp1301_set_powerstate(udc, udc->poweron);
727 }
728 #endif
729 
730 /*
731  *
732  * USB protocol engine command/data read/write helper functions
733  *
734  */
735 /* Issues a single command to the USB device state machine */
736 static void udc_protocol_cmd_w(struct lpc32xx_udc *udc, u32 cmd)
737 {
738  u32 pass = 0;
739  int to;
740 
741  /* EP may lock on CLRI if this read isn't done */
742  u32 tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
743  (void) tmp;
744 
745  while (pass == 0) {
746  writel(USBD_CCEMPTY, USBD_DEVINTCLR(udc->udp_baseaddr));
747 
748  /* Write command code */
749  writel(cmd, USBD_CMDCODE(udc->udp_baseaddr));
750  to = 10000;
751  while (((readl(USBD_DEVINTST(udc->udp_baseaddr)) &
752  USBD_CCEMPTY) == 0) && (to > 0)) {
753  to--;
754  }
755 
756  if (to > 0)
757  pass = 1;
758 
759  cpu_relax();
760  }
761 }
762 
763 /* Issues 2 commands (or command and data) to the USB device state machine */
764 static inline void udc_protocol_cmd_data_w(struct lpc32xx_udc *udc, u32 cmd,
765  u32 data)
766 {
767  udc_protocol_cmd_w(udc, cmd);
768  udc_protocol_cmd_w(udc, data);
769 }
770 
771 /* Issues a single command to the USB device state machine and reads
772  * response data */
773 static u32 udc_protocol_cmd_r(struct lpc32xx_udc *udc, u32 cmd)
774 {
775  u32 tmp;
776  int to = 1000;
777 
778  /* Write a command and read data from the protocol engine */
781 
782  /* Write command code */
783  udc_protocol_cmd_w(udc, cmd);
784 
785  tmp = readl(USBD_DEVINTST(udc->udp_baseaddr));
786  while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) & USBD_CDFULL))
787  && (to > 0))
788  to--;
789  if (!to)
790  dev_dbg(udc->dev,
791  "Protocol engine didn't receive response (CDFULL)\n");
792 
793  return readl(USBD_CMDDATA(udc->udp_baseaddr));
794 }
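/*
 * Usage sketch (not part of the driver source): a command/response pair is
 * issued by writing the CMD_* word with udc_protocol_cmd_w() and then
 * passing the matching DAT_* word to udc_protocol_cmd_r(), as
 * udc_get_current_frame() and udc_selep_clrint() do later in this file.
 * A hypothetical helper that reads the device status would look like:
 */
static inline u32 example_read_dev_status(struct lpc32xx_udc *udc)
{
	udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
	return udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
}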
795 
796 /*
797  *
798  * USB device interrupt mask support functions
799  *
800  */
801 /* Enable one or more USB device interrupts */
802 static inline void uda_enable_devint(struct lpc32xx_udc *udc, u32 devmask)
803 {
804  udc->enabled_devints |= devmask;
805  writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
806 }
807 
808 /* Disable one or more USB device interrupts */
809 static inline void uda_disable_devint(struct lpc32xx_udc *udc, u32 mask)
810 {
811  udc->enabled_devints &= ~mask;
812  writel(udc->enabled_devints, USBD_DEVINTEN(udc->udp_baseaddr));
813 }
814 
815 /* Clear one or more USB device interrupts */
816 static inline void uda_clear_devint(struct lpc32xx_udc *udc, u32 mask)
817 {
818  writel(mask, USBD_DEVINTCLR(udc->udp_baseaddr));
819 }
820 
821 /*
822  *
823  * Endpoint interrupt disable/enable functions
824  *
825  */
826 /* Enable one or more USB endpoint interrupts */
827 static void uda_enable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
828 {
829  udc->enabled_hwepints |= (1 << hwep);
830  writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
831 }
832 
833 /* Disable one or more USB endpoint interrupts */
834 static void uda_disable_hwepint(struct lpc32xx_udc *udc, u32 hwep)
835 {
836  udc->enabled_hwepints &= ~(1 << hwep);
837  writel(udc->enabled_hwepints, USBD_EPINTEN(udc->udp_baseaddr));
838 }
839 
840 /* Clear one or more USB endpoint interrupts */
841 static inline void uda_clear_hwepint(struct lpc32xx_udc *udc, u32 hwep)
842 {
843  writel((1 << hwep), USBD_EPINTCLR(udc->udp_baseaddr));
844 }
845 
846 /* Enable DMA for the HW channel */
847 static inline void udc_ep_dma_enable(struct lpc32xx_udc *udc, u32 hwep)
848 {
849  writel((1 << hwep), USBD_EPDMAEN(udc->udp_baseaddr));
850 }
851 
852 /* Disable DMA for the HW channel */
853 static inline void udc_ep_dma_disable(struct lpc32xx_udc *udc, u32 hwep)
854 {
855  writel((1 << hwep), USBD_EPDMADIS(udc->udp_baseaddr));
856 }
857 
858 /*
859  *
860  * Endpoint realize/unrealize functions
861  *
862  */
863 /* Before an endpoint can be used, it needs to be realized
864  * in the USB protocol engine - this realizes the endpoint.
865  * The interrupt (FIFO or DMA) is not enabled with this function */
866 static void udc_realize_hwep(struct lpc32xx_udc *udc, u32 hwep,
867  u32 maxpacket)
868 {
869  int to = 1000;
870 
871  writel(USBD_EP_RLZED, USBD_DEVINTCLR(udc->udp_baseaddr));
872  writel(hwep, USBD_EPIND(udc->udp_baseaddr));
873  udc->realized_eps |= (1 << hwep);
874  writel(udc->realized_eps, USBD_REEP(udc->udp_baseaddr));
875  writel(maxpacket, USBD_EPMAXPSIZE(udc->udp_baseaddr));
876 
877  /* Wait until endpoint is realized in hardware */
878  while ((!(readl(USBD_DEVINTST(udc->udp_baseaddr)) &
879  USBD_EP_RLZED)) && (to > 0))
880  to--;
881  if (!to)
882  dev_dbg(udc->dev, "EP not correctly realized in hardware\n");
883 
885 }
886 
887 /* Unrealize an EP */
888 static void udc_unrealize_hwep(struct lpc32xx_udc *udc, u32 hwep)
889 {
890  udc->realized_eps &= ~(1 << hwep);
892 }
893 
894 /*
895  *
896  * Endpoint support functions
897  *
898  */
899 /* Select and clear endpoint interrupt */
900 static u32 udc_selep_clrint(struct lpc32xx_udc *udc, u32 hwep)
901 {
902  udc_protocol_cmd_w(udc, CMD_SEL_EP_CLRI(hwep));
903  return udc_protocol_cmd_r(udc, DAT_SEL_EP_CLRI(hwep));
904 }
905 
906 /* Disables the endpoint in the USB protocol engine */
907 static void udc_disable_hwep(struct lpc32xx_udc *udc, u32 hwep)
908 {
909  udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
910  DAT_WR_BYTE(EP_STAT_DA));
911 }
912 
913 /* Stalls the endpoint - endpoint will return STALL */
914 static void udc_stall_hwep(struct lpc32xx_udc *udc, u32 hwep)
915 {
916  udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
917  DAT_WR_BYTE(EP_STAT_ST));
918 }
919 
920 /* Clear stall or reset endpoint */
921 static void udc_clrstall_hwep(struct lpc32xx_udc *udc, u32 hwep)
922 {
923  udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(hwep),
924  DAT_WR_BYTE(0));
925 }
926 
927 /* Select an endpoint for endpoint status, clear, validate */
928 static void udc_select_hwep(struct lpc32xx_udc *udc, u32 hwep)
929 {
930  udc_protocol_cmd_w(udc, CMD_SEL_EP(hwep));
931 }
932 
933 /*
934  *
935  * Endpoint buffer management functions
936  *
937  */
938 /* Clear the current endpoint's buffer */
939 static void udc_clr_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
940 {
941  udc_select_hwep(udc, hwep);
942  udc_protocol_cmd_w(udc, CMD_CLR_BUF);
943 }
944 
945 /* Validate the current endpoint's buffer */
946 static void udc_val_buffer_hwep(struct lpc32xx_udc *udc, u32 hwep)
947 {
948  udc_select_hwep(udc, hwep);
949  udc_protocol_cmd_w(udc, CMD_VALID_BUF);
950 }
951 
952 static inline u32 udc_clearep_getsts(struct lpc32xx_udc *udc, u32 hwep)
953 {
954  /* Clear EP interrupt */
955  uda_clear_hwepint(udc, hwep);
956  return udc_selep_clrint(udc, hwep);
957 }
958 
959 /*
960  *
961  * USB EP DMA support
962  *
963  */
964 /* Allocate a DMA Descriptor */
965 static struct lpc32xx_usbd_dd_gad *udc_dd_alloc(struct lpc32xx_udc *udc)
966 {
967  dma_addr_t dma;
968  struct lpc32xx_usbd_dd_gad *dd;
969 
970  dd = (struct lpc32xx_usbd_dd_gad *) dma_pool_alloc(
971  udc->dd_cache, (GFP_KERNEL | GFP_DMA), &dma);
972  if (dd)
973  dd->this_dma = dma;
974 
975  return dd;
976 }
977 
978 /* Free a DMA Descriptor */
979 static void udc_dd_free(struct lpc32xx_udc *udc, struct lpc32xx_usbd_dd_gad *dd)
980 {
981  dma_pool_free(udc->dd_cache, dd, dd->this_dma);
982 }
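/*
 * Sketch (assumption - the pool is created by the probe code, which is
 * outside this listing): dd_cache is a dma_pool of descriptor-sized,
 * word-aligned blocks so that udc_dd_alloc()/udc_dd_free() above can hand
 * out coherent DMA descriptors cheaply, along the lines of:
 *
 *	udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
 *					sizeof(struct lpc32xx_usbd_dd_gad),
 *					sizeof(u32), 0);
 */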
983 
984 /*
985  *
986  * USB setup and shutdown functions
987  *
988  */
989 /* Enables or disables most of the USB system clocks when low power mode is
990  * needed. Clocks are typically started on a connection event, and disabled
991  * when a cable is disconnected */
992 static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
993 {
994  if (enable != 0) {
995  if (udc->clocked)
996  return;
997 
998  udc->clocked = 1;
999 
1000  /* 48MHz PLL up */
1001  clk_enable(udc->usb_pll_clk);
1002 
1003  /* Enable the USB device clock */
1004  writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
1005  USB_CTRL);
1006 
1007  clk_enable(udc->usb_otg_clk);
1008  } else {
1009  if (!udc->clocked)
1010  return;
1011 
1012  udc->clocked = 0;
1013 
1014  /* Never disable the USB_HCLK during normal operation */
1015 
1016  /* 48MHz PLL down */
1017  clk_disable(udc->usb_pll_clk);
1018 
1019  /* Disable the USB device clock */
1020  writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
1021  USB_CTRL);
1022 
1023  clk_disable(udc->usb_otg_clk);
1024  }
1025 }
1026 
1027 /* Set/reset USB device address */
1028 static void udc_set_address(struct lpc32xx_udc *udc, u32 addr)
1029 {
1030  /* Address will be latched at the end of the status phase, or
1031  latched immediately if function is called twice */
1032  udc_protocol_cmd_data_w(udc, CMD_SET_ADDR,
1033  DAT_WR_BYTE(DEV_EN | addr));
1034 }
1035 
1036 /* Set up an IN request for DMA transfer - this consists of determining the
1037  * list of DMA addresses for the transfer, allocating DMA Descriptors,
1038  * installing the DD into the UDCA, and then enabling the DMA for that EP */
1039 static int udc_ep_in_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1040 {
1041  struct lpc32xx_request *req;
1042  u32 hwep = ep->hwep_num;
1043 
1044  ep->req_pending = 1;
1045 
1046  /* There will always be a request waiting here */
1047  req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1048 
1049  /* Place the DD Descriptor into the UDCA */
1050  udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1051 
1052  /* Enable DMA and interrupt for the HW EP */
1053  udc_ep_dma_enable(udc, hwep);
1054 
1055  /* Clear ZLP if last packet is not of MAXP size */
1056  if (req->req.length % ep->ep.maxpacket)
1057  req->send_zlp = 0;
1058 
1059  return 0;
1060 }
1061 
1062 /* Set up an OUT request for DMA transfer - this consists of determining the
1063  * list of DMA addresses for the transfer, allocating DMA Descriptors,
1064  * installing the DD into the UDCA, and then enabling the DMA for that EP */
1065 static int udc_ep_out_req_dma(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
1066 {
1067  struct lpc32xx_request *req;
1068  u32 hwep = ep->hwep_num;
1069 
1070  ep->req_pending = 1;
1071 
1072  /* There will always be a request waiting here */
1073  req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1074 
1075  /* Place the DD Descriptor into the UDCA */
1076  udc->udca_v_base[hwep] = req->dd_desc_ptr->this_dma;
1077 
1078  /* Enable DMA and interrupt for the HW EP */
1079  udc_ep_dma_enable(udc, hwep);
1080  return 0;
1081 }
1082 
1083 static void udc_disable(struct lpc32xx_udc *udc)
1084 {
1085  u32 i;
1086 
1087  /* Disable device */
1088  udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1089  udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(0));
1090 
1091  /* Disable all device interrupts (including EP0) */
1092  uda_disable_devint(udc, 0x3FF);
1093 
1094  /* Disable and reset all endpoint interrupts */
1095  for (i = 0; i < 32; i++) {
1096  uda_disable_hwepint(udc, i);
1097  uda_clear_hwepint(udc, i);
1098  udc_disable_hwep(udc, i);
1099  udc_unrealize_hwep(udc, i);
1100  udc->udca_v_base[i] = 0;
1101 
1102  /* Disable and clear all interrupts and DMA */
1103  udc_ep_dma_disable(udc, i);
1104  writel((1 << i), USBD_EOTINTCLR(udc->udp_baseaddr));
1105  writel((1 << i), USBD_NDDRTINTCLR(udc->udp_baseaddr));
1106  writel((1 << i), USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1107  writel((1 << i), USBD_DMARCLR(udc->udp_baseaddr));
1108  }
1109 
1110  /* Disable DMA interrupts */
1111  writel(0, USBD_DMAINTEN(udc->udp_baseaddr));
1112 
1113  writel(0, USBD_UDCAH(udc->udp_baseaddr));
1114 }
1115 
1116 static void udc_enable(struct lpc32xx_udc *udc)
1117 {
1118  u32 i;
1119  struct lpc32xx_ep *ep = &udc->ep[0];
1120 
1121  /* Start with known state */
1122  udc_disable(udc);
1123 
1124  /* Enable device */
1125  udc_protocol_cmd_data_w(udc, CMD_SET_DEV_STAT, DAT_WR_BYTE(DEV_CON));
1126 
1127  /* EP interrupts on high priority, FRAME interrupt on low priority */
1128  writel(USBD_EP_FAST, USBD_DEVINTPRI(udc->udp_baseaddr));
1129  writel(0xFFFF, USBD_EPINTPRI(udc->udp_baseaddr));
1130 
1131  /* Clear any pending device interrupts */
1132  writel(0x3FF, USBD_DEVINTCLR(udc->udp_baseaddr));
1133 
1134  /* Setup UDCA - not yet used (DMA) */
1135  writel(udc->udca_p_base, USBD_UDCAH(udc->udp_baseaddr));
1136 
1137  /* Only enable EP0 in and out for now, EP0 only works in FIFO mode */
1138  for (i = 0; i <= 1; i++) {
1139  udc_realize_hwep(udc, i, ep->ep.maxpacket);
1140  uda_enable_hwepint(udc, i);
1141  udc_select_hwep(udc, i);
1142  udc_clrstall_hwep(udc, i);
1143  udc_clr_buffer_hwep(udc, i);
1144  }
1145 
1146  /* Device interrupt setup */
1147  uda_clear_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1148  USBD_EP_FAST));
1149  uda_enable_devint(udc, (USBD_ERR_INT | USBD_DEV_STAT | USBD_EP_SLOW |
1150  USBD_EP_FAST));
1151 
1152  /* Set device address to 0 - called twice to force a latch in the USB
1153  engine without the need of a setup packet status closure */
1154  udc_set_address(udc, 0);
1155  udc_set_address(udc, 0);
1156 
1157  /* Enable master DMA interrupts */
1159  USBD_DMAINTEN(udc->udp_baseaddr));
1160 
1161  udc->dev_status = 0;
1162 }
1163 
1164 /*
1165  *
1166  * USB device board specific events handled via callbacks
1167  *
1168  */
1169 /* Connection change event - notify board function of change */
1170 static void uda_power_event(struct lpc32xx_udc *udc, u32 conn)
1171 {
1172  /* Just notify of a connection change event (optional) */
1173  if (udc->board->conn_chgb != NULL)
1174  udc->board->conn_chgb(conn);
1175 }
1176 
1177 /* Suspend/resume event - notify board function of change */
1178 static void uda_resm_susp_event(struct lpc32xx_udc *udc, u32 conn)
1179 {
1180  /* Just notify of a Suspend/resume change event (optional) */
1181  if (udc->board->susp_chgb != NULL)
1182  udc->board->susp_chgb(conn);
1183 
1184  if (conn)
1185  udc->suspended = 0;
1186  else
1187  udc->suspended = 1;
1188 }
1189 
1190 /* Remote wakeup enable/disable - notify board function of change */
1191 static void uda_remwkp_cgh(struct lpc32xx_udc *udc)
1192 {
1193  if (udc->board->rmwk_chgb != NULL)
1194  udc->board->rmwk_chgb(udc->dev_status &
1195  (1 << USB_DEVICE_REMOTE_WAKEUP));
1196 }
1197 
1198 /* Reads data from FIFO, adjusts for alignment and data size */
1199 static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1200 {
1201  int n, i, bl;
1202  u16 *p16;
1203  u32 *p32, tmp, cbytes;
1204 
1205  /* Use optimal data transfer method based on source address and size */
1206  switch (((u32) data) & 0x3) {
1207  case 0: /* 32-bit aligned */
1208  p32 = (u32 *) data;
1209  cbytes = (bytes & ~0x3);
1210 
1211  /* Copy 32-bit aligned data first */
1212  for (n = 0; n < cbytes; n += 4)
1213  *p32++ = readl(USBD_RXDATA(udc->udp_baseaddr));
1214 
1215  /* Handle any remaining bytes */
1216  bl = bytes - cbytes;
1217  if (bl) {
1218  tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1219  for (n = 0; n < bl; n++)
1220  data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1221 
1222  }
1223  break;
1224 
1225  case 1: /* 8-bit aligned */
1226  case 3:
1227  /* Each byte has to be handled independently */
1228  for (n = 0; n < bytes; n += 4) {
1229  tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1230 
1231  bl = bytes - n;
1232  if (bl > 4)
1233  bl = 4;
1234 
1235  for (i = 0; i < bl; i++)
1236  data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
1237  }
1238  break;
1239 
1240  case 2: /* 16-bit aligned */
1241  p16 = (u16 *) data;
1242  cbytes = (bytes & ~0x3);
1243 
1244  /* Copy 32-bit sized objects first with 16-bit alignment */
1245  for (n = 0; n < cbytes; n += 4) {
1246  tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1247  *p16++ = (u16)(tmp & 0xFFFF);
1248  *p16++ = (u16)((tmp >> 16) & 0xFFFF);
1249  }
1250 
1251  /* Handle any remaining bytes */
1252  bl = bytes - cbytes;
1253  if (bl) {
1254  tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1255  for (n = 0; n < bl; n++)
1256  data[cbytes + n] = ((tmp >> (n * 8)) & 0xFF);
1257  }
1258  break;
1259  }
1260 }
1261 
1262 /* Read data from the FIFO for an endpoint. This function is for endpoints (such
1263  * as EP0) that don't use DMA. This function should only be called if a packet
1264  * is known to be ready to read for the endpoint. Note that the endpoint must
1265  * be selected in the protocol engine prior to this call. */
1266 static u32 udc_read_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1267  u32 bytes)
1268 {
1269  u32 tmpv;
1270  int to = 1000;
1271  u32 tmp, hwrep = ((hwep & 0x1E) << 1) | CTRL_RD_EN;
1272 
1273  /* Setup read of endpoint */
1274  writel(hwrep, USBD_CTRL(udc->udp_baseaddr));
1275 
1276  /* Wait until packet is ready */
1277  while ((((tmpv = readl(USBD_RXPLEN(udc->udp_baseaddr))) &
1278  PKT_RDY) == 0) && (to > 0))
1279  to--;
1280  if (!to)
1281  dev_dbg(udc->dev, "No packet ready on FIFO EP read\n");
1282 
1283  /* Mask out count */
1284  tmp = tmpv & PKT_LNGTH_MASK;
1285  if (bytes < tmp)
1286  tmp = bytes;
1287 
1288  if ((tmp > 0) && (data != NULL))
1289  udc_pop_fifo(udc, (u8 *) data, tmp);
1290 
1291  writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1292 
1293  /* Clear the buffer */
1294  udc_clr_buffer_hwep(udc, hwep);
1295 
1296  return tmp;
1297 }
1298 
1299 /* Stuffs data into the FIFO, adjusts for alignment and data size */
1300 static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1301 {
1302  int n, i, bl;
1303  u16 *p16;
1304  u32 *p32, tmp, cbytes;
1305 
1306  /* Use optimal data transfer method based on source address and size */
1307  switch (((u32) data) & 0x3) {
1308  case 0: /* 32-bit aligned */
1309  p32 = (u32 *) data;
1310  cbytes = (bytes & ~0x3);
1311 
1312  /* Copy 32-bit aligned data first */
1313  for (n = 0; n < cbytes; n += 4)
1314  writel(*p32++, USBD_TXDATA(udc->udp_baseaddr));
1315 
1316  /* Handle any remaining bytes */
1317  bl = bytes - cbytes;
1318  if (bl) {
1319  tmp = 0;
1320  for (n = 0; n < bl; n++)
1321  tmp |= data[cbytes + n] << (n * 8);
1322 
1323  writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1324  }
1325  break;
1326 
1327  case 1: /* 8-bit aligned */
1328  case 3:
1329  /* Each byte has to be handled independently */
1330  for (n = 0; n < bytes; n += 4) {
1331  bl = bytes - n;
1332  if (bl > 4)
1333  bl = 4;
1334 
1335  tmp = 0;
1336  for (i = 0; i < bl; i++)
1337  tmp |= data[n + i] << (i * 8);
1338 
1339  writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1340  }
1341  break;
1342 
1343  case 2: /* 16-bit aligned */
1344  p16 = (u16 *) data;
1345  cbytes = (bytes & ~0x3);
1346 
1347  /* Copy 32-bit aligned data first */
1348  for (n = 0; n < cbytes; n += 4) {
1349  tmp = *p16++ & 0xFFFF;
1350  tmp |= (*p16++ & 0xFFFF) << 16;
1351  writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1352  }
1353 
1354  /* Handle any remaining bytes */
1355  bl = bytes - cbytes;
1356  if (bl) {
1357  tmp = 0;
1358  for (n = 0; n < bl; n++)
1359  tmp |= data[cbytes + n] << (n * 8);
1360 
1361  writel(tmp, USBD_TXDATA(udc->udp_baseaddr));
1362  }
1363  break;
1364  }
1365 }
1366 
1367 /* Write data to the FIFO for an endpoint. This function is for endpoints (such
1368  * as EP0) that don't use DMA. Note that the endpoint must be selected in the
1369  * protocol engine prior to this call. */
1370 static void udc_write_hwep(struct lpc32xx_udc *udc, u32 hwep, u32 *data,
1371  u32 bytes)
1372 {
1373  u32 hwwep = ((hwep & 0x1E) << 1) | CTRL_WR_EN;
1374 
1375  if ((bytes > 0) && (data == NULL))
1376  return;
1377 
1378  /* Setup write of endpoint */
1379  writel(hwwep, USBD_CTRL(udc->udp_baseaddr));
1380 
1381  writel(bytes, USBD_TXPLEN(udc->udp_baseaddr));
1382 
1383  /* Need at least 1 byte to trigger TX */
1384  if (bytes == 0)
1385  writel(0, USBD_TXDATA(udc->udp_baseaddr));
1386  else
1387  udc_stuff_fifo(udc, (u8 *) data, bytes);
1388 
1389  writel(((hwep & 0x1E) << 1), USBD_CTRL(udc->udp_baseaddr));
1390 
1391  udc_val_buffer_hwep(udc, hwep);
1392 }
1393 
1394 /* USB device reset - resets USB to a default state with just EP0
1395  enabled */
1396 static void uda_usb_reset(struct lpc32xx_udc *udc)
1397 {
1398  u32 i = 0;
1399  /* Re-init device controller and EP0 */
1400  udc_enable(udc);
1401  udc->gadget.speed = USB_SPEED_FULL;
1402 
1403  for (i = 1; i < NUM_ENDPOINTS; i++) {
1404  struct lpc32xx_ep *ep = &udc->ep[i];
1405  ep->req_pending = 0;
1406  }
1407 }
1408 
1409 /* Send a ZLP on EP0 */
1410 static void udc_ep0_send_zlp(struct lpc32xx_udc *udc)
1411 {
1412  udc_write_hwep(udc, EP_IN, NULL, 0);
1413 }
1414 
1415 /* Get current frame number */
1416 static u16 udc_get_current_frame(struct lpc32xx_udc *udc)
1417 {
1418  u16 flo, fhi;
1419 
1420  udc_protocol_cmd_w(udc, CMD_RD_FRAME);
1421  flo = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1422  fhi = (u16) udc_protocol_cmd_r(udc, DAT_RD_FRAME);
1423 
1424  return (fhi << 8) | flo;
1425 }
1426 
1427 /* Set the device as configured - enables all endpoints */
1428 static inline void udc_set_device_configured(struct lpc32xx_udc *udc)
1429 {
1430  udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(CONF_DVICE));
1431 }
1432 
1433 /* Set the device as unconfigured - disables all endpoints */
1434 static inline void udc_set_device_unconfigured(struct lpc32xx_udc *udc)
1435 {
1436  udc_protocol_cmd_data_w(udc, CMD_CFG_DEV, DAT_WR_BYTE(0));
1437 }
1438 
1439 /* reinit == restore initial software state */
1440 static void udc_reinit(struct lpc32xx_udc *udc)
1441 {
1442  u32 i;
1443 
1444  INIT_LIST_HEAD(&udc->gadget.ep_list);
1445  INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
1446 
1447  for (i = 0; i < NUM_ENDPOINTS; i++) {
1448  struct lpc32xx_ep *ep = &udc->ep[i];
1449 
1450  if (i != 0)
1451  list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1452  ep->ep.maxpacket = ep->maxpacket;
1453  INIT_LIST_HEAD(&ep->queue);
1454  ep->req_pending = 0;
1455  }
1456 
1457  udc->ep0state = WAIT_FOR_SETUP;
1458 }
1459 
1460 /* Must be called with lock */
1461 static void done(struct lpc32xx_ep *ep, struct lpc32xx_request *req, int status)
1462 {
1463  struct lpc32xx_udc *udc = ep->udc;
1464 
1465  list_del_init(&req->queue);
1466  if (req->req.status == -EINPROGRESS)
1467  req->req.status = status;
1468  else
1469  status = req->req.status;
1470 
1471  if (ep->lep) {
1472  enum dma_data_direction direction;
1473 
1474  if (ep->is_in)
1475  direction = DMA_TO_DEVICE;
1476  else
1477  direction = DMA_FROM_DEVICE;
1478 
1479  if (req->mapped) {
1480  dma_unmap_single(ep->udc->gadget.dev.parent,
1481  req->req.dma, req->req.length,
1482  direction);
1483  req->req.dma = 0;
1484  req->mapped = 0;
1485  } else
1486  dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
1487  req->req.dma, req->req.length,
1488  direction);
1489 
1490  /* Free DDs */
1491  udc_dd_free(udc, req->dd_desc_ptr);
1492  }
1493 
1494  if (status && status != -ESHUTDOWN)
1495  ep_dbg(ep, "%s done %p, status %d\n", ep->ep.name, req, status);
1496 
1497  ep->req_pending = 0;
1498  spin_unlock(&udc->lock);
1499  req->req.complete(&ep->ep, &req->req);
1500  spin_lock(&udc->lock);
1501 }
1502 
1503 /* Must be called with lock */
1504 static void nuke(struct lpc32xx_ep *ep, int status)
1505 {
1506  struct lpc32xx_request *req;
1507 
1508  while (!list_empty(&ep->queue)) {
1509  req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
1510  done(ep, req, status);
1511  }
1512 
1513  if (status == -ESHUTDOWN) {
1514  uda_disable_hwepint(ep->udc, ep->hwep_num);
1515  udc_disable_hwep(ep->udc, ep->hwep_num);
1516  }
1517 }
1518 
1519 /* IN endpoint 0 transfer */
1520 static int udc_ep0_in_req(struct lpc32xx_udc *udc)
1521 {
1522  struct lpc32xx_request *req;
1523  struct lpc32xx_ep *ep0 = &udc->ep[0];
1524  u32 tsend, ts = 0;
1525 
1526  if (list_empty(&ep0->queue))
1527  /* Nothing to send */
1528  return 0;
1529  else
1530  req = list_entry(ep0->queue.next, struct lpc32xx_request,
1531  queue);
1532 
1533  tsend = ts = req->req.length - req->req.actual;
1534  if (ts == 0) {
1535  /* Send a ZLP */
1536  udc_ep0_send_zlp(udc);
1537  done(ep0, req, 0);
1538  return 1;
1539  } else if (ts > ep0->ep.maxpacket)
1540  ts = ep0->ep.maxpacket; /* Just send what we can */
1541 
1542  /* Write data to the EP0 FIFO and start transfer */
1543  udc_write_hwep(udc, EP_IN, (req->req.buf + req->req.actual), ts);
1544 
1545  /* Increment data pointer */
1546  req->req.actual += ts;
1547 
1548  if (tsend >= ep0->ep.maxpacket)
1549  return 0; /* Stay in data transfer state */
1550 
1551  /* Transfer request is complete */
1552  udc->ep0state = WAIT_FOR_SETUP;
1553  done(ep0, req, 0);
1554  return 1;
1555 }
1556 
1557 /* OUT endpoint 0 transfer */
1558 static int udc_ep0_out_req(struct lpc32xx_udc *udc)
1559 {
1560  struct lpc32xx_request *req;
1561  struct lpc32xx_ep *ep0 = &udc->ep[0];
1562  u32 tr, bufferspace;
1563 
1564  if (list_empty(&ep0->queue))
1565  return 0;
1566  else
1567  req = list_entry(ep0->queue.next, struct lpc32xx_request,
1568  queue);
1569 
1570  if (req) {
1571  if (req->req.length == 0) {
1572  /* Just dequeue request */
1573  done(ep0, req, 0);
1574  udc->ep0state = WAIT_FOR_SETUP;
1575  return 1;
1576  }
1577 
1578  /* Get data from FIFO */
1579  bufferspace = req->req.length - req->req.actual;
1580  if (bufferspace > ep0->ep.maxpacket)
1581  bufferspace = ep0->ep.maxpacket;
1582 
1583  /* Copy data to buffer */
1584  prefetchw(req->req.buf + req->req.actual);
1585  tr = udc_read_hwep(udc, EP_OUT, req->req.buf + req->req.actual,
1586  bufferspace);
1587  req->req.actual += bufferspace;
1588 
1589  if (tr < ep0->ep.maxpacket) {
1590  /* This is the last packet */
1591  done(ep0, req, 0);
1592  udc->ep0state = WAIT_FOR_SETUP;
1593  return 1;
1594  }
1595  }
1596 
1597  return 0;
1598 }
1599 
1600 /* Must be called with lock */
1601 static void stop_activity(struct lpc32xx_udc *udc)
1602 {
1603  struct usb_gadget_driver *driver = udc->driver;
1604  int i;
1605 
1606  if (udc->gadget.speed == USB_SPEED_UNKNOWN)
1607  driver = NULL;
1608 
1609  udc->gadget.speed = USB_SPEED_UNKNOWN;
1610  udc->suspended = 0;
1611 
1612  for (i = 0; i < NUM_ENDPOINTS; i++) {
1613  struct lpc32xx_ep *ep = &udc->ep[i];
1614  nuke(ep, -ESHUTDOWN);
1615  }
1616  if (driver) {
1617  spin_unlock(&udc->lock);
1618  driver->disconnect(&udc->gadget);
1619  spin_lock(&udc->lock);
1620  }
1621 
1622  isp1301_pullup_enable(udc, 0, 0);
1623  udc_disable(udc);
1624  udc_reinit(udc);
1625 }
1626 
1627 /*
1628  * Activate or kill host pullup
1629  * Can be called with or without lock
1630  */
1631 static void pullup(struct lpc32xx_udc *udc, int is_on)
1632 {
1633  if (!udc->clocked)
1634  return;
1635 
1636  if (!udc->enabled || !udc->vbus)
1637  is_on = 0;
1638 
1639  if (is_on != udc->pullup)
1640  isp1301_pullup_enable(udc, is_on, 0);
1641 }
1642 
1643 /* Must be called without lock */
1644 static int lpc32xx_ep_disable(struct usb_ep *_ep)
1645 {
1646  struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1647  struct lpc32xx_udc *udc = ep->udc;
1648  unsigned long flags;
1649 
1650  if ((ep->hwep_num_base == 0) || (ep->hwep_num == 0))
1651  return -EINVAL;
1652  spin_lock_irqsave(&udc->lock, flags);
1653 
1654  nuke(ep, -ESHUTDOWN);
1655 
1656  /* Clear all DMA statuses for this EP */
1657  udc_ep_dma_disable(udc, ep->hwep_num);
1658  writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1659  writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1660  writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1661  writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1662 
1663  /* Remove the DD pointer in the UDCA */
1664  udc->udca_v_base[ep->hwep_num] = 0;
1665 
1666  /* Disable and reset endpoint and interrupt */
1667  uda_clear_hwepint(udc, ep->hwep_num);
1668  udc_unrealize_hwep(udc, ep->hwep_num);
1669 
1670  ep->hwep_num = 0;
1671 
1672  spin_unlock_irqrestore(&udc->lock, flags);
1673 
1674  atomic_dec(&udc->enabled_ep_cnt);
1675  wake_up(&udc->ep_disable_wait_queue);
1676 
1677  return 0;
1678 }
1679 
1680 /* Must be called without lock */
1681 static int lpc32xx_ep_enable(struct usb_ep *_ep,
1682  const struct usb_endpoint_descriptor *desc)
1683 {
1684  struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1685  struct lpc32xx_udc *udc = ep->udc;
1686  u16 maxpacket;
1687  u32 tmp;
1688  unsigned long flags;
1689 
1690  /* Verify EP data */
1691  if ((!_ep) || (!ep) || (!desc) ||
1692  (desc->bDescriptorType != USB_DT_ENDPOINT)) {
1693  dev_dbg(udc->dev, "bad ep or descriptor\n");
1694  return -EINVAL;
1695  }
1696  maxpacket = usb_endpoint_maxp(desc);
1697  if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
1698  dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
1699  return -EINVAL;
1700  }
1701 
1702  /* Don't touch EP0 */
1703  if (ep->hwep_num_base == 0) {
1704  dev_dbg(udc->dev, "Can't re-enable EP0!!!\n");
1705  return -EINVAL;
1706  }
1707 
1708  /* Is driver ready? */
1709  if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1710  dev_dbg(udc->dev, "bogus device state\n");
1711  return -ESHUTDOWN;
1712  }
1713 
1714  tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1715  switch (tmp) {
1716  case USB_ENDPOINT_XFER_CONTROL:
1717  return -EINVAL;
1718 
1719  case USB_ENDPOINT_XFER_INT:
1720  if (maxpacket > ep->maxpacket) {
1721  dev_dbg(udc->dev,
1722  "Bad INT endpoint maxpacket %d\n", maxpacket);
1723  return -EINVAL;
1724  }
1725  break;
1726 
1727  case USB_ENDPOINT_XFER_BULK:
1728  switch (maxpacket) {
1729  case 8:
1730  case 16:
1731  case 32:
1732  case 64:
1733  break;
1734 
1735  default:
1736  dev_dbg(udc->dev,
1737  "Bad BULK endpoint maxpacket %d\n", maxpacket);
1738  return -EINVAL;
1739  }
1740  break;
1741 
1742  case USB_ENDPOINT_XFER_ISOC:
1743  break;
1744  }
1745  spin_lock_irqsave(&udc->lock, flags);
1746 
1747  /* Initialize endpoint to match the selected descriptor */
1748  ep->is_in = (desc->bEndpointAddress & USB_DIR_IN) != 0;
1749  ep->ep.maxpacket = maxpacket;
1750 
1751  /* Map hardware endpoint from base and direction */
1752  if (ep->is_in)
1753  /* IN endpoints are offset 1 from the OUT endpoint */
1754  ep->hwep_num = ep->hwep_num_base + EP_IN;
1755  else
1756  ep->hwep_num = ep->hwep_num_base;
1757 
1758  ep_dbg(ep, "EP enabled: %s, HW:%d, MP:%d IN:%d\n", ep->ep.name,
1759  ep->hwep_num, maxpacket, (ep->is_in == 1));
1760 
1761  /* Realize the endpoint, interrupt is enabled later when
1762  * buffers are queued, IN EPs will NAK until buffers are ready */
1763  udc_realize_hwep(udc, ep->hwep_num, ep->ep.maxpacket);
1764  udc_clr_buffer_hwep(udc, ep->hwep_num);
1765  uda_disable_hwepint(udc, ep->hwep_num);
1766  udc_clrstall_hwep(udc, ep->hwep_num);
1767 
1768  /* Clear all DMA statuses for this EP */
1769  udc_ep_dma_disable(udc, ep->hwep_num);
1770  writel(1 << ep->hwep_num, USBD_EOTINTCLR(udc->udp_baseaddr));
1771  writel(1 << ep->hwep_num, USBD_NDDRTINTCLR(udc->udp_baseaddr));
1772  writel(1 << ep->hwep_num, USBD_SYSERRTINTCLR(udc->udp_baseaddr));
1773  writel(1 << ep->hwep_num, USBD_DMARCLR(udc->udp_baseaddr));
1774 
1775  spin_unlock_irqrestore(&udc->lock, flags);
1776 
1777  atomic_inc(&udc->enabled_ep_cnt);
1778  return 0;
1779 }
1780 
1781 /*
1782  * Allocate a USB request list
1783  * Can be called with or without lock
1784  */
1785 static struct usb_request *lpc32xx_ep_alloc_request(struct usb_ep *_ep,
1786  gfp_t gfp_flags)
1787 {
1788  struct lpc32xx_request *req;
1789 
1790  req = kzalloc(sizeof(struct lpc32xx_request), gfp_flags);
1791  if (!req)
1792  return NULL;
1793 
1794  INIT_LIST_HEAD(&req->queue);
1795  return &req->req;
1796 }
1797 
1798 /*
1799  * De-allocate a USB request list
1800  * Can be called with or without lock
1801  */
1802 static void lpc32xx_ep_free_request(struct usb_ep *_ep,
1803  struct usb_request *_req)
1804 {
1805  struct lpc32xx_request *req;
1806 
1807  req = container_of(_req, struct lpc32xx_request, req);
1808  BUG_ON(!list_empty(&req->queue));
1809  kfree(req);
1810 }
1811 
1812 /* Must be called without lock */
1813 static int lpc32xx_ep_queue(struct usb_ep *_ep,
1814  struct usb_request *_req, gfp_t gfp_flags)
1815 {
1816  struct lpc32xx_request *req;
1817  struct lpc32xx_ep *ep;
1818  struct lpc32xx_udc *udc;
1819  unsigned long flags;
1820  int status = 0;
1821 
1822  req = container_of(_req, struct lpc32xx_request, req);
1823  ep = container_of(_ep, struct lpc32xx_ep, ep);
1824 
1825  if (!_req || !_req->complete || !_req->buf ||
1826  !list_empty(&req->queue))
1827  return -EINVAL;
1828 
1829  udc = ep->udc;
1830 
1831  if (!_ep) {
1832  dev_dbg(udc->dev, "invalid ep\n");
1833  return -EINVAL;
1834  }
1835 
1836 
1837  if ((!udc) || (!udc->driver) ||
1838  (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
1839  dev_dbg(udc->dev, "invalid device\n");
1840  return -EINVAL;
1841  }
1842 
1843  if (ep->lep) {
1844  enum dma_data_direction direction;
1845  struct lpc32xx_usbd_dd_gad *dd;
1846 
1847  /* Map DMA pointer */
1848  if (ep->is_in)
1849  direction = DMA_TO_DEVICE;
1850  else
1851  direction = DMA_FROM_DEVICE;
1852 
1853  if (req->req.dma == 0) {
1854  req->req.dma = dma_map_single(
1855  ep->udc->gadget.dev.parent,
1856  req->req.buf, req->req.length, direction);
1857  req->mapped = 1;
1858  } else {
1859  dma_sync_single_for_device(
1860  ep->udc->gadget.dev.parent, req->req.dma,
1861  req->req.length, direction);
1862  req->mapped = 0;
1863  }
1864 
1865  /* For the request, build a list of DDs */
1866  dd = udc_dd_alloc(udc);
1867  if (!dd) {
1868  /* Error allocating DD */
1869  return -ENOMEM;
1870  }
1871  req->dd_desc_ptr = dd;
1872 
1873  /* Setup the DMA descriptor */
1874  dd->dd_next_phy = dd->dd_next_v = 0;
1875  dd->dd_buffer_addr = req->req.dma;
1876  dd->dd_status = 0;
1877 
1878  /* Special handling for ISO EPs */
1879  if (ep->eptype == EP_ISO_TYPE) {
1880  dd->dd_setup = DD_SETUP_ISO_EP |
1881  DD_SETUP_PACKETLEN(0) |
1882  DD_SETUP_DMALENBYTES(1);
1883  dd->dd_iso_ps_mem_addr = dd->this_dma + 24;
1884  if (ep->is_in)
1885  dd->iso_status[0] = req->req.length;
1886  else
1887  dd->iso_status[0] = 0;
1888  } else
1889  dd->dd_setup = DD_SETUP_PACKETLEN(ep->ep.maxpacket) |
1890  DD_SETUP_DMALENBYTES(req->req.length);
1891  }
1892 
1893  ep_dbg(ep, "%s queue req %p len %d buf %p (in=%d) z=%d\n", _ep->name,
1894  _req, _req->length, _req->buf, ep->is_in, _req->zero);
1895 
1896  spin_lock_irqsave(&udc->lock, flags);
1897 
1898  _req->status = -EINPROGRESS;
1899  _req->actual = 0;
1900  req->send_zlp = _req->zero;
1901 
1902  /* Kickstart empty queues */
1903  if (list_empty(&ep->queue)) {
1904  list_add_tail(&req->queue, &ep->queue);
1905 
1906  if (ep->hwep_num_base == 0) {
1907  /* Handle expected data direction */
1908  if (ep->is_in) {
1909  /* IN packet to host */
1910  udc->ep0state = DATA_IN;
1911  status = udc_ep0_in_req(udc);
1912  } else {
1913  /* OUT packet from host */
1914  udc->ep0state = DATA_OUT;
1915  status = udc_ep0_out_req(udc);
1916  }
1917  } else if (ep->is_in) {
1918  /* IN packet to host and kick off transfer */
1919  if (!ep->req_pending)
1920  udc_ep_in_req_dma(udc, ep);
1921  } else
1922  /* OUT packet from host and kick off list */
1923  if (!ep->req_pending)
1924  udc_ep_out_req_dma(udc, ep);
1925  } else
1926  list_add_tail(&req->queue, &ep->queue);
1927 
1928  spin_unlock_irqrestore(&udc->lock, flags);
1929 
1930  return (status < 0) ? status : 0;
1931 }
1932 
1933 /* Must be called without lock */
1934 static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1935 {
1936  struct lpc32xx_ep *ep;
1937  struct lpc32xx_request *req;
1938  unsigned long flags;
1939 
1940  ep = container_of(_ep, struct lpc32xx_ep, ep);
1941  if (!_ep || ep->hwep_num_base == 0)
1942  return -EINVAL;
1943 
1944  spin_lock_irqsave(&ep->udc->lock, flags);
1945 
1946  /* make sure it's actually queued on this endpoint */
1947  list_for_each_entry(req, &ep->queue, queue) {
1948  if (&req->req == _req)
1949  break;
1950  }
1951  if (&req->req != _req) {
1952  spin_unlock_irqrestore(&ep->udc->lock, flags);
1953  return -EINVAL;
1954  }
1955 
1956  done(ep, req, -ECONNRESET);
1957 
1958  spin_unlock_irqrestore(&ep->udc->lock, flags);
1959 
1960  return 0;
1961 }
1962 
1963 /* Must be called without lock */
1964 static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
1965 {
1966  struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1967  struct lpc32xx_udc *udc = ep->udc;
1968  unsigned long flags;
1969 
1970  if ((!ep) || (ep->hwep_num <= 1))
1971  return -EINVAL;
1972 
1973  /* Don't halt an IN EP */
1974  if (ep->is_in)
1975  return -EAGAIN;
1976 
1977  spin_lock_irqsave(&udc->lock, flags);
1978 
1979  if (value == 1) {
1980  /* stall */
1981  udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1982  DAT_WR_BYTE(EP_STAT_ST));
1983  } else {
1984  /* End stall */
1985  ep->wedge = 0;
1986  udc_protocol_cmd_data_w(udc, CMD_SET_EP_STAT(ep->hwep_num),
1987  DAT_WR_BYTE(0));
1988  }
1989 
1990  spin_unlock_irqrestore(&udc->lock, flags);
1991 
1992  return 0;
1993 }
1994 
1995 /* Set the halt feature and ignore clear requests */
1996 static int lpc32xx_ep_set_wedge(struct usb_ep *_ep)
1997 {
1998  struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
1999 
2000  if (!_ep || !ep->udc)
2001  return -EINVAL;
2002 
2003  ep->wedge = 1;
2004 
2005  return usb_ep_set_halt(_ep);
2006 }
2007 
2008 static const struct usb_ep_ops lpc32xx_ep_ops = {
2009  .enable = lpc32xx_ep_enable,
2010  .disable = lpc32xx_ep_disable,
2011  .alloc_request = lpc32xx_ep_alloc_request,
2012  .free_request = lpc32xx_ep_free_request,
2013  .queue = lpc32xx_ep_queue,
2014  .dequeue = lpc32xx_ep_dequeue,
2015  .set_halt = lpc32xx_ep_set_halt,
2016  .set_wedge = lpc32xx_ep_set_wedge,
2017 };
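/*
 * Editor's sketch (not part of the original driver): function drivers
 * reach the ops above only through the generic usb_ep API from
 * <linux/usb/gadget.h>. The endpoint, buffer and completion callback
 * below are hypothetical; usb_ep_alloc_request() and usb_ep_queue() are
 * the real entry points that end up in lpc32xx_ep_alloc_request() and
 * lpc32xx_ep_queue().
 */
#if 0
static int example_queue_bulk_in(struct usb_ep *ep, void *buf,
				 unsigned len)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	/* Ask for a ZLP when the transfer fills the last packet exactly */
	req->zero = (len % ep->maxpacket) == 0;
	req->complete = example_complete;	/* hypothetical callback */

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif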
2018 
2019 /* Send a ZLP on a non-0 IN EP */
2020 void udc_send_in_zlp(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2021 {
2022  /* Clear EP status */
2023  udc_clearep_getsts(udc, ep->hwep_num);
2024 
2025  /* Send ZLP via FIFO mechanism */
2026  udc_write_hwep(udc, ep->hwep_num, NULL, 0);
2027 }
2028 
2029 /*
2030  * Handle EP completion for ZLP
2031  * This function will only be called when a delayed ZLP needs to be sent out
2032  * after a DMA transfer has filled both buffers.
2033  */
2034 void udc_handle_eps(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2035 {
2036  u32 epstatus;
2037  struct lpc32xx_request *req;
2038 
2039  if (ep->hwep_num <= 0)
2040  return;
2041 
2042  uda_clear_hwepint(udc, ep->hwep_num);
2043 
2044  /* If this interrupt isn't enabled, return now */
2045  if (!(udc->enabled_hwepints & (1 << ep->hwep_num)))
2046  return;
2047 
2048  /* Get endpoint status */
2049  epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2050 
2051  /*
2052  * This should never happen, but protect against writing to the
2053  * buffer when full.
2054  */
2055  if (epstatus & EP_SEL_F)
2056  return;
2057 
2058  if (ep->is_in) {
2059  udc_send_in_zlp(udc, ep);
2060  uda_disable_hwepint(udc, ep->hwep_num);
2061  } else
2062  return;
2063 
2064  /* If there isn't a request waiting, something went wrong */
2065  req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2066  if (req) {
2067  done(ep, req, 0);
2068 
2069  /* Start another request if ready */
2070  if (!list_empty(&ep->queue)) {
2071  if (ep->is_in)
2072  udc_ep_in_req_dma(udc, ep);
2073  else
2074  udc_ep_out_req_dma(udc, ep);
2075  } else
2076  ep->req_pending = 0;
2077  }
2078 }
2079 
2080 
2081 /* DMA end of transfer completion */
2082 static void udc_handle_dma_ep(struct lpc32xx_udc *udc, struct lpc32xx_ep *ep)
2083 {
2084  u32 status, epstatus;
2085  struct lpc32xx_request *req;
2086  struct lpc32xx_usbd_dd_gad *dd;
2087 
2088 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2089  ep->totalints++;
2090 #endif
2091 
2092  req = list_entry(ep->queue.next, struct lpc32xx_request, queue);
2093  if (!req) {
2094  ep_err(ep, "DMA interrupt on no req!\n");
2095  return;
2096  }
2097  dd = req->dd_desc_ptr;
2098 
2099  /* DMA descriptor should always be retired for this call */
2100  if (!(dd->dd_status & DD_STATUS_DD_RETIRED))
2101  ep_warn(ep, "DMA descriptor did not retire\n");
2102 
2103  /* Disable DMA */
2104  udc_ep_dma_disable(udc, ep->hwep_num);
2105  writel((1 << ep->hwep_num), USBD_EOTINTCLR(udc->udp_baseaddr));
2106  writel((1 << ep->hwep_num), USBD_NDDRTINTCLR(udc->udp_baseaddr));
2107 
2108  /* System error? */
2109  if (readl(USBD_SYSERRTINTST(udc->udp_baseaddr)) &
2110  (1 << ep->hwep_num)) {
2111  writel((1 << ep->hwep_num),
2112  USBD_SYSERRTINTCLR(udc->udp_baseaddr));
2113  ep_err(ep, "AHB critical error!\n");
2114  ep->req_pending = 0;
2115 
2116  /* The error could have occurred on a packet of a multipacket
2117  * transfer, so recovering the transfer is not possible. Close
2118  * the request with an error */
2119  done(ep, req, -ECONNABORTED);
2120  return;
2121  }
2122 
2123  /* Handle the current DD's status */
2124  status = dd->dd_status;
2125  switch (status & DD_STATUS_STS_MASK) {
2126  case DD_STATUS_STS_NS:
2127  /* DD not serviced? This shouldn't happen! */
2128  ep->req_pending = 0;
2129  ep_err(ep, "DMA critical EP error: DD not serviced (0x%x)!\n",
2130  status);
2131 
2132  done(ep, req, -ECONNABORTED);
2133  return;
2134 
2135  case DD_STATUS_STS_BS:
2136  /* Interrupt only fires on EOT - This shouldn't happen! */
2137  ep->req_pending = 0;
2138  ep_err(ep, "DMA critical EP error: EOT prior to service completion (0x%x)!\n",
2139  status);
2140  done(ep, req, -ECONNABORTED);
2141  return;
2142 
2143  case DD_STATUS_STS_NC:
2144  case DD_STATUS_STS_DUR:
2145  /* Really just a short packet, not an underrun */
2146  /* This is a good status and what we expect */
2147  break;
2148 
2149  default:
2150  /* Data overrun, system error, or unknown */
2151  ep->req_pending = 0;
2152  ep_err(ep, "DMA critical EP error: System error (0x%x)!\n",
2153  status);
2154  done(ep, req, -ECONNABORTED);
2155  return;
2156  }
2157 
2158  /* ISO endpoints are handled differently */
2159  if (ep->eptype == EP_ISO_TYPE) {
2160  if (ep->is_in)
2161  req->req.actual = req->req.length;
2162  else
2163  req->req.actual = dd->iso_status[0] & 0xFFFF;
2164  } else
2165  req->req.actual += DD_STATUS_CURDMACNT(status);
2166 
2167  /* Send a ZLP if necessary. This will be done for non-int
2168  * packets whose size is an exact multiple of MAXP */
2169  if (req->send_zlp) {
2170  /*
2171  * If at least 1 buffer is available, send the ZLP now.
2172  * Otherwise, the ZLP send needs to be deferred until a
2173  * buffer is available.
2174  */
2175  if (udc_clearep_getsts(udc, ep->hwep_num) & EP_SEL_F) {
2176  udc_clearep_getsts(udc, ep->hwep_num);
2177  uda_enable_hwepint(udc, ep->hwep_num);
2178  epstatus = udc_clearep_getsts(udc, ep->hwep_num);
2179 
2180  /* Let the EP interrupt handle the ZLP */
2181  return;
2182  } else
2183  udc_send_in_zlp(udc, ep);
2184  }
2185 
2186  /* Transfer request is complete */
2187  done(ep, req, 0);
2188 
2189  /* Start another request if ready */
2190  udc_clearep_getsts(udc, ep->hwep_num);
2191  if (!list_empty((&ep->queue))) {
2192  if (ep->is_in)
2193  udc_ep_in_req_dma(udc, ep);
2194  else
2195  udc_ep_out_req_dma(udc, ep);
2196  } else
2197  ep->req_pending = 0;
2198 
2199 }
2200 
2201 /*
2202  *
2203  * Endpoint 0 functions
2204  *
2205  */
2206 static void udc_handle_dev(struct lpc32xx_udc *udc)
2207 {
2208  u32 tmp;
2209 
2210  udc_protocol_cmd_w(udc, CMD_GET_DEV_STAT);
2211  tmp = udc_protocol_cmd_r(udc, DAT_GET_DEV_STAT);
2212 
2213  if (tmp & DEV_RST)
2214  uda_usb_reset(udc);
2215  else if (tmp & DEV_CON_CH)
2216  uda_power_event(udc, (tmp & DEV_CON));
2217  else if (tmp & DEV_SUS_CH) {
2218  if (tmp & DEV_SUS) {
2219  if (udc->vbus == 0)
2220  stop_activity(udc);
2221  else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2222  udc->driver) {
2223  /* Power down transceiver */
2224  udc->poweron = 0;
2225  schedule_work(&udc->pullup_job);
2226  uda_resm_susp_event(udc, 1);
2227  }
2228  } else if ((udc->gadget.speed != USB_SPEED_UNKNOWN) &&
2229  udc->driver && udc->vbus) {
2230  uda_resm_susp_event(udc, 0);
2231  /* Power up transceiver */
2232  udc->poweron = 1;
2233  schedule_work(&udc->pullup_job);
2234  }
2235  }
2236 }
2237 
2238 static int udc_get_status(struct lpc32xx_udc *udc, u16 reqtype, u16 wIndex)
2239 {
2240  struct lpc32xx_ep *ep;
2241  u32 ep0buff = 0, tmp;
2242 
2243  switch (reqtype & USB_RECIP_MASK) {
2244  case USB_RECIP_INTERFACE:
2245  break; /* Not supported */
2246 
2247  case USB_RECIP_DEVICE:
2248  ep0buff = (udc->selfpowered << USB_DEVICE_SELF_POWERED);
2249  if (udc->dev_status & (1 << USB_DEVICE_REMOTE_WAKEUP))
2250  ep0buff |= (1 << USB_DEVICE_REMOTE_WAKEUP);
2251  break;
2252 
2253  case USB_RECIP_ENDPOINT:
2254  tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2255  if ((tmp == 0) || (tmp >= NUM_ENDPOINTS))
2256  return -EOPNOTSUPP;
2257  ep = &udc->ep[tmp];
2258 
2259  if (wIndex & USB_DIR_IN) {
2260  if (!ep->is_in)
2261  return -EOPNOTSUPP; /* Something's wrong */
2262  } else if (ep->is_in)
2263  return -EOPNOTSUPP; /* Not an OUT endpoint */
2264 
2265  /* Get status of the endpoint */
2266  udc_protocol_cmd_w(udc, CMD_SEL_EP(ep->hwep_num));
2267  tmp = udc_protocol_cmd_r(udc, DAT_SEL_EP(ep->hwep_num));
2268 
2269  if (tmp & EP_SEL_ST)
2270  ep0buff = (1 << USB_ENDPOINT_HALT);
2271  else
2272  ep0buff = 0;
2273  break;
2274 
2275  default:
2276  break;
2277  }
2278 
2279  /* Return data */
2280  udc_write_hwep(udc, EP_IN, &ep0buff, 2);
2281 
2282  return 0;
2283 }
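/*
 * Editor's note (sketch, not driver code): the two bytes written to EP0
 * above follow the standard USB GET_STATUS layout from
 * <linux/usb/ch9.h>; the variable names below are hypothetical.
 */
#if 0
	u16 status = 0;
	if (self_powered)
		status |= 1 << USB_DEVICE_SELF_POWERED;		/* bit 0 */
	if (remote_wakeup_enabled)
		status |= 1 << USB_DEVICE_REMOTE_WAKEUP;	/* bit 1 */
#endif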
2284 
2285 static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
2286 {
2287  struct lpc32xx_ep *ep, *ep0 = &udc->ep[0];
2288  struct usb_ctrlrequest ctrlpkt;
2289  int i, bytes;
2290  u16 wIndex, wValue, wLength, reqtype, req, tmp;
2291 
2292  /* Nuke previous transfers */
2293  nuke(ep0, -EPROTO);
2294 
2295  /* Get setup packet */
2296  bytes = udc_read_hwep(udc, EP_OUT, (u32 *) &ctrlpkt, 8);
2297  if (bytes != 8) {
2298  ep_warn(ep0, "Incorrectly sized setup packet (s/b 8, is %d)!\n",
2299  bytes);
2300  return;
2301  }
2302 
2303  /* Native endianness */
2304  wIndex = le16_to_cpu(ctrlpkt.wIndex);
2305  wValue = le16_to_cpu(ctrlpkt.wValue);
2306  wLength = le16_to_cpu(ctrlpkt.wLength);
2307  reqtype = le16_to_cpu(ctrlpkt.bRequestType);
2308 
2309  /* Set direction of EP0 */
2310  if (likely(reqtype & USB_DIR_IN))
2311  ep0->is_in = 1;
2312  else
2313  ep0->is_in = 0;
2314 
2315  /* Handle SETUP packet */
2316  req = le16_to_cpu(ctrlpkt.bRequest);
2317  switch (req) {
2318  case USB_REQ_CLEAR_FEATURE:
2319  case USB_REQ_SET_FEATURE:
2320  switch (reqtype) {
2321  case (USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2322  if (wValue != USB_DEVICE_REMOTE_WAKEUP)
2323  goto stall; /* Nothing else handled */
2324 
2325  /* Tell board about event */
2326  if (req == USB_REQ_CLEAR_FEATURE)
2327  udc->dev_status &=
2328  ~(1 << USB_DEVICE_REMOTE_WAKEUP);
2329  else
2330  udc->dev_status |=
2331  (1 << USB_DEVICE_REMOTE_WAKEUP);
2332  uda_remwkp_cgh(udc);
2333  goto zlp_send;
2334 
2335  case (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2336  tmp = wIndex & USB_ENDPOINT_NUMBER_MASK;
2337  if ((wValue != USB_ENDPOINT_HALT) ||
2338  (tmp >= NUM_ENDPOINTS))
2339  break;
2340 
2341  /* Find hardware endpoint from logical endpoint */
2342  ep = &udc->ep[tmp];
2343  tmp = ep->hwep_num;
2344  if (tmp == 0)
2345  break;
2346 
2347  if (req == USB_REQ_SET_FEATURE)
2348  udc_stall_hwep(udc, tmp);
2349  else if (!ep->wedge)
2350  udc_clrstall_hwep(udc, tmp);
2351 
2352  goto zlp_send;
2353 
2354  default:
2355  break;
2356  }
2357 
2358 
2359  case USB_REQ_SET_ADDRESS:
2360  if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
2361  udc_set_address(udc, wValue);
2362  goto zlp_send;
2363  }
2364  break;
2365 
2366  case USB_REQ_GET_STATUS:
2367  udc_get_status(udc, reqtype, wIndex);
2368  return;
2369 
2370  default:
2371  break; /* Let GadgetFS handle the descriptor instead */
2372  }
2373 
2374  if (likely(udc->driver)) {
2375  /* device-2-host (IN) or no data setup command, process
2376  * immediately */
2377  spin_unlock(&udc->lock);
2378  i = udc->driver->setup(&udc->gadget, &ctrlpkt);
2379 
2380  spin_lock(&udc->lock);
2381  if (req == USB_REQ_SET_CONFIGURATION) {
2382  /* Configuration is set after endpoints are realized */
2383  if (wValue) {
2384  /* Set configuration */
2385  udc_set_device_configured(udc);
2386 
2387  udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2388  DAT_WR_BYTE(AP_CLK |
2389  INAK_BI | INAK_II));
2390  } else {
2391  /* Clear configuration */
2392  udc_set_device_unconfigured(udc);
2393 
2394  /* Disable NAK interrupts */
2395  udc_protocol_cmd_data_w(udc, CMD_SET_MODE,
2396  DAT_WR_BYTE(AP_CLK));
2397  }
2398  }
2399 
2400  if (i < 0) {
2401  /* setup processing failed, force stall */
2402  dev_err(udc->dev,
2403  "req %02x.%02x protocol STALL; stat %d\n",
2404  reqtype, req, i);
2405  udc->ep0state = WAIT_FOR_SETUP;
2406  goto stall;
2407  }
2408  }
2409 
2410  if (!ep0->is_in)
2411  udc_ep0_send_zlp(udc); /* ZLP IN packet on data phase */
2412 
2413  return;
2414 
2415 stall:
2416  udc_stall_hwep(udc, EP_IN);
2417  return;
2418 
2419 zlp_send:
2420  udc_ep0_send_zlp(udc);
2421  return;
2422 }
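/*
 * Editor's sketch (not part of the original driver): SETUP requests not
 * handled above are forwarded to the function driver via
 * driver->setup(), called with udc->lock dropped. A minimal gadget-side
 * handler could look like this; example_setup() and example_set_config()
 * are hypothetical names.
 */
#if 0
static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	u16 w_value = le16_to_cpu(ctrl->wValue);

	switch (ctrl->bRequest) {
	case USB_REQ_SET_CONFIGURATION:
		return example_set_config(gadget, w_value);
	default:
		/* A negative return makes udc_handle_ep0_setup() stall EP0 */
		return -EOPNOTSUPP;
	}
}
#endif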
2423 
2424 /* IN endpoint 0 transfer */
2425 static void udc_handle_ep0_in(struct lpc32xx_udc *udc)
2426 {
2427  struct lpc32xx_ep *ep0 = &udc->ep[0];
2428  u32 epstatus;
2429 
2430  /* Clear EP interrupt */
2431  epstatus = udc_clearep_getsts(udc, EP_IN);
2432 
2433 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2434  ep0->totalints++;
2435 #endif
2436 
2437  /* Stalled? Clear stall and reset buffers */
2438  if (epstatus & EP_SEL_ST) {
2439  udc_clrstall_hwep(udc, EP_IN);
2440  nuke(ep0, -ECONNABORTED);
2441  udc->ep0state = WAIT_FOR_SETUP;
2442  return;
2443  }
2444 
2445  /* Is a buffer available? */
2446  if (!(epstatus & EP_SEL_F)) {
2447  /* Handle based on current state */
2448  if (udc->ep0state == DATA_IN)
2449  udc_ep0_in_req(udc);
2450  else {
2451  /* Unknown state for EP0 or end of DATA IN phase */
2452  nuke(ep0, -ECONNABORTED);
2453  udc->ep0state = WAIT_FOR_SETUP;
2454  }
2455  }
2456 }
2457 
2458 /* OUT endpoint 0 transfer */
2459 static void udc_handle_ep0_out(struct lpc32xx_udc *udc)
2460 {
2461  struct lpc32xx_ep *ep0 = &udc->ep[0];
2462  u32 epstatus;
2463 
2464  /* Clear EP interrupt */
2465  epstatus = udc_clearep_getsts(udc, EP_OUT);
2466 
2467 
2468 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
2469  ep0->totalints++;
2470 #endif
2471 
2472  /* Stalled? */
2473  if (epstatus & EP_SEL_ST) {
2474  udc_clrstall_hwep(udc, EP_OUT);
2475  nuke(ep0, -ECONNABORTED);
2476  udc->ep0state = WAIT_FOR_SETUP;
2477  return;
2478  }
2479 
2480  /* A NAK may occur if a packet couldn't be received yet */
2481  if (epstatus & EP_SEL_EPN)
2482  return;
2483  /* Setup packet incoming? */
2484  if (epstatus & EP_SEL_STP) {
2485  nuke(ep0, 0);
2486  udc->ep0state = WAIT_FOR_SETUP;
2487  }
2488 
2489  /* Data available? */
2490  if (epstatus & EP_SEL_F)
2491  /* Handle based on current state */
2492  switch (udc->ep0state) {
2493  case WAIT_FOR_SETUP:
2494  udc_handle_ep0_setup(udc);
2495  break;
2496 
2497  case DATA_OUT:
2498  udc_ep0_out_req(udc);
2499  break;
2500 
2501  default:
2502  /* Unknown state for EP0 */
2503  nuke(ep0, -ECONNABORTED);
2504  udc->ep0state = WAIT_FOR_SETUP;
2505  }
2506 }
2507 
2508 /* Must be called without lock */
2509 static int lpc32xx_get_frame(struct usb_gadget *gadget)
2510 {
2511  int frame;
2512  unsigned long flags;
2513  struct lpc32xx_udc *udc = to_udc(gadget);
2514 
2515  if (!udc->clocked)
2516  return -EINVAL;
2517 
2518  spin_lock_irqsave(&udc->lock, flags);
2519 
2520  frame = (int) udc_get_current_frame(udc);
2521 
2522  spin_unlock_irqrestore(&udc->lock, flags);
2523 
2524  return frame;
2525 }
2526 
2527 static int lpc32xx_wakeup(struct usb_gadget *gadget)
2528 {
2529  return -ENOTSUPP;
2530 }
2531 
2532 static int lpc32xx_set_selfpowered(struct usb_gadget *gadget, int is_on)
2533 {
2534  struct lpc32xx_udc *udc = to_udc(gadget);
2535 
2536  /* Always self-powered */
2537  udc->selfpowered = (is_on != 0);
2538 
2539  return 0;
2540 }
2541 
2542 /*
2543  * vbus is here! turn everything on that's ready
2544  * Must be called without lock
2545  */
2546 static int lpc32xx_vbus_session(struct usb_gadget *gadget, int is_active)
2547 {
2548  unsigned long flags;
2549  struct lpc32xx_udc *udc = to_udc(gadget);
2550 
2551  spin_lock_irqsave(&udc->lock, flags);
2552 
2553  /* Doesn't need lock */
2554  if (udc->driver) {
2555  udc_clk_set(udc, 1);
2556  udc_enable(udc);
2557  pullup(udc, is_active);
2558  } else {
2559  stop_activity(udc);
2560  pullup(udc, 0);
2561 
2562  spin_unlock_irqrestore(&udc->lock, flags);
2563  /*
2564  * Wait for all the endpoints to disable,
2565  * before disabling clocks. Don't wait if
2566  * endpoints are not enabled.
2567  */
2568  if (atomic_read(&udc->enabled_ep_cnt))
2569  wait_event_interruptible(udc->ep_disable_wait_queue,
2570  (atomic_read(&udc->enabled_ep_cnt) == 0));
2571 
2572  spin_lock_irqsave(&udc->lock, flags);
2573 
2574  udc_clk_set(udc, 0);
2575  }
2576 
2577  spin_unlock_irqrestore(&udc->lock, flags);
2578 
2579  return 0;
2580 }
2581 
2582 /* Can be called with or without lock */
2583 static int lpc32xx_pullup(struct usb_gadget *gadget, int is_on)
2584 {
2585  struct lpc32xx_udc *udc = to_udc(gadget);
2586 
2587  /* Doesn't need lock */
2588  pullup(udc, is_on);
2589 
2590  return 0;
2591 }
2592 
2593 static int lpc32xx_start(struct usb_gadget *, struct usb_gadget_driver *);
2594 static int lpc32xx_stop(struct usb_gadget *, struct usb_gadget_driver *);
2595 
2596 static const struct usb_gadget_ops lpc32xx_udc_ops = {
2597  .get_frame = lpc32xx_get_frame,
2598  .wakeup = lpc32xx_wakeup,
2599  .set_selfpowered = lpc32xx_set_selfpowered,
2600  .vbus_session = lpc32xx_vbus_session,
2601  .pullup = lpc32xx_pullup,
2602  .udc_start = lpc32xx_start,
2603  .udc_stop = lpc32xx_stop,
2604 };
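/*
 * Editor's note (sketch, not driver code): the gadget core reaches the
 * ops above through inline wrappers in <linux/usb/gadget.h>, e.g.:
 */
#if 0
	usb_gadget_connect(&udc->gadget);	/* -> lpc32xx_pullup(gadget, 1) */
	usb_gadget_disconnect(&udc->gadget);	/* -> lpc32xx_pullup(gadget, 0) */
	usb_gadget_frame_number(&udc->gadget);	/* -> lpc32xx_get_frame() */
#endif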
2605 
2606 static void nop_release(struct device *dev)
2607 {
2608  /* nothing to free */
2609 }
2610 
2611 static const struct lpc32xx_udc controller_template = {
2612  .gadget = {
2613  .ops = &lpc32xx_udc_ops,
2614  .name = driver_name,
2615  .dev = {
2616  .init_name = "gadget",
2617  .release = nop_release,
2618  }
2619  },
2620  .ep[0] = {
2621  .ep = {
2622  .name = "ep0",
2623  .ops = &lpc32xx_ep_ops,
2624  },
2625  .maxpacket = 64,
2626  .hwep_num_base = 0,
2627  .hwep_num = 0, /* Can be 0 or 1, has special handling */
2628  .lep = 0,
2629  .eptype = EP_CTL_TYPE,
2630  },
2631  .ep[1] = {
2632  .ep = {
2633  .name = "ep1-int",
2634  .ops = &lpc32xx_ep_ops,
2635  },
2636  .maxpacket = 64,
2637  .hwep_num_base = 2,
2638  .hwep_num = 0, /* 2 or 3, will be set later */
2639  .lep = 1,
2640  .eptype = EP_INT_TYPE,
2641  },
2642  .ep[2] = {
2643  .ep = {
2644  .name = "ep2-bulk",
2645  .ops = &lpc32xx_ep_ops,
2646  },
2647  .maxpacket = 64,
2648  .hwep_num_base = 4,
2649  .hwep_num = 0, /* 4 or 5, will be set later */
2650  .lep = 2,
2651  .eptype = EP_BLK_TYPE,
2652  },
2653  .ep[3] = {
2654  .ep = {
2655  .name = "ep3-iso",
2656  .ops = &lpc32xx_ep_ops,
2657  },
2658  .maxpacket = 1023,
2659  .hwep_num_base = 6,
2660  .hwep_num = 0, /* 6 or 7, will be set later */
2661  .lep = 3,
2662  .eptype = EP_ISO_TYPE,
2663  },
2664  .ep[4] = {
2665  .ep = {
2666  .name = "ep4-int",
2667  .ops = &lpc32xx_ep_ops,
2668  },
2669  .maxpacket = 64,
2670  .hwep_num_base = 8,
2671  .hwep_num = 0, /* 8 or 9, will be set later */
2672  .lep = 4,
2673  .eptype = EP_INT_TYPE,
2674  },
2675  .ep[5] = {
2676  .ep = {
2677  .name = "ep5-bulk",
2678  .ops = &lpc32xx_ep_ops,
2679  },
2680  .maxpacket = 64,
2681  .hwep_num_base = 10,
2682  .hwep_num = 0, /* 10 or 11, will be set later */
2683  .lep = 5,
2684  .eptype = EP_BLK_TYPE,
2685  },
2686  .ep[6] = {
2687  .ep = {
2688  .name = "ep6-iso",
2689  .ops = &lpc32xx_ep_ops,
2690  },
2691  .maxpacket = 1023,
2692  .hwep_num_base = 12,
2693  .hwep_num = 0, /* 12 or 13, will be set later */
2694  .lep = 6,
2695  .eptype = EP_ISO_TYPE,
2696  },
2697  .ep[7] = {
2698  .ep = {
2699  .name = "ep7-int",
2700  .ops = &lpc32xx_ep_ops,
2701  },
2702  .maxpacket = 64,
2703  .hwep_num_base = 14,
2704  .hwep_num = 0,
2705  .lep = 7,
2706  .eptype = EP_INT_TYPE,
2707  },
2708  .ep[8] = {
2709  .ep = {
2710  .name = "ep8-bulk",
2711  .ops = &lpc32xx_ep_ops,
2712  },
2713  .maxpacket = 64,
2714  .hwep_num_base = 16,
2715  .hwep_num = 0,
2716  .lep = 8,
2717  .eptype = EP_BLK_TYPE,
2718  },
2719  .ep[9] = {
2720  .ep = {
2721  .name = "ep9-iso",
2722  .ops = &lpc32xx_ep_ops,
2723  },
2724  .maxpacket = 1023,
2725  .hwep_num_base = 18,
2726  .hwep_num = 0,
2727  .lep = 9,
2728  .eptype = EP_ISO_TYPE,
2729  },
2730  .ep[10] = {
2731  .ep = {
2732  .name = "ep10-int",
2733  .ops = &lpc32xx_ep_ops,
2734  },
2735  .maxpacket = 64,
2736  .hwep_num_base = 20,
2737  .hwep_num = 0,
2738  .lep = 10,
2739  .eptype = EP_INT_TYPE,
2740  },
2741  .ep[11] = {
2742  .ep = {
2743  .name = "ep11-bulk",
2744  .ops = &lpc32xx_ep_ops,
2745  },
2746  .maxpacket = 64,
2747  .hwep_num_base = 22,
2748  .hwep_num = 0,
2749  .lep = 11,
2750  .eptype = EP_BLK_TYPE,
2751  },
2752  .ep[12] = {
2753  .ep = {
2754  .name = "ep12-iso",
2755  .ops = &lpc32xx_ep_ops,
2756  },
2757  .maxpacket = 1023,
2758  .hwep_num_base = 24,
2759  .hwep_num = 0,
2760  .lep = 12,
2761  .eptype = EP_ISO_TYPE,
2762  },
2763  .ep[13] = {
2764  .ep = {
2765  .name = "ep13-int",
2766  .ops = &lpc32xx_ep_ops,
2767  },
2768  .maxpacket = 64,
2769  .hwep_num_base = 26,
2770  .hwep_num = 0,
2771  .lep = 13,
2772  .eptype = EP_INT_TYPE,
2773  },
2774  .ep[14] = {
2775  .ep = {
2776  .name = "ep14-bulk",
2777  .ops = &lpc32xx_ep_ops,
2778  },
2779  .maxpacket = 64,
2780  .hwep_num_base = 28,
2781  .hwep_num = 0,
2782  .lep = 14,
2783  .eptype = EP_BLK_TYPE,
2784  },
2785  .ep[15] = {
2786  .ep = {
2787  .name = "ep15-bulk",
2788  .ops = &lpc32xx_ep_ops,
2789  },
2790  .maxpacket = 1023,
2791  .hwep_num_base = 30,
2792  .hwep_num = 0,
2793  .lep = 15,
2794  .eptype = EP_BLK_TYPE,
2795  },
2796 };
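/*
 * Editor's note (sketch, not driver code): each logical endpoint above
 * covers a pair of hardware endpoints, OUT at hwep_num_base and IN at
 * hwep_num_base + 1; hwep_num is only filled in once the direction is
 * known when the endpoint is enabled, roughly:
 */
#if 0
	ep->hwep_num = ep->hwep_num_base + (ep->is_in ? EP_IN : EP_OUT);
#endif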
2797 
2798 /* ISO and status interrupts */
2799 static irqreturn_t lpc32xx_usb_lp_irq(int irq, void *_udc)
2800 {
2801  u32 tmp, devstat;
2802  struct lpc32xx_udc *udc = _udc;
2803 
2804  spin_lock(&udc->lock);
2805 
2806  /* Read the device status register */
2807  devstat = readl(USBD_DEVINTST(udc->udp_baseaddr));
2808 
2809  devstat &= ~USBD_EP_FAST;
2810  writel(devstat, USBD_DEVINTCLR(udc->udp_baseaddr));
2811  devstat = devstat & udc->enabled_devints;
2812 
2813  /* Device specific handling needed? */
2814  if (devstat & USBD_DEV_STAT)
2815  udc_handle_dev(udc);
2816 
2817  /* Start of frame? (devstat & FRAME_INT):
2818  * The frame interrupt isn't really needed for ISO support,
2819  * as the driver will queue the necessary packets */
2820 
2821  /* Error? */
2822  if (devstat & ERR_INT) {
2823  /* All types of errors, from cable removal during transfer to
2824  * misc protocol and bit errors. These are mostly informational,
2825  * as the USB hardware will work around these. If these errors
2826  * happen a lot, something is wrong. */
2827  udc_protocol_cmd_w(udc, CMD_RD_ERR_STAT);
2828  tmp = udc_protocol_cmd_r(udc, DAT_RD_ERR_STAT);
2829  dev_dbg(udc->dev, "Device error (0x%x)!\n", tmp);
2830  }
2831 
2832  spin_unlock(&udc->lock);
2833 
2834  return IRQ_HANDLED;
2835 }
2836 
2837 /* EP interrupts */
2838 static irqreturn_t lpc32xx_usb_hp_irq(int irq, void *_udc)
2839 {
2840  u32 tmp;
2841  struct lpc32xx_udc *udc = _udc;
2842 
2843  spin_lock(&udc->lock);
2844 
2845  /* Read the device status register */
2847 
2848  /* Endpoints */
2849  tmp = readl(USBD_EPINTST(udc->udp_baseaddr));
2850 
2851  /* Special handling for EP0 */
2852  if (tmp & (EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2853  /* Handle EP0 IN */
2854  if (tmp & (EP_MASK_SEL(0, EP_IN)))
2855  udc_handle_ep0_in(udc);
2856 
2857  /* Handle EP0 OUT */
2858  if (tmp & (EP_MASK_SEL(0, EP_OUT)))
2859  udc_handle_ep0_out(udc);
2860  }
2861 
2862  /* All other EPs */
2863  if (tmp & ~(EP_MASK_SEL(0, EP_OUT) | EP_MASK_SEL(0, EP_IN))) {
2864  int i;
2865 
2866  /* Handle other EP interrupts */
2867  for (i = 1; i < NUM_ENDPOINTS; i++) {
2868  if (tmp & (1 << udc->ep[i].hwep_num))
2869  udc_handle_eps(udc, &udc->ep[i]);
2870  }
2871  }
2872 
2873  spin_unlock(&udc->lock);
2874 
2875  return IRQ_HANDLED;
2876 }
2877 
2878 static irqreturn_t lpc32xx_usb_devdma_irq(int irq, void *_udc)
2879 {
2880  struct lpc32xx_udc *udc = _udc;
2881 
2882  int i;
2883  u32 tmp;
2884 
2885  spin_lock(&udc->lock);
2886 
2887  /* Handle EP DMA EOT interrupts */
2888  tmp = readl(USBD_EOTINTST(udc->udp_baseaddr)) |
2889  (readl(USBD_EPDMAST(udc->udp_baseaddr)) &
2890  readl(USBD_NDDRTINTST(udc->udp_baseaddr))) |
2891  readl(USBD_SYSERRTINTST(udc->udp_baseaddr));
2892  for (i = 1; i < NUM_ENDPOINTS; i++) {
2893  if (tmp & (1 << udc->ep[i].hwep_num))
2894  udc_handle_dma_ep(udc, &udc->ep[i]);
2895  }
2896 
2897  spin_unlock(&udc->lock);
2898 
2899  return IRQ_HANDLED;
2900 }
2901 
2902 /*
2903  *
2904  * VBUS detection, pullup handler, and Gadget cable state notification
2905  *
2906  */
2907 static void vbus_work(struct work_struct *work)
2908 {
2909  u8 value;
2910  struct lpc32xx_udc *udc = container_of(work, struct lpc32xx_udc,
2911  vbus_job);
2912 
2913  if (udc->enabled != 0) {
2914  /* Discharge VBUS real quick */
2915  i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2916  ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DISCHRG);
2917 
2918  /* Give VBUS some time (100 ms) to discharge */
2919  msleep(100);
2920 
2921  /* Disable VBUS discharge resistor */
2922  i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2923  ISP1301_I2C_OTG_CONTROL_1 |
2924  ISP1301_I2C_REG_CLEAR_ADDR, OTG1_VBUS_DISCHRG);
2925 
2926  /* Clear interrupt */
2927  i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
2928  ISP1301_I2C_INTERRUPT_LATCH |
2929  ISP1301_I2C_REG_CLEAR_ADDR, ~0);
2930 
2931  /* Get the VBUS status from the transceiver */
2932  value = i2c_smbus_read_byte_data(udc->isp1301_i2c_client,
2933  ISP1301_I2C_INTERRUPT_SOURCE);
2934 
2935  /* VBUS on or off? */
2936  if (value & INT_SESS_VLD)
2937  udc->vbus = 1;
2938  else
2939  udc->vbus = 0;
2940 
2941  /* VBUS changed? */
2942  if (udc->last_vbus != udc->vbus) {
2943  udc->last_vbus = udc->vbus;
2944  lpc32xx_vbus_session(&udc->gadget, udc->vbus);
2945  }
2946  }
2947 
2948  /* Re-enable after completion */
2949  enable_irq(udc->udp_irq[IRQ_USB_ATX]);
2950 }
2951 
2952 static irqreturn_t lpc32xx_usb_vbus_irq(int irq, void *_udc)
2953 {
2954  struct lpc32xx_udc *udc = _udc;
2955 
2956  /* Defer handling of VBUS IRQ to work queue */
2957  disable_irq_nosync(udc->udp_irq[IRQ_USB_ATX]);
2958  schedule_work(&udc->vbus_job);
2959 
2960  return IRQ_HANDLED;
2961 }
2962 
2963 static int lpc32xx_start(struct usb_gadget *gadget,
2964  struct usb_gadget_driver *driver)
2965 {
2966  struct lpc32xx_udc *udc = to_udc(gadget);
2967  int i;
2968 
2969  if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) {
2970  dev_err(udc->dev, "bad parameter.\n");
2971  return -EINVAL;
2972  }
2973 
2974  if (udc->driver) {
2975  dev_err(udc->dev, "UDC already has a gadget driver\n");
2976  return -EBUSY;
2977  }
2978 
2979  udc->driver = driver;
2980  udc->gadget.dev.driver = &driver->driver;
2981  udc->gadget.dev.of_node = udc->dev->of_node;
2982  udc->enabled = 1;
2983  udc->selfpowered = 1;
2984  udc->vbus = 0;
2985 
2986  /* Force VBUS process once to check for cable insertion */
2987  udc->last_vbus = udc->vbus = 0;
2988  schedule_work(&udc->vbus_job);
2989 
2990  /* Do not re-enable ATX IRQ (3) */
2991  for (i = IRQ_USB_LP; i < IRQ_USB_ATX; i++)
2992  enable_irq(udc->udp_irq[i]);
2993 
2994  return 0;
2995 }
2996 
2997 static int lpc32xx_stop(struct usb_gadget *gadget,
2998  struct usb_gadget_driver *driver)
2999 {
3000  int i;
3001  struct lpc32xx_udc *udc = to_udc(gadget);
3002 
3003  if (!driver || driver != udc->driver)
3004  return -EINVAL;
3005 
3006  for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3007  disable_irq(udc->udp_irq[i]);
3008 
3009  if (udc->clocked) {
3010  spin_lock(&udc->lock);
3011  stop_activity(udc);
3012  spin_unlock(&udc->lock);
3013 
3014  /*
3015  * Wait for all the endpoints to disable,
3016  * before disabling clocks. Don't wait if
3017  * endpoints are not enabled.
3018  */
3019  if (atomic_read(&udc->enabled_ep_cnt))
3020  wait_event_interruptible(udc->ep_disable_wait_queue,
3021  (atomic_read(&udc->enabled_ep_cnt) == 0));
3022 
3023  spin_lock(&udc->lock);
3024  udc_clk_set(udc, 0);
3025  spin_unlock(&udc->lock);
3026  }
3027 
3028  udc->enabled = 0;
3029  udc->gadget.dev.driver = NULL;
3030  udc->driver = NULL;
3031 
3032  return 0;
3033 }
3034 
3035 static void lpc32xx_udc_shutdown(struct platform_device *dev)
3036 {
3037  /* Force disconnect on reboot */
3038  struct lpc32xx_udc *udc = platform_get_drvdata(dev);
3039 
3040  pullup(udc, 0);
3041 }
3042 
3043 /*
3044  * Callbacks to be overridden by options passed via OF (TODO)
3045  */
3046 
3047 static void lpc32xx_usbd_conn_chg(int conn)
3048 {
3049  /* Do nothing, it might be nice to enable an LED
3050  * based on conn state being !0 */
3051 }
3052 
3053 static void lpc32xx_usbd_susp_chg(int susp)
3054 {
3055  /* Device suspend if susp != 0 */
3056 }
3057 
3058 static void lpc32xx_rmwkup_chg(int remote_wakup_enable)
3059 {
3060  /* Enable or disable USB remote wakeup */
3061 }
3062 
3063  static struct lpc32xx_usbd_cfg lpc32xx_usbddata = {
3064  .vbus_drv_pol = 0,
3065  .conn_chgb = &lpc32xx_usbd_conn_chg,
3066  .susp_chgb = &lpc32xx_usbd_susp_chg,
3067  .rmwk_chgb = &lpc32xx_rmwkup_chg,
3068 };
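/*
 * Editor's sketch (not part of the original driver): a board could plug
 * its own notification callbacks into the structure above, for example
 * to drive a connection LED via <linux/gpio.h>. The GPIO number and
 * function name below are hypothetical.
 */
#if 0
static void example_board_conn_chg(int conn)
{
	gpio_set_value(EXAMPLE_CONN_LED_GPIO, conn != 0);
}
#endif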
3069 
3070 
3071 static u64 lpc32xx_usbd_dmamask = ~(u32) 0x7F;
3072 
3073 static int __init lpc32xx_udc_probe(struct platform_device *pdev)
3074 {
3075  struct device *dev = &pdev->dev;
3076  struct lpc32xx_udc *udc;
3077  int retval, i;
3078  struct resource *res;
3079  dma_addr_t dma_handle;
3080  struct device_node *isp1301_node;
3081 
3082  udc = kzalloc(sizeof(*udc), GFP_KERNEL);
3083  if (!udc)
3084  return -ENOMEM;
3085 
3086  memcpy(udc, &controller_template, sizeof(*udc));
3087  for (i = 0; i <= 15; i++)
3088  udc->ep[i].udc = udc;
3089  udc->gadget.ep0 = &udc->ep[0].ep;
3090 
3091  /* init software state */
3092  udc->gadget.dev.parent = dev;
3093  udc->pdev = pdev;
3094  udc->dev = &pdev->dev;
3095  udc->enabled = 0;
3096 
3097  if (pdev->dev.of_node) {
3098  isp1301_node = of_parse_phandle(pdev->dev.of_node,
3099  "transceiver", 0);
3100  } else {
3101  isp1301_node = NULL;
3102  }
3103 
3104  udc->isp1301_i2c_client = isp1301_get_client(isp1301_node);
3105  if (!udc->isp1301_i2c_client) {
3106  retval = -EPROBE_DEFER;
3107  goto phy_fail;
3108  }
3109 
3110  dev_info(udc->dev, "ISP1301 I2C device at address 0x%x\n",
3111  udc->isp1301_i2c_client->addr);
3112 
3113  pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
3114  pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
3115 
3116  udc->board = &lpc32xx_usbddata;
3117 
3118  /*
3119  * Resources are mapped as follows:
3120  * IORESOURCE_MEM, base address and size of USB space
3121  * IORESOURCE_IRQ, USB device low priority interrupt number
3122  * IORESOURCE_IRQ, USB device high priority interrupt number
3123  * IORESOURCE_IRQ, USB device interrupt number
3124  * IORESOURCE_IRQ, USB transceiver interrupt number
3125  */
3126  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3127  if (!res) {
3128  retval = -ENXIO;
3129  goto resource_fail;
3130  }
3131 
3132  spin_lock_init(&udc->lock);
3133 
3134  /* Get IRQs */
3135  for (i = 0; i < 4; i++) {
3136  udc->udp_irq[i] = platform_get_irq(pdev, i);
3137  if (udc->udp_irq[i] < 0) {
3138  dev_err(udc->dev,
3139  "irq resource %d not available!\n", i);
3140  retval = udc->udp_irq[i];
3141  goto irq_fail;
3142  }
3143  }
3144 
3145  udc->io_p_start = res->start;
3146  udc->io_p_size = resource_size(res);
3147  if (!request_mem_region(udc->io_p_start, udc->io_p_size, driver_name)) {
3148  dev_err(udc->dev, "someone's using UDC memory\n");
3149  retval = -EBUSY;
3150  goto request_mem_region_fail;
3151  }
3152 
3153  udc->udp_baseaddr = ioremap(udc->io_p_start, udc->io_p_size);
3154  if (!udc->udp_baseaddr) {
3155  retval = -ENOMEM;
3156  dev_err(udc->dev, "IO map failure\n");
3157  goto io_map_fail;
3158  }
3159 
3160  /* Enable AHB slave USB clock, needed for further USB clock control */
3161  writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
3162 
3163  /* Get required clocks */
3164  udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
3165  if (IS_ERR(udc->usb_pll_clk)) {
3166  dev_err(udc->dev, "failed to acquire USB PLL\n");
3167  retval = PTR_ERR(udc->usb_pll_clk);
3168  goto pll_get_fail;
3169  }
3170  udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
3171  if (IS_ERR(udc->usb_slv_clk)) {
3172  dev_err(udc->dev, "failed to acquire USB device clock\n");
3173  retval = PTR_ERR(udc->usb_slv_clk);
3174  goto usb_clk_get_fail;
3175  }
3176  udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
3177  if (IS_ERR(udc->usb_otg_clk)) {
3178  dev_err(udc->dev, "failed to acquire USB otg clock\n");
3179  retval = PTR_ERR(udc->usb_otg_clk);
3180  goto usb_otg_clk_get_fail;
3181  }
3182 
3183  /* Setup PLL clock to 48MHz */
3184  retval = clk_enable(udc->usb_pll_clk);
3185  if (retval < 0) {
3186  dev_err(udc->dev, "failed to start USB PLL\n");
3187  goto pll_enable_fail;
3188  }
3189 
3190  retval = clk_set_rate(udc->usb_pll_clk, 48000);
3191  if (retval < 0) {
3192  dev_err(udc->dev, "failed to set USB clock rate\n");
3193  goto pll_set_fail;
3194  }
3195 
3196  writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
3197 
3198  /* Enable USB device clock */
3199  retval = clk_enable(udc->usb_slv_clk);
3200  if (retval < 0) {
3201  dev_err(udc->dev, "failed to start USB device clock\n");
3202  goto usb_clk_enable_fail;
3203  }
3204 
3205  /* Enable USB OTG clock */
3206  retval = clk_enable(udc->usb_otg_clk);
3207  if (retval < 0) {
3208  dev_err(udc->dev, "failed to start USB otg clock\n");
3209  goto usb_otg_clk_enable_fail;
3210  }
3211 
3212  /* Setup deferred workqueue data */
3213  udc->poweron = udc->pullup = 0;
3214  INIT_WORK(&udc->pullup_job, pullup_work);
3215  INIT_WORK(&udc->vbus_job, vbus_work);
3216 #ifdef CONFIG_PM
3217  INIT_WORK(&udc->power_job, power_work);
3218 #endif
3219 
3220  /* All clocks are now on */
3221  udc->clocked = 1;
3222 
3223  isp1301_udc_configure(udc);
3224  /* Allocate memory for the UDCA */
3225  udc->udca_v_base = dma_alloc_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3226  &dma_handle,
3227  (GFP_KERNEL | GFP_DMA));
3228  if (!udc->udca_v_base) {
3229  dev_err(udc->dev, "error getting UDCA region\n");
3230  retval = -ENOMEM;
3231  goto i2c_fail;
3232  }
3233  udc->udca_p_base = dma_handle;
3234  dev_dbg(udc->dev, "DMA buffer(0x%x bytes), P:0x%08x, V:0x%p\n",
3235  UDCA_BUFF_SIZE, udc->udca_p_base, udc->udca_v_base);
3236 
3237  /* Setup the DD DMA memory pool */
3238  udc->dd_cache = dma_pool_create("udc_dd", udc->dev,
3239  sizeof(struct lpc32xx_usbd_dd_gad),
3240  sizeof(u32), 0);
3241  if (!udc->dd_cache) {
3242  dev_err(udc->dev, "error getting DD DMA region\n");
3243  retval = -ENOMEM;
3244  goto dma_alloc_fail;
3245  }
3246 
3247  /* Clear USB peripheral and initialize gadget endpoints */
3248  udc_disable(udc);
3249  udc_reinit(udc);
3250 
3251  retval = device_register(&udc->gadget.dev);
3252  if (retval < 0) {
3253  dev_err(udc->dev, "Device registration failure\n");
3254  goto dev_register_fail;
3255  }
3256 
3257  /* Request IRQs - the low and high priority USB device IRQs each have
3258  * their own handler, while the DMA interrupt is routed elsewhere */
3259  retval = request_irq(udc->udp_irq[IRQ_USB_LP], lpc32xx_usb_lp_irq,
3260  0, "udc_lp", udc);
3261  if (retval < 0) {
3262  dev_err(udc->dev, "LP request irq %d failed\n",
3263  udc->udp_irq[IRQ_USB_LP]);
3264  goto irq_lp_fail;
3265  }
3266  retval = request_irq(udc->udp_irq[IRQ_USB_HP], lpc32xx_usb_hp_irq,
3267  0, "udc_hp", udc);
3268  if (retval < 0) {
3269  dev_err(udc->dev, "HP request irq %d failed\n",
3270  udc->udp_irq[IRQ_USB_HP]);
3271  goto irq_hp_fail;
3272  }
3273 
3274  retval = request_irq(udc->udp_irq[IRQ_USB_DEVDMA],
3275  lpc32xx_usb_devdma_irq, 0, "udc_dma", udc);
3276  if (retval < 0) {
3277  dev_err(udc->dev, "DEV request irq %d failed\n",
3278  udc->udp_irq[IRQ_USB_DEVDMA]);
3279  goto irq_dev_fail;
3280  }
3281 
3282  /* The transceiver interrupt is used for VBUS detection and will
3283  kick off the VBUS handler function */
3284  retval = request_irq(udc->udp_irq[IRQ_USB_ATX], lpc32xx_usb_vbus_irq,
3285  0, "udc_otg", udc);
3286  if (retval < 0) {
3287  dev_err(udc->dev, "VBUS request irq %d failed\n",
3288  udc->udp_irq[IRQ_USB_ATX]);
3289  goto irq_xcvr_fail;
3290  }
3291 
3292  /* Initialize wait queue */
3293  init_waitqueue_head(&udc->ep_disable_wait_queue);
3294  atomic_set(&udc->enabled_ep_cnt, 0);
3295 
3296  /* Keep all IRQs disabled until GadgetFS starts up */
3297  for (i = IRQ_USB_LP; i <= IRQ_USB_ATX; i++)
3298  disable_irq(udc->udp_irq[i]);
3299 
3300  retval = usb_add_gadget_udc(dev, &udc->gadget);
3301  if (retval < 0)
3302  goto add_gadget_fail;
3303 
3304  dev_set_drvdata(dev, udc);
3305  device_init_wakeup(dev, 1);
3306  create_debug_file(udc);
3307 
3308  /* Disable clocks for now */
3309  udc_clk_set(udc, 0);
3310 
3311  dev_info(udc->dev, "%s version %s\n", driver_name, DRIVER_VERSION);
3312  return 0;
3313 
3314 add_gadget_fail:
3315  free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3316 irq_xcvr_fail:
3317  free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3318 irq_dev_fail:
3319  free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3320 irq_hp_fail:
3321  free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3322 irq_lp_fail:
3323  device_unregister(&udc->gadget.dev);
3324 dev_register_fail:
3325  dma_pool_destroy(udc->dd_cache);
3326 dma_alloc_fail:
3327  dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3328  udc->udca_v_base, udc->udca_p_base);
3329 i2c_fail:
3330  clk_disable(udc->usb_otg_clk);
3331 usb_otg_clk_enable_fail:
3332  clk_disable(udc->usb_slv_clk);
3333 usb_clk_enable_fail:
3334 pll_set_fail:
3335  clk_disable(udc->usb_pll_clk);
3336 pll_enable_fail:
3337  clk_put(udc->usb_slv_clk);
3338 usb_otg_clk_get_fail:
3339  clk_put(udc->usb_otg_clk);
3340 usb_clk_get_fail:
3341  clk_put(udc->usb_pll_clk);
3342 pll_get_fail:
3343  iounmap(udc->udp_baseaddr);
3344 io_map_fail:
3345  release_mem_region(udc->io_p_start, udc->io_p_size);
3346  dev_err(udc->dev, "%s probe failed, %d\n", driver_name, retval);
3347 request_mem_region_fail:
3348 irq_fail:
3349 resource_fail:
3350 phy_fail:
3351  kfree(udc);
3352  return retval;
3353 }
3354 
3355 static int __devexit lpc32xx_udc_remove(struct platform_device *pdev)
3356 {
3357  struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3358 
3359  usb_del_gadget_udc(&udc->gadget);
3360  if (udc->driver)
3361  return -EBUSY;
3362 
3363  udc_clk_set(udc, 1);
3364  udc_disable(udc);
3365  pullup(udc, 0);
3366 
3367  free_irq(udc->udp_irq[IRQ_USB_ATX], udc);
3368 
3369  device_init_wakeup(&pdev->dev, 0);
3370  remove_debug_file(udc);
3371 
3372  dma_pool_destroy(udc->dd_cache);
3373  dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
3374  udc->udca_v_base, udc->udca_p_base);
3375  free_irq(udc->udp_irq[IRQ_USB_DEVDMA], udc);
3376  free_irq(udc->udp_irq[IRQ_USB_HP], udc);
3377  free_irq(udc->udp_irq[IRQ_USB_LP], udc);
3378 
3379  device_unregister(&udc->gadget.dev);
3380 
3381  clk_disable(udc->usb_otg_clk);
3382  clk_put(udc->usb_otg_clk);
3383  clk_disable(udc->usb_slv_clk);
3384  clk_put(udc->usb_slv_clk);
3385  clk_disable(udc->usb_pll_clk);
3386  clk_put(udc->usb_pll_clk);
3387  iounmap(udc->udp_baseaddr);
3388  release_mem_region(udc->io_p_start, udc->io_p_size);
3389  kfree(udc);
3390 
3391  return 0;
3392 }
3393 
3394 #ifdef CONFIG_PM
3395 static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
3396 {
3397  struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3398 
3399  if (udc->clocked) {
3400  /* Power down ISP */
3401  udc->poweron = 0;
3402  isp1301_set_powerstate(udc, 0);
3403 
3404  /* Disable clocking */
3405  udc_clk_set(udc, 0);
3406 
3407  /* Keep clock flag on, so we know to re-enable clocks
3408  on resume */
3409  udc->clocked = 1;
3410 
3411  /* Kill global USB clock */
3412  clk_disable(udc->usb_slv_clk);
3413  }
3414 
3415  return 0;
3416 }
3417 
3418 static int lpc32xx_udc_resume(struct platform_device *pdev)
3419 {
3420  struct lpc32xx_udc *udc = platform_get_drvdata(pdev);
3421 
3422  if (udc->clocked) {
3423  /* Enable global USB clock */
3424  clk_enable(udc->usb_slv_clk);
3425 
3426  /* Enable clocking */
3427  udc_clk_set(udc, 1);
3428 
3429  /* ISP back to normal power mode */
3430  udc->poweron = 1;
3431  isp1301_set_powerstate(udc, 1);
3432  }
3433 
3434  return 0;
3435 }
3436 #else
3437 #define lpc32xx_udc_suspend NULL
3438 #define lpc32xx_udc_resume NULL
3439 #endif
3440 
3441 #ifdef CONFIG_OF
3442 static struct of_device_id lpc32xx_udc_of_match[] = {
3443  { .compatible = "nxp,lpc3220-udc", },
3444  { },
3445 };
3446 MODULE_DEVICE_TABLE(of, lpc32xx_udc_of_match);
3447 #endif
3448 
3449 static struct platform_driver lpc32xx_udc_driver = {
3450  .remove = __devexit_p(lpc32xx_udc_remove),
3451  .shutdown = lpc32xx_udc_shutdown,
3452  .suspend = lpc32xx_udc_suspend,
3453  .resume = lpc32xx_udc_resume,
3454  .driver = {
3455  .name = (char *) driver_name,
3456  .owner = THIS_MODULE,
3457  .of_match_table = of_match_ptr(lpc32xx_udc_of_match),
3458  },
3459 };
3460 
3461 static int __init udc_init_module(void)
3462 {
3463  return platform_driver_probe(&lpc32xx_udc_driver, lpc32xx_udc_probe);
3464 }
3465 module_init(udc_init_module);
3466 
3467 static void __exit udc_exit_module(void)
3468 {
3469  platform_driver_unregister(&lpc32xx_udc_driver);
3470 }
3471 module_exit(udc_exit_module);
3472 
3473 MODULE_DESCRIPTION("LPC32XX udc driver");
3474 MODULE_AUTHOR("Kevin Wells <[email protected]>");
3475 MODULE_AUTHOR("Roland Stigge <[email protected]>");
3476 MODULE_LICENSE("GPL");
3477 MODULE_ALIAS("platform:lpc32xx_udc");