#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "imx21-hcd.h"
#ifdef DEBUG
#define DEBUG_LOG_FRAME(imx21, etd, event) \
        (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
#else
#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
#endif
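/*
 * Illustration (not driver code): with DEBUG defined,
 * DEBUG_LOG_FRAME(imx21, etd, activated) token-pastes the event name and
 * expands to
 *
 *      etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
 *
 * i.e. it snapshots the current frame number into the ETD's debug field.
 */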
static const char hcd_name[] = "imx21-hcd";
static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
{
        return (struct imx21 *)hcd->hcd_priv;
}
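/*
 * Register access helpers. clear_register_bits() is a read-modify-write;
 * the toggle helpers below appear to target write-one-to-toggle registers,
 * hence the read-back check before writing the mask.
 */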
static inline void clear_register_bits(struct imx21 *imx21,
        u32 offset, u32 mask)
{
        u32 reg = readl(imx21->regs + offset);
        writel(reg & ~mask, imx21->regs + offset);
}
static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (readl(reg) & mask)
                writel(mask, reg);
}
static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
{
        void __iomem *reg = imx21->regs + offset;

        if (!(readl(reg) & mask))
                writel(mask, reg);
}
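/*
 * Each ETD (Endpoint Transfer Descriptor) is four 32-bit dwords in the
 * controller's register space; these accessors index them by ETD number
 * and dword.
 */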
static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
{
        /* ... */
}

static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
{
        /* ... */
}
static inline int wrap_frame(int counter)
{
        return counter & 0xFFFF;
}
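/*
 * The hardware frame counter is 16 bits wide, so frame arithmetic and
 * comparisons must be done modulo 0x10000.
 */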
static inline int frame_after(int frame, int after)
{
        /* ... */
}
static int imx21_hc_get_frame(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);

        return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
}

static inline int unsuitable_for_dma(dma_addr_t addr)
{
        return (addr & 3) != 0;
}
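/*
 * The DMA engine needs 4-byte aligned addresses; transfers failing this
 * check fall back to a bounce buffer (see the err_bounce_alloc path in
 * activate_etd() below).
 */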
static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status);
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
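/*
 * ETD management. The controller provides a fixed pool of USB_NUM_ETD
 * descriptors; alloc_etd() claims the first slot whose 'alloc' flag is
 * clear.
 */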
static int alloc_etd(struct imx21 *imx21)
{
        int i;
        struct etd_priv *etd = imx21->etd;

        for (i = 0; i < USB_NUM_ETD; i++, etd++) {
                if (etd->alloc == 0) {
                        memset(etd, 0, sizeof(imx21->etd[0]));
                        etd->alloc = 1;
                        debug_etd_allocated(imx21);
                        return i;
                }
        }
        return -1;
}
static void disactivate_etd(struct imx21 *imx21, int num)
{
        int etd_mask = (1 << num);

        /* ... */
}
static void reset_etd(struct imx21 *imx21, int num)
{
        int i;

        disactivate_etd(imx21, num);

        for (i = 0; i < 4; i++)
                etd_writel(imx21, num, i, 0);
        /* ... */
}
static void free_etd(struct imx21 *imx21, int num)
{
        if (num >= USB_NUM_ETD) {
                /* ... */
                return;
        }
        if (imx21->etd[num].alloc == 0) {
                dev_err(imx21->dev, "ETD %d already free!\n", num);
                return;
        }

        debug_etd_freed(imx21);
        reset_etd(imx21, num);
}
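/*
 * Dword 0 of an ETD holds the target address/endpoint information and the
 * transfer format; the format field is looked up from the URB's pipe type
 * via fmt_urb_to_etd[].
 */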
static void setup_etd_dword0(struct imx21 *imx21,
        int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
{
        etd_writel(imx21, etd_num, 0,
                /* ... */
                ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] <<
                        DW0_FORMAT) |
                /* ... */);
}
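/*
 * copy_to_dmem() packs source bytes into 32-bit words before writing them
 * to the controller's local data memory; a trailing partial word
 * (byte != 3) must be flushed explicitly.
 */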
static void copy_to_dmem(
        struct imx21 *imx21, int dmem_offset, void *src, int count)
{
        /* ... */
        for (i = 0; i < count; i++) {
                /* ... */
                word += (*p++ << (byte * 8));
                /* ... */
        }

        if (count && byte != 3)
                /* ... write out the final partial word ... */;
}
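/*
 * activate_etd() does the final hardware setup and kicks off a transfer.
 * Buffers unsuitable for DMA are copied through a bounce buffer; if that
 * allocation fails the URB is completed with -ENOMEM.
 */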
static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
{
        u32 etd_mask = 1 << etd_num;
        struct etd_priv *etd = &imx21->etd[etd_num];
        int i;

        /* ... */
                        goto err_bounce_alloc;
        /* ... */

#ifdef DEBUG
        etd->disactivated_frame = -1;
        etd->last_int_frame = -1;
        etd->last_req_frame = -1;
        for (i = 0; i < 4; i++)
                etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
#endif
        /* ... */
        return;

err_bounce_alloc:
        /* ... */
        free_dmem(imx21, etd);
        nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
}
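/*
 * DMEM is the controller's on-chip data memory. The allocator keeps a
 * sorted list of areas, rounds sizes up to 4-byte units and rejects
 * requests larger than DMEM_SIZE.
 */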
static int alloc_dmem(struct imx21 *imx21, unsigned int size,
        struct usb_host_endpoint *ep)
{
        unsigned int offset = 0;
        struct imx21_dmem_area *tmp;

        size += (~size + 1) & 0x3;      /* round up to a multiple of 4 */

        if (size > DMEM_SIZE) {
                dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
                        size, DMEM_SIZE);
                /* ... */
        }

        /* scan the sorted area list for a large enough gap */
        list_for_each_entry(tmp, &imx21->dmem_list, list) {
                if ((size + offset) < offset)
                        goto fail;      /* arithmetic overflow */
                if ((size + offset) <= tmp->offset)
                        break;
                offset = tmp->offset + tmp->size;
        }
        /* ... record the new area at 'offset' ... */

        debug_dmem_allocated(imx21, size);
        return offset;

fail:
        return -ENOMEM;
}
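/*
 * ETDs that could not get DMEM at submit time sit on a queue; once memory
 * is freed they are completed with the new buffer offset and activated.
 */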
static void activate_queued_etd(struct imx21 *imx21,
        struct etd_priv *etd, u32 dmem_offset)
{
        int etd_num = etd - &imx21->etd[0];
        u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;

        dev_dbg(imx21->dev,
                "activating queued ETD %d now DMEM available\n", etd_num);
        etd_writel(imx21, etd_num, 1,
                /* ... x/y buffer addresses from dmem_offset ... */);
        /* ... */
        activate_etd(imx21, etd_num, dir);
}
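/*
 * Free a DMEM area, then retry allocation for any ETDs queued waiting for
 * memory.
 */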
static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
{
        struct imx21_dmem_area *area;
        struct etd_priv *tmp;
        int found = 0;
        int offset = etd->dmem_offset;

        list_for_each_entry(area, &imx21->dmem_list, list) {
                if (area->offset == offset) {
                        debug_dmem_freed(imx21, area->size);
                        list_del(&area->list);
                        kfree(area);
                        found = 1;
                        break;
                }
        }
        if (!found)
                dev_err(imx21->dev,
                        "Trying to free unallocated DMEM %d\n", offset);

        /* retry allocation for ETDs queued waiting for DMEM */
        list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
                offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
                if (offset >= 0) {
                        list_del(&etd->queue);
                        activate_queued_etd(imx21, etd, (u32)offset);
                }
        }
}
static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct imx21_dmem_area *area, *tmp;

        list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
                if (area->ep == ep) {
                        dev_err(imx21->dev,
                                "Active DMEM %d for disabled ep=%p\n",
                                area->offset, ep);
                        list_del(&area->list);
                        kfree(area);
                }
        }
}
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                int etd_num = ep_priv->etd[i];
                struct etd_priv *etd;

                if (etd_num < 0)
                        continue;

                etd = &imx21->etd[etd_num];
                ep_priv->etd[i] = -1;

                free_dmem(imx21, etd);  /* for isoc */

                if (list_empty(&imx21->queue_for_etd)) {
                        free_etd(imx21, etd_num);
                        continue;
                }

                dev_dbg(imx21->dev,
                        "assigning idle etd %d for queued request\n", etd_num);
                ep_priv = list_first_entry(&imx21->queue_for_etd,
                        struct ep_priv, queue);
                list_del(&ep_priv->queue);
                reset_etd(imx21, etd_num);
                ep_priv->waiting_etd = 0;
                ep_priv->etd[i] = etd_num;

                if (list_empty(&ep_priv->ep->urb_list)) {
                        dev_err(imx21->dev, "No urb for queued ep!\n");
                        continue;
                }
                schedule_nonisoc_etd(imx21, list_first_entry(
                        &ep_priv->ep->urb_list, struct urb, urb_list));
        }
}
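/*
 * Hand a completed URB back to the USB core. The private lock is dropped
 * around the giveback because the completion handler may resubmit and
 * re-enter this driver.
 */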
static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = urb->ep->hcpriv;
        struct urb_priv *urb_priv = urb->hcpriv;

        debug_urb_completed(imx21, urb, status);
        /* ... */

        kfree(urb_priv);
        urb->hcpriv = NULL;

        usb_hcd_unlink_urb_from_ep(hcd, urb);
        spin_unlock(&imx21->lock);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&imx21->lock);
        if (list_empty(&ep_priv->ep->urb_list))
                ep_idle(imx21, ep_priv);
}
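/*
 * Completion for control/bulk/interrupt ETDs: give the URB back, then
 * immediately schedule the endpoint's next queued URB, if any.
 */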
static void nonisoc_urb_completed_for_etd(
        struct imx21 *imx21, struct etd_priv *etd, int status)
{
        struct usb_host_endpoint *ep = etd->ep;

        urb_done(imx21->hcd, etd->urb, status);
        /* ... */

        if (!list_empty(&ep->urb_list)) {
                struct urb *urb = list_first_entry(
                        &ep->urb_list, struct urb, urb_list);

                /* ... */
                schedule_nonisoc_etd(imx21, urb);
        }
}
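/*
 * Pair idle isoc ETDs with pending transfer descriptors. TDs whose
 * scheduled frame has already passed are completed as too late rather
 * than submitted.
 */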
static void schedule_isoc_etds(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct ep_priv *ep_priv = ep->hcpriv;
        struct etd_priv *etd;
        struct urb_priv *urb_priv;
        struct td *td;
        int etd_num;
        int i;
        int cur_frame;
        u8 dir;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                if (list_empty(&ep_priv->td_list))
                        break;

                etd_num = ep_priv->etd[i];
                if (etd_num < 0)
                        continue;

                etd = &imx21->etd[etd_num];
                if (etd->urb)
                        continue;       /* currently busy */

                td = list_first_entry(&ep_priv->td_list, struct td, list);
                list_del(&td->list);
                urb_priv = td->urb->hcpriv;

                cur_frame = imx21_hc_get_frame(hcd);
                if (frame_after(cur_frame, td->frame)) {
                        dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
                                cur_frame, td->frame);
                        /* mark the packet as missed */
                        td->urb->iso_frame_desc[td->isoc_index].status =
                                -EXDEV;
                        /* ... */
                        continue;
                }

                /* ... */
                debug_isoc_submitted(imx21, cur_frame, td);

                etd->td = td;
                etd->ep = td->urb->ep;
                etd->urb = td->urb;
                etd->len = td->len;

                dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
                setup_etd_dword0(imx21, etd_num, td->urb, dir,
                        etd->dmem_size);
                etd_writel(imx21, etd_num, 2,
                        /* ... DMEM offset and start frame ... */);
                etd_writel(imx21, etd_num, 3,
                        /* ... packet length ... */);

                activate_etd(imx21, etd_num, dir);
        }
}
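/*
 * Isochronous completion: record the completion code and byte count in
 * the URB's frame descriptor, then try to schedule further TDs.
 */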
static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int etd_mask = 1 << etd_num;
        struct etd_priv *etd = &imx21->etd[etd_num];
        struct urb *urb = etd->urb;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td = etd->td;
        struct usb_host_endpoint *ep = etd->ep;
        int isoc_index = td->isoc_index;
        unsigned int pipe = urb->pipe;
        int dir_in = usb_pipein(pipe);
        int cc;
        int bytes_xfrd;

        disactivate_etd(imx21, etd_num);

        cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
        bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;

        /* ... copy IN data out of DMEM ... */

        debug_isoc_completed(imx21,
                imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
        if (cc)
                dev_dbg(imx21->dev,
                        "bad iso cc=0x%X frame=%d sched frame=%d "
                        "cnt=%d len=%d urb=%p etd=%d index=%d\n",
                        cc, imx21_hc_get_frame(hcd), td->frame,
                        bytes_xfrd, td->len, urb, etd_num, isoc_index);

        /* ... */
        urb->actual_length += bytes_xfrd;
        urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
        urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
        /* ... */
        schedule_isoc_etds(hcd, ep);
}
static struct ep_priv *alloc_isoc_ep(
        struct imx21 *imx21, struct usb_host_endpoint *ep)
{
        struct ep_priv *ep_priv;
        int i;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; i++)
                ep_priv->etd[i] = -1;

        INIT_LIST_HEAD(&ep_priv->td_list);
        ep_priv->ep = ep;
        ep->hcpriv = ep_priv;
        return ep_priv;
}
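/*
 * An isoc endpoint needs NUM_ISO_ETDS descriptors; allocation is all or
 * nothing, rolling back on failure.
 */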
static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
{
        int i, j;
        int etd_num;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                if (ep_priv->etd[i] < 0) {
                        etd_num = alloc_etd(imx21);
                        if (etd_num < 0)
                                goto alloc_etd_failed;

                        ep_priv->etd[i] = etd_num;
                        imx21->etd[etd_num].ep = ep_priv->ep;
                }
        }
        return 0;

alloc_etd_failed:
        dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
        for (j = 0; j < i; j++) {
                free_etd(imx21, ep_priv->etd[j]);
                ep_priv->etd[j] = -1;
        }
        return -ENOMEM;
}
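/*
 * Isoc URB submission: allocate per-URB state and TDs, reserve ETDs and
 * DMEM, choose a start frame (honouring URB_ISO_ASAP), then build one TD
 * per packet and kick the scheduler.
 */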
static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep,
        struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct urb_priv *urb_priv;
        unsigned long flags;
        struct ep_priv *ep_priv;
        struct td *td = NULL;
        int i;
        int ret;
        int cur_frame;
        u16 maxpacket;

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (urb_priv == NULL)
                return -ENOMEM;

        urb_priv->isoc_td = kzalloc(
                sizeof(struct td) * urb->number_of_packets, mem_flags);
        if (urb_priv->isoc_td == NULL) {
                ret = -ENOMEM;
                goto alloc_td_failed;
        }

        spin_lock_irqsave(&imx21->lock, flags);

        if (ep->hcpriv == NULL) {
                ep_priv = alloc_isoc_ep(imx21, ep);
                if (ep_priv == NULL) {
                        ret = -ENOMEM;
                        goto alloc_ep_failed;
                }
        } else {
                ep_priv = ep->hcpriv;
        }

        ret = alloc_isoc_etds(imx21, ep_priv);
        if (ret)
                goto alloc_etd_failed;

        /* ... */
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        /* allocate DMEM for the largest packet size, once per ETD */
        maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        for (i = 0; i < NUM_ISO_ETDS; i++) {
                struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];

                if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
                        dev_err(imx21->dev,
                                "increasing isoc buffer %d->%d\n",
                                etd->dmem_size, maxpacket);
                        ret = -EINVAL;
                        goto alloc_dmem_failed;
                }
                if (etd->dmem_size == 0) {
                        etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
                        if (etd->dmem_offset < 0) {
                                dev_dbg(imx21->dev,
                                        "failed alloc isoc dmem\n");
                                ret = -ENOMEM;
                                goto alloc_dmem_failed;
                        }
                        etd->dmem_size = maxpacket;
                }
        }

        /* choose the start frame */
        cur_frame = imx21_hc_get_frame(hcd);
        if (urb->transfer_flags & URB_ISO_ASAP) {
                if (list_empty(&ep_priv->td_list))
                        urb->start_frame = cur_frame + 5;
                else
                        urb->start_frame = list_entry(ep_priv->td_list.prev,
                                struct td, list)->frame + urb->interval;
        }
        urb->start_frame = wrap_frame(urb->start_frame);
        if (frame_after(cur_frame, urb->start_frame)) {
                dev_dbg(imx21->dev,
                        "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
                        urb->start_frame, cur_frame,
                        (urb->transfer_flags & URB_ISO_ASAP) != 0);
                urb->start_frame = wrap_frame(cur_frame + 1);
        }

        /* set up one TD per packet */
        td = urb_priv->isoc_td;
        for (i = 0; i < urb->number_of_packets; i++, td++) {
                unsigned int offset = urb->iso_frame_desc[i].offset;

                td->urb = urb;
                td->len = urb->iso_frame_desc[i].length;
                td->isoc_index = i;
                td->frame = wrap_frame(urb->start_frame + urb->interval * i);
                /* ... buffer address from 'offset' ... */
                list_add_tail(&td->list, &ep_priv->td_list);
        }

        dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
                urb->number_of_packets, urb->start_frame, td->frame);

        debug_urb_submitted(imx21, urb);
        schedule_isoc_etds(hcd, ep);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

alloc_dmem_failed:
alloc_etd_failed:
alloc_ep_failed:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv->isoc_td);
alloc_td_failed:
        kfree(urb_priv);
        return ret;
}
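/*
 * Unlinking an isoc URB: reset any ETD currently carrying it and drop its
 * TDs from the endpoint's list.
 */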
static void dequeue_isoc_urb(struct imx21 *imx21,
        struct urb *urb, struct ep_priv *ep_priv)
{
        struct urb_priv *urb_priv = urb->hcpriv;
        struct td *td, *tmp;
        int i;

        /* ... */

        for (i = 0; i < NUM_ISO_ETDS; i++) {
                int etd_num = ep_priv->etd[i];

                if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
                        struct etd_priv *etd = &imx21->etd[etd_num];

                        reset_etd(imx21, etd_num);
                        free_dmem(imx21, etd);
                }
        }

        list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
                if (td->urb == urb) {
                        /* ... */
                        list_del(&td->list);
                }
        }
}
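/*
 * Build and activate an ETD for a control/bulk/interrupt URB. Control
 * transfers are driven through per-URB states (setup/data/status); bulk
 * OUT may need an extra zero-length packet. If DMEM is exhausted the ETD
 * is queued and finished later by free_dmem().
 */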
static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
{
        unsigned int pipe = urb->pipe;
        struct urb_priv *urb_priv = urb->hcpriv;
        struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
        int state = urb_priv->state;
        int etd_num = ep_priv->etd[0];
        struct etd_priv *etd;
        u16 etd_buf_size;
        u16 maxpacket;
        u8 dir;
        u32 count;
        int interval = 0;

        /* ... */
                dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);

        etd = &imx21->etd[etd_num];
        maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));

        if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA))
                dir = TD_DIR_SETUP;
        else
                dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;

        if (unsuitable_for_dma(urb->setup_dma))
                /* ... copy the setup packet via a bounce buffer ... */;
        /* ... */
        if (unsuitable_for_dma(urb->transfer_dma))
                /* ... copy the data via a bounce buffer ... */;

        count = urb->transfer_buffer_length;
        if (usb_pipebulk(pipe) && (state == US_BULK0))
                count = 0;      /* zero-length packet stage */

        if (usb_pipecontrol(pipe)) {
                /* ... control stages use fixed DATA0/DATA1 toggles ... */
        } else if (usb_gettoggle(urb->dev,
                        usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe))) {
                /* ... start from DATA1 ... */
        }

        etd->urb = urb;
        etd->ep = urb_priv->ep;
        /* ... */

        if (usb_pipeint(pipe)) {
                interval = urb->interval;
                /* ... */
        }

        setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);

        etd_writel(imx21, etd_num, 2,
                /* ... toggle, polling interval, direction ... */);

        if (count && count < maxpacket)
                etd_buf_size = count;
        else
                etd_buf_size = maxpacket;

        etd_writel(imx21, etd_num, 3,
                /* ... buffer size and total byte count ... */);

        /* reserve x and y DMEM buffers in one allocation */
        etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
        etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
        if (etd->dmem_offset < 0) {
                /* set up what we can now; finish when DMEM frees up */
                etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);

                dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
                debug_urb_queued_for_dmem(imx21, urb);
                list_add_tail(&etd->queue, &imx21->queue_for_dmem);
                return;
        }

        etd_writel(imx21, etd_num, 1,
                /* ... x/y buffer start addresses in DMEM ... */);

        /* ... */
        dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
                etd_num, count, dir != TD_DIR_IN ? "out" : "in");
        activate_etd(imx21, etd_num, dir);
}
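/*
 * Non-isoc completion: extract direction, completion code and transfer
 * count from the ETD, save the data-toggle state, and either complete the
 * URB or advance it to its next stage.
 */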
static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct etd_priv *etd = &imx21->etd[etd_num];
        struct urb *urb = etd->urb;
        u32 etd_mask = 1 << etd_num;
        struct urb_priv *urb_priv = urb->hcpriv;
        int dir;
        int cc;
        int bytes_xfrd;
        int etd_done;

        disactivate_etd(imx21, etd_num);

        dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
        cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
        bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);

        /* save toggle carry */
        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                usb_pipeout(urb->pipe),
                (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);

        /* ... copy IN data out of DMEM / bounce buffer ... */
        free_dmem(imx21, etd);

        urb->error_count = 0;
        if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
                        && (cc == TD_DATAUNDERRUN))
                cc = TD_CC_NOERROR;

        etd_done = (cc_to_error[cc] != 0);      /* stop on error */

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                switch (urb_priv->state) {
                case US_CTRL_SETUP:
                        if (urb->transfer_buffer_length > 0)
                                urb_priv->state = US_CTRL_DATA;
                        else
                                urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_DATA:
                        urb->actual_length += bytes_xfrd;
                        urb_priv->state = US_CTRL_ACK;
                        break;
                case US_CTRL_ACK:
                        etd_done = 1;
                        break;
                default:
                        dev_err(imx21->dev,
                                "Invalid pipe state %d\n", urb_priv->state);
                        etd_done = 1;
                        break;
                }
                break;

        case PIPE_BULK:
                urb->actual_length += bytes_xfrd;
                if ((urb_priv->state == US_BULK)
                                && (urb->transfer_flags & URB_ZERO_PACKET)
                                && urb->transfer_buffer_length > 0
                                && ((urb->transfer_buffer_length %
                                        usb_maxpacket(urb->dev, urb->pipe,
                                                usb_pipeout(urb->pipe))) == 0)) {
                        /* need a zero-length packet */
                        urb_priv->state = US_BULK0;
                } else {
                        etd_done = 1;
                }
                break;

        case PIPE_INTERRUPT:
                urb->actual_length += bytes_xfrd;
                etd_done = 1;
                break;
        }

        if (etd_done)
                nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
        else {
                /* ... */
                schedule_nonisoc_etd(imx21, urb);
        }
}
static struct ep_priv *alloc_ep(void)
{
        int i;
        struct ep_priv *ep_priv;

        ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
        if (!ep_priv)
                return NULL;

        for (i = 0; i < NUM_ISO_ETDS; ++i)
                ep_priv->etd[i] = -1;

        return ep_priv;
}
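/*
 * Non-isoc URB submission. Endpoint state is created lazily; if no ETD is
 * free the endpoint is queued until one is released by ep_idle().
 */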
static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
        struct urb *urb, gfp_t mem_flags)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        struct usb_host_endpoint *ep = urb->ep;
        struct urb_priv *urb_priv;
        struct ep_priv *ep_priv;
        struct etd_priv *etd;
        int ret;
        unsigned long flags;

        dev_vdbg(imx21->dev,
                "enqueue urb=%p ep=%p len=%d "
                "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
                urb, ep,
                urb->transfer_buffer_length,
                urb->transfer_buffer, urb->transfer_dma,
                urb->setup_packet, urb->setup_dma);

        if (usb_pipeisoc(urb->pipe))
                return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);

        urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
        if (!urb_priv)
                return -ENOMEM;

        spin_lock_irqsave(&imx21->lock, flags);
        /* ... */

        ep_priv = ep->hcpriv;
        if (ep_priv == NULL) {
                ep_priv = alloc_ep();
                if (!ep_priv) {
                        ret = -ENOMEM;
                        goto failed_alloc_ep;
                }
                ep->hcpriv = ep_priv;
                ep_priv->ep = ep;
        }

        /* ... */
        urb->actual_length = 0;
        urb->error_count = 0;
        urb->hcpriv = urb_priv;
        urb_priv->ep = ep;

        switch (usb_pipetype(urb->pipe)) {
        /* ... set the initial urb_priv->state ... */
        }

        debug_urb_submitted(imx21, urb);
        if (ep_priv->etd[0] < 0) {
                if (ep_priv->waiting_etd) {
                        dev_dbg(imx21->dev,
                                "no ETD available already queued %p\n",
                                ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        goto out;
                }
                ep_priv->etd[0] = alloc_etd(imx21);
                if (ep_priv->etd[0] < 0) {
                        dev_dbg(imx21->dev,
                                "no ETD available queueing %p\n", ep_priv);
                        debug_urb_queued_for_etd(imx21, urb);
                        list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
                        ep_priv->waiting_etd = 1;
                        goto out;
                }
        }

        /* schedule only if no URB is already active on this ETD */
        etd = &imx21->etd[ep_priv->etd[0]];
        if (etd->urb == NULL) {
                DEBUG_LOG_FRAME(imx21, etd, last_req);
                schedule_nonisoc_etd(imx21, urb);
        }

out:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

failed_alloc_ep:
        spin_unlock_irqrestore(&imx21->lock, flags);
        kfree(urb_priv);
        return ret;
}
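/*
 * Dequeue: for isoc URBs tear down their TDs; for an active non-isoc URB
 * stop the ETD and release its DMEM before giving the URB back.
 */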
static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
        int status)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct usb_host_endpoint *ep;
        struct ep_priv *ep_priv;
        struct urb_priv *urb_priv = urb->hcpriv;
        int ret = -EINVAL;

        dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
                urb, usb_pipeisoc(urb->pipe), status);

        spin_lock_irqsave(&imx21->lock, flags);

        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (ret)
                goto fail;
        ep = urb_priv->ep;
        ep_priv = ep->hcpriv;

        debug_urb_unlinked(imx21, urb);

        if (usb_pipeisoc(urb->pipe)) {
                dequeue_isoc_urb(imx21, urb, ep_priv);
                schedule_isoc_etds(hcd, ep);
        } else if (urb_priv->active) {
                int etd_num = ep_priv->etd[0];

                if (etd_num != -1) {
                        struct etd_priv *etd = &imx21->etd[etd_num];

                        disactivate_etd(imx21, etd_num);
                        free_dmem(imx21, etd);
                        etd->urb = NULL;
                        /* ... */
                }
        }

        urb_done(hcd, urb, status);

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;

fail:
        spin_unlock_irqrestore(&imx21->lock, flags);
        return ret;
}
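/*
 * Scan all ETDs for completions. The "unblock" logging below appears to
 * work around transfers that stall without raising an interrupt; the SOF
 * interrupt is used to poll for these.
 */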
static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
{
        int etd_num;
        int enable_sof_int = 0;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
                u32 etd_mask = 1 << etd_num;
                struct etd_priv *etd = &imx21->etd[etd_num];
                u32 dword0;

                /* ... skip ETDs that are neither done nor stalled ... */

                dword0 = etd_readl(imx21, etd_num, 0);
                dev_dbg(imx21->dev,
                        "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
                        etd_num, dword0 & 0x7F,
                        /* ... */);
#ifdef DEBUG
                dev_dbg(imx21->dev,
                        "frame: act=%d disact=%d int=%d req=%d cur=%d\n",
                        etd->activated_frame,
                        etd->disactivated_frame,
                        etd->last_int_frame,
                        etd->last_req_frame,
                        /* ... */);
                imx21->debug_unblocks++;
#endif
                /* ... re-activate the stalled ETD ... */

                if (etd->ep == NULL || etd->urb == NULL) {
                        dev_dbg(imx21->dev,
                                "Interrupt for unexpected etd %d"
                                " ep=%p urb=%p\n",
                                etd_num, etd->ep, etd->urb);
                        disactivate_etd(imx21, etd_num);
                        continue;
                }

                if (usb_pipeisoc(etd->urb->pipe))
                        isoc_etd_done(hcd, etd_num);
                else
                        nonisoc_etd_done(hcd, etd_num);
        }

        /* ... only enable the SOF interrupt while something needs it ... */

        spin_unlock_irqrestore(&imx21->lock, flags);
}
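/* Top-level interrupt handler: dispatches completed ETDs via process_etds(). */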
static irqreturn_t imx21_irq(int irq, void *ptr)
{
        struct usb_hcd *hcd = ptr;
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        /* ... */
}
static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
        struct usb_host_endpoint *ep)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        struct ep_priv *ep_priv;
        int i;

        if (ep == NULL)
                return;

        spin_lock_irqsave(&imx21->lock, flags);
        ep_priv = ep->hcpriv;
        dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);

        if (!list_empty(&ep->urb_list))
                dev_dbg(imx21->dev, "ep's URB list is not empty\n");

        if (ep_priv != NULL) {
                for (i = 0; i < NUM_ISO_ETDS; i++) {
                        if (ep_priv->etd[i] > -1)
                                dev_dbg(imx21->dev,
                                        "free etd %d for disable\n",
                                        ep_priv->etd[i]);

                        free_etd(imx21, ep_priv->etd[i]);
                }
                kfree(ep_priv);
                ep->hcpriv = NULL;
        }

        for (i = 0; i < USB_NUM_ETD; i++) {
                if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
                        dev_err(imx21->dev,
                                "Active etd %d for disabled ep=%p!\n", i, ep);
                        free_etd(imx21, i);
                }
        }
        free_epdmem(imx21, ep);
        spin_unlock_irqrestore(&imx21->lock, flags);
}
static int get_hub_descriptor(struct usb_hcd *hcd,
        struct usb_hub_descriptor *desc)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        /* ... */
}
static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int ports;
        int changed = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);
        /* ... */
        if (ports > 7) {
                ports = 7;
                dev_err(imx21->dev, "ports %d > 7\n", ports);
        }
        for (i = 0; i < ports; i++) {
                /* ... port status changed? ... */ {
                        changed = 1;
                        buf[0] |= 1 << (i + 1);
                }
        }
        spin_unlock_irqrestore(&imx21->lock, flags);

        /* ... */
        return changed;
}
static int imx21_hc_hub_control(struct usb_hcd *hcd,
        u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        int rc = 0;
        u32 status_write = 0;

        switch (typeReq) {
        case ClearHubFeature:
                /* ... */
                break;

        case ClearPortFeature:
                /* ... */
                break;

        case GetHubDescriptor:
                rc = get_hub_descriptor(hcd, (void *)buf);
                break;

        /* ... */

        case GetPortStatus:
                dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
                        /* ... */);
                /* ... */
                break;

        case SetPortFeature:
                /* ... */
                break;

        default:
                /* ... */
                break;
        }

        /* ... */
        return rc;
}
static int imx21_hc_reset(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long timeout;
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);

        /* ... start the reset and wait for the hardware to finish ... */
        timeout = jiffies + HZ;
        while (/* ... reset still in progress ... */) {
                if (time_after(jiffies, timeout)) {
                        spin_unlock_irqrestore(&imx21->lock, flags);
                        dev_err(imx21->dev, "timeout waiting for reset\n");
                        return -ETIMEDOUT;
                }
                spin_unlock_irq(&imx21->lock);
                schedule_timeout_uninterruptible(1);
                spin_lock_irq(&imx21->lock);
        }
        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;
}
static int __devinit imx21_hc_start(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;
        int i, j;
        u32 usb_control = 0;

        /* transceiver wiring from platform data */
        if (imx21->pdata->host1_txenoe)
                /* ... */;
        if (!imx21->pdata->host1_xcverless)
                /* ... */;
        if (imx21->pdata->otg_ext_xcvr)
                /* ... */;

        spin_lock_irqsave(&imx21->lock, flags);
        /* ... program hardware mode and usb_control ... */

        /* Clear the ETDs */
        for (i = 0; i < USB_NUM_ETD; i++)
                for (j = 0; j < 4; j++)
                        etd_writel(imx21, i, j, 0);

        /* ... take the controller out of reset, enable the ports ... */
        if (imx21->pdata->enable_otg_host)
                /* ... */;
        if (imx21->pdata->enable_host1)
                /* ... */;
        if (imx21->pdata->enable_host2)
                /* ... */;

        hcd->state = HC_STATE_RUNNING;

        /* enable host controller interrupts */
        set_register_bits(imx21, USBH_SYSIEN,
                USBH_SYSIEN_HERRINT /* | ... */);
        /* ... */

        spin_unlock_irqrestore(&imx21->lock, flags);
        return 0;
}
static void imx21_hc_stop(struct usb_hcd *hcd)
{
        struct imx21 *imx21 = hcd_to_imx21(hcd);
        unsigned long flags;

        spin_lock_irqsave(&imx21->lock, flags);
        /* ... disable interrupts, stop the controller, gate clocks ... */
        spin_unlock_irqrestore(&imx21->lock, flags);
}
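/*
 * Ops table wiring the handlers above into the USB core; usb_create_hcd()
 * and usb_add_hcd() consume this.
 */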
static struct hc_driver imx21_hc_driver = {
        .description = hcd_name,
        .product_desc = "IMX21 USB Host Controller",
        .hcd_priv_size = sizeof(struct imx21),
        /* ... */

        .reset = imx21_hc_reset,
        .start = imx21_hc_start,
        .stop = imx21_hc_stop,

        /* I/O requests */
        .urb_enqueue = imx21_hc_urb_enqueue,
        .urb_dequeue = imx21_hc_urb_dequeue,
        .endpoint_disable = imx21_hc_endpoint_disable,

        /* scheduling support */
        .get_frame_number = imx21_hc_get_frame,

        /* root hub support */
        .hub_status_data = imx21_hc_hub_status_data,
        .hub_control = imx21_hc_hub_control,
};
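/*
 * Default platform data used when the board supplies none. The struct tag
 * below is assumed from the pdata fields referenced above.
 */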
static struct mx21_usbh_platform_data default_pdata = {
        .enable_otg_host = 1,
        /* ... */
};
static int imx21_remove(struct platform_device *pdev)
{
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct imx21 *imx21 = hcd_to_imx21(hcd);

        /* ... */
        clk_disable_unprepare(imx21->clk);
        /* ... */
        return 0;
}
static int imx21_probe(struct platform_device *pdev)
{
        struct usb_hcd *hcd;
        struct imx21 *imx21;
        /* ... */
        int ret;

        /* ... */
        hcd = usb_create_hcd(&imx21_hc_driver,
                &pdev->dev, dev_name(&pdev->dev));
        if (hcd == NULL) {
                dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
                        dev_name(&pdev->dev));
                return -ENOMEM;
        }

        imx21 = hcd_to_imx21(hcd);
        imx21->hcd = hcd;
        imx21->dev = &pdev->dev;
        imx21->pdata = pdev->dev.platform_data;
        if (!imx21->pdata)
                imx21->pdata = &default_pdata;

        /* ... */
                goto failed_request_mem;

        /* ... */
        if (imx21->regs == NULL) {
                dev_err(imx21->dev, "Cannot map registers\n");
                ret = -ENOMEM;
                goto failed_ioremap;
        }

        /* ... */
        if (IS_ERR(imx21->clk)) {
                ret = PTR_ERR(imx21->clk);
                goto failed_clock_get;
        }

        /* ... */
        if (ret)
                goto failed_clock_set;
        ret = clk_prepare_enable(imx21->clk);
        if (ret)
                goto failed_clock_enable;

        dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
                /* ... */);

        /* ... */
        if (ret != 0) {
                dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
                goto failed_add_hcd;
        }
        return 0;

failed_add_hcd:
        clk_disable_unprepare(imx21->clk);
failed_clock_enable:
failed_clock_set:
failed_clock_get:
        /* ... */
failed_ioremap:
failed_request_mem:
        usb_put_hcd(hcd);
        return ret;
}
static struct platform_driver imx21_hcd_driver = {
        .driver = {
                .name = (char *)hcd_name,
        },
        .probe = imx21_probe,
        .remove = imx21_remove,
        /* ... */
};