Linux Kernel 3.7.1
fifo.c
1 /*
2  * Renesas USB driver
3  *
4  * Copyright (C) 2011 Renesas Solutions Corp.
5  * Kuninori Morimoto <[email protected]>
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
15  *
16  */
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/scatterlist.h>
20 #include "common.h"
21 #include "pipe.h"
22 
23 #define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
24 #define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo))
25 #define usbhsf_get_d1fifo(p) (&((p)->fifo_info.d1fifo))
26 #define usbhsf_is_cfifo(p, f) (usbhsf_get_cfifo(p) == f)
27 
28 #define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
29 
30 /*
31  * packet initialize
32  */
33 void usbhs_pkt_init(struct usbhs_pkt *pkt)
34 {
35  pkt->dma = DMA_ADDR_INVALID;
36  INIT_LIST_HEAD(&pkt->node);
37 }
38 
39 /*
40  * packet control function
41  */
42 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
43 {
44  struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
45  struct device *dev = usbhs_priv_to_dev(priv);
46 
47  dev_err(dev, "null handler\n");
48 
49  return -EINVAL;
50 }
51 
52 static struct usbhs_pkt_handle usbhsf_null_handler = {
53  .prepare = usbhsf_null_handle,
54  .try_run = usbhsf_null_handle,
55 };
56 
57 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
58  void (*done)(struct usbhs_priv *priv,
59  struct usbhs_pkt *pkt),
60  void *buf, int len, int zero, int sequence)
61 {
62  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
63  struct device *dev = usbhs_priv_to_dev(priv);
64  unsigned long flags;
65 
66  if (!done) {
67  dev_err(dev, "no done function\n");
68  return;
69  }
70 
71  /******************** spin lock ********************/
72  usbhs_lock(priv, flags);
73 
74  if (!pipe->handler) {
75  dev_err(dev, "no handler function\n");
76  pipe->handler = &usbhsf_null_handler;
77  }
78 
79  list_move_tail(&pkt->node, &pipe->list);
80 
81  /*
82  * Each pkt must hold its own handler,
83  * because the handler may change with the situation
84  * (e.g. dma handler -> pio handler).
85  */
86  pkt->pipe = pipe;
87  pkt->buf = buf;
88  pkt->handler = pipe->handler;
89  pkt->length = len;
90  pkt->zero = zero;
91  pkt->actual = 0;
92  pkt->done = done;
93  pkt->sequence = sequence;
94 
95  usbhs_unlock(priv, flags);
96  /******************** spin unlock ******************/
97 }
98 
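/*
 * Illustrative usage sketch (added note, not part of fifo.c): a caller
 * queues and starts a packet roughly as follows; "my_done", "buf" and
 * "len" are hypothetical names.
 *
 *	static void my_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
 *	{
 *		... pkt->actual holds the number of bytes transferred ...
 *	}
 *
 *	usbhs_pkt_init(pkt);
 *	usbhs_pkt_push(pipe, pkt, my_done, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 *
 * usbhs_pkt_push() only links the packet into pipe->list and fills in its
 * fields under the lock; usbhs_pkt_start() (defined below) then invokes
 * the handler's prepare callback.
 */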
99 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
100 {
101  list_del_init(&pkt->node);
102 }
103 
104 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
105 {
106  if (list_empty(&pipe->list))
107  return NULL;
108 
109  return list_first_entry(&pipe->list, struct usbhs_pkt, node);
110 }
111 
112 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
113 {
114  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
115  unsigned long flags;
116 
117  /******************** spin lock ********************/
118  usbhs_lock(priv, flags);
119 
120  if (!pkt)
121  pkt = __usbhsf_pkt_get(pipe);
122 
123  if (pkt)
124  __usbhsf_pkt_del(pkt);
125 
126  usbhs_unlock(priv, flags);
127  /******************** spin unlock ******************/
128 
129  return pkt;
130 }
131 
132 enum {
133  USBHSF_PKT_PREPARE,
134  USBHSF_PKT_TRY_RUN,
135  USBHSF_PKT_DMA_DONE,
136 };
137 
138 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
139 {
140  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
141  struct usbhs_pkt *pkt;
142  struct device *dev = usbhs_priv_to_dev(priv);
143  int (*func)(struct usbhs_pkt *pkt, int *is_done);
144  unsigned long flags;
145  int ret = 0;
146  int is_done = 0;
147 
148  /******************** spin lock ********************/
149  usbhs_lock(priv, flags);
150 
151  pkt = __usbhsf_pkt_get(pipe);
152  if (!pkt)
153  goto __usbhs_pkt_handler_end;
154 
155  switch (type) {
156  case USBHSF_PKT_PREPARE:
157  func = pkt->handler->prepare;
158  break;
159  case USBHSF_PKT_TRY_RUN:
160  func = pkt->handler->try_run;
161  break;
162  case USBHSF_PKT_DMA_DONE:
163  func = pkt->handler->dma_done;
164  break;
165  default:
166  dev_err(dev, "unknown pkt handler\n");
167  goto __usbhs_pkt_handler_end;
168  }
169 
170  ret = func(pkt, &is_done);
171 
172  if (is_done)
173  __usbhsf_pkt_del(pkt);
174 
175 __usbhs_pkt_handler_end:
176  usbhs_unlock(priv, flags);
177  /******************** spin unlock ******************/
178 
179  if (is_done) {
180  pkt->done(priv, pkt);
181  usbhs_pkt_start(pipe);
182  }
183 
184  return ret;
185 }
186 
187 void usbhs_pkt_start(struct usbhs_pipe *pipe)
188 {
189  usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
190 }
191 
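/*
 * Added note (not part of fifo.c): the three usbhs_pkt_handle callbacks
 * are dispatched by usbhsf_pkt_handler() from three different contexts,
 *
 *	USBHSF_PKT_PREPARE   <- usbhs_pkt_start()                     (queueing)
 *	USBHSF_PKT_TRY_RUN   <- usbhsf_irq_ready()/usbhsf_irq_empty() (BRDY/BEMP irq)
 *	USBHSF_PKT_DMA_DONE  <- usbhsf_dma_complete()                 (dmaengine callback)
 *
 * When a callback reports completion through *is_done, the packet is
 * unlinked from the pipe, its done() callback runs outside the lock, and
 * usbhs_pkt_start() is called again to kick the next queued packet.
 */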
192 /*
193  * irq enable/disable function
194  */
195 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
196 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
197 #define usbhsf_irq_callback_ctrl(pipe, status, enable) \
198  ({ \
199  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
200  struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
201  u16 status = (1 << usbhs_pipe_number(pipe)); \
202  if (!mod) \
203  return; \
204  if (enable) \
205  mod->irq_##status |= status; \
206  else \
207  mod->irq_##status &= ~status; \
208  usbhs_irq_callback_update(priv, mod); \
209  })
210 
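/*
 * Worked expansion (added note, not part of fifo.c): usbhsf_irq_empty_ctrl()
 * passes "bempsts" as the "status" parameter, so the statement expression
 * above expands roughly to
 *
 *	u16 bempsts = (1 << usbhs_pipe_number(pipe));
 *	if (enable)
 *		mod->irq_bempsts |= bempsts;
 *	else
 *		mod->irq_bempsts &= ~bempsts;
 *	usbhs_irq_callback_update(priv, mod);
 *
 * i.e. the per-pipe bit is set or cleared in the BEMP (or, for
 * usbhsf_irq_ready_ctrl(), the BRDY) interrupt mask that
 * usbhs_irq_callback_update() then applies.  Note that the bare "return"
 * in the macro returns from the *calling* function when no mod is active.
 */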
211 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
212 {
213  /*
214  * A DCP pipe can NOT use the "ready" interrupt for "send";
215  * it should use the "empty" interrupt.
216  * see
217  * "Operation" - "Interrupt Function" - "BRDY Interrupt"
218  *
219  * On the other hand, a normal pipe can use the "ready" interrupt for
220  * "send" even though it is single/double buffered.
221  */
222  if (usbhs_pipe_is_dcp(pipe))
223  usbhsf_irq_empty_ctrl(pipe, enable);
224  else
225  usbhsf_irq_ready_ctrl(pipe, enable);
226 }
227 
228 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
229 {
230  usbhsf_irq_ready_ctrl(pipe, enable);
231 }
232 
233 /*
234  * FIFO ctrl
235  */
236 static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
237  struct usbhs_fifo *fifo)
238 {
239  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
240 
241  usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
242 }
243 
244 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
245  struct usbhs_fifo *fifo)
246 {
247  int timeout = 1024;
248 
249  do {
250  /* The FIFO port is accessible */
251  if (usbhs_read(priv, fifo->ctr) & FRDY)
252  return 0;
253 
254  udelay(10);
255  } while (timeout--);
256 
257  return -EBUSY;
258 }
259 
260 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
261  struct usbhs_fifo *fifo)
262 {
263  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
264 
265  if (!usbhs_pipe_is_dcp(pipe))
266  usbhsf_fifo_barrier(priv, fifo);
267 
268  usbhs_write(priv, fifo->ctr, BCLR);
269 }
270 
271 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
272  struct usbhs_fifo *fifo)
273 {
274  return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
275 }
276 
277 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
278  struct usbhs_fifo *fifo)
279 {
280  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
281 
282  usbhs_pipe_select_fifo(pipe, NULL);
283  usbhs_write(priv, fifo->sel, 0);
284 }
285 
286 static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
287  struct usbhs_fifo *fifo,
288  int write)
289 {
290  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
291  struct device *dev = usbhs_priv_to_dev(priv);
292  int timeout = 1024;
293  u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
294  u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
295 
296  if (usbhs_pipe_is_busy(pipe) ||
297  usbhsf_fifo_is_busy(fifo))
298  return -EBUSY;
299 
300  if (usbhs_pipe_is_dcp(pipe)) {
301  base |= (1 == write) << 5; /* ISEL */
302 
303  if (usbhs_mod_is_host(priv))
304  usbhs_dcp_dir_for_host(pipe, write);
305  }
306 
307  /* "base" will be used below */
308  if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
309  usbhs_write(priv, fifo->sel, base);
310  else
311  usbhs_write(priv, fifo->sel, base | MBW_32);
312 
313  /* check ISEL and CURPIPE value */
314  while (timeout--) {
315  if (base == (mask & usbhs_read(priv, fifo->sel))) {
316  usbhs_pipe_select_fifo(pipe, fifo);
317  return 0;
318  }
319  udelay(10);
320  }
321 
322  dev_err(dev, "fifo select error\n");
323 
324  return -EIO;
325 }
326 
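/*
 * Worked example (added note, not part of fifo.c): CURPIPE occupies the
 * low four bits of xFIFOSEL and ISEL is bit 5, hence
 * mask = (1 << 5) | 0xF = 0x2f.  Selecting the DCP (pipe 0) for a write
 * gives base = 0 | (1 << 5) = 0x20, while selecting pipe 3 for a read
 * gives base = 0x3.  Because the selection may not take effect
 * immediately, the loop above re-reads FIFOSEL until the ISEL/CURPIPE
 * field equals "base" before binding the pipe to the FIFO with
 * usbhs_pipe_select_fifo().
 */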
327 /*
328  * DCP status stage
329  */
330 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
331 {
332  struct usbhs_pipe *pipe = pkt->pipe;
333  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
334  struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
335  struct device *dev = usbhs_priv_to_dev(priv);
336  int ret;
337 
338  usbhs_pipe_disable(pipe);
339 
340  ret = usbhsf_fifo_select(pipe, fifo, 1);
341  if (ret < 0) {
342  dev_err(dev, "%s() failed\n", __func__);
343  return ret;
344  }
345 
346  usbhs_pipe_sequence_data1(pipe); /* DATA1 */
347 
348  usbhsf_fifo_clear(pipe, fifo);
349  usbhsf_send_terminator(pipe, fifo);
350 
351  usbhsf_fifo_unselect(pipe, fifo);
352 
353  usbhsf_tx_irq_ctrl(pipe, 1);
354  usbhs_pipe_enable(pipe);
355 
356  return ret;
357 }
358 
359 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
360 {
361  struct usbhs_pipe *pipe = pkt->pipe;
362  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
363  struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
364  struct device *dev = usbhs_priv_to_dev(priv);
365  int ret;
366 
367  usbhs_pipe_disable(pipe);
368 
369  ret = usbhsf_fifo_select(pipe, fifo, 0);
370  if (ret < 0) {
371  dev_err(dev, "%s() fail\n", __func__);
372  return ret;
373  }
374 
375  usbhs_pipe_sequence_data1(pipe); /* DATA1 */
376  usbhsf_fifo_clear(pipe, fifo);
377 
378  usbhsf_fifo_unselect(pipe, fifo);
379 
380  usbhsf_rx_irq_ctrl(pipe, 1);
381  usbhs_pipe_enable(pipe);
382 
383  return ret;
384 
385 }
386 
387 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
388 {
389  struct usbhs_pipe *pipe = pkt->pipe;
390 
391  if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
392  usbhsf_tx_irq_ctrl(pipe, 0);
393  else
394  usbhsf_rx_irq_ctrl(pipe, 0);
395 
396  pkt->actual = pkt->length;
397  *is_done = 1;
398 
399  return 0;
400 }
401 
402 struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
403  .prepare = usbhs_dcp_dir_switch_to_write,
404  .try_run = usbhs_dcp_dir_switch_done,
405 };
406 
407 struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
408  .prepare = usbhs_dcp_dir_switch_to_read,
409  .try_run = usbhs_dcp_dir_switch_done,
410 };
411 
412 /*
413  * DCP data stage (push)
414  */
415 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
416 {
417  struct usbhs_pipe *pipe = pkt->pipe;
418 
419  usbhs_pipe_sequence_data1(pipe); /* DATA1 */
420 
421  /*
422  * change handler to PIO push
423  */
424  pkt->handler = &usbhs_fifo_pio_push_handler;
425 
426  return pkt->handler->prepare(pkt, is_done);
427 }
428 
429 struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
430  .prepare = usbhsf_dcp_data_stage_try_push,
431 };
432 
433 /*
434  * DCP data stage (pop)
435  */
436 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
437  int *is_done)
438 {
439  struct usbhs_pipe *pipe = pkt->pipe;
440  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
441  struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
442 
443  if (usbhs_pipe_is_busy(pipe))
444  return 0;
445 
446  /*
447  * prepare pop for DCP should
448  * - change DCP direction,
449  * - clear fifo
450  * - DATA1
451  */
452  usbhs_pipe_disable(pipe);
453 
454  usbhs_pipe_sequence_data1(pipe); /* DATA1 */
455 
456  usbhsf_fifo_select(pipe, fifo, 0);
457  usbhsf_fifo_clear(pipe, fifo);
458  usbhsf_fifo_unselect(pipe, fifo);
459 
460  /*
461  * change handler to PIO pop
462  */
463  pkt->handler = &usbhs_fifo_pio_pop_handler;
464 
465  return pkt->handler->prepare(pkt, is_done);
466 }
467 
468 struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
469  .prepare = usbhsf_dcp_data_stage_prepare_pop,
470 };
471 
472 /*
473  * PIO push handler
474  */
475 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
476 {
477  struct usbhs_pipe *pipe = pkt->pipe;
478  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
479  struct device *dev = usbhs_priv_to_dev(priv);
480  struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
481  void __iomem *addr = priv->base + fifo->port;
482  u8 *buf;
483  int maxp = usbhs_pipe_get_maxpacket(pipe);
484  int total_len;
485  int i, ret, len;
486  int is_short;
487 
488  usbhs_pipe_data_sequence(pipe, pkt->sequence);
489  pkt->sequence = -1; /* -1 sequence will be ignored */
490 
491  ret = usbhsf_fifo_select(pipe, fifo, 1);
492  if (ret < 0)
493  return 0;
494 
495  ret = usbhs_pipe_is_accessible(pipe);
496  if (ret < 0) {
497  /* inaccessible pipe is not an error */
498  ret = 0;
499  goto usbhs_fifo_write_busy;
500  }
501 
502  ret = usbhsf_fifo_barrier(priv, fifo);
503  if (ret < 0)
504  goto usbhs_fifo_write_busy;
505 
506  buf = pkt->buf + pkt->actual;
507  len = pkt->length - pkt->actual;
508  len = min(len, maxp);
509  total_len = len;
510  is_short = total_len < maxp;
511 
512  /*
513  * FIXME
514  *
515  * 32-bit access only
516  */
517  if (len >= 4 && !((unsigned long)buf & 0x03)) {
518  iowrite32_rep(addr, buf, len / 4);
519  len %= 4;
520  buf += total_len - len;
521  }
522 
523  /* the rest operation */
524  for (i = 0; i < len; i++)
525  iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
526 
527  /*
528  * variable update
529  */
530  pkt->actual += total_len;
531 
532  if (pkt->actual < pkt->length)
533  *is_done = 0; /* there are remainder data */
534  else if (is_short)
535  *is_done = 1; /* short packet */
536  else
537  *is_done = !pkt->zero; /* send zero packet ? */
538 
539  /*
540  * pipe/irq handling
541  */
542  if (is_short)
543  usbhsf_send_terminator(pipe, fifo);
544 
545  usbhsf_tx_irq_ctrl(pipe, !*is_done);
546  usbhs_pipe_enable(pipe);
547 
548  dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
549  usbhs_pipe_number(pipe),
550  pkt->length, pkt->actual, *is_done, pkt->zero);
551 
552  /*
553  * Transmission end
554  */
555  if (*is_done) {
556  if (usbhs_pipe_is_dcp(pipe))
557  usbhs_dcp_control_transfer_done(pipe);
558  }
559 
560  usbhsf_fifo_unselect(pipe, fifo);
561 
562  return 0;
563 
564 usbhs_fifo_write_busy:
565  usbhsf_fifo_unselect(pipe, fifo);
566 
567  /*
568  * pipe is busy.
569  * retry in interrupt
570  */
571  usbhsf_tx_irq_ctrl(pipe, 1);
572 
573  return ret;
574 }
575 
576 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
577  .prepare = usbhsf_pio_try_push,
578  .try_run = usbhsf_pio_try_push,
579 };
580 
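/*
 * Added note (not part of fifo.c): in the PIO push path above, aligned
 * data is written 32 bits at a time with iowrite32_rep() and the
 * remaining 1-3 tail bytes are written through the upper byte lanes of
 * the same 32-bit FIFO port; the "addr + (0x03 - (i & 0x03))" arithmetic
 * maps
 *
 *	buf[0] -> addr + 3,  buf[1] -> addr + 2,  buf[2] -> addr + 1
 *
 * which is presumably how the controller expects partial words to be
 * packed into the FIFO.
 */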
581 /*
582  * PIO pop handler
583  */
584 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
585 {
586  struct usbhs_pipe *pipe = pkt->pipe;
587 
588  if (usbhs_pipe_is_busy(pipe))
589  return 0;
590 
591  /*
592  * pipe enable to prepare packet receive
593  */
594  usbhs_pipe_data_sequence(pipe, pkt->sequence);
595  pkt->sequence = -1; /* -1 sequence will be ignored */
596 
597  usbhs_pipe_enable(pipe);
598  usbhsf_rx_irq_ctrl(pipe, 1);
599 
600  return 0;
601 }
602 
603 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
604 {
605  struct usbhs_pipe *pipe = pkt->pipe;
606  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
607  struct device *dev = usbhs_priv_to_dev(priv);
608  struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
609  void __iomem *addr = priv->base + fifo->port;
610  u8 *buf;
611  u32 data = 0;
612  int maxp = usbhs_pipe_get_maxpacket(pipe);
613  int rcv_len, len;
614  int i, ret;
615  int total_len = 0;
616 
617  ret = usbhsf_fifo_select(pipe, fifo, 0);
618  if (ret < 0)
619  return 0;
620 
621  ret = usbhsf_fifo_barrier(priv, fifo);
622  if (ret < 0)
623  goto usbhs_fifo_read_busy;
624 
625  rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
626 
627  buf = pkt->buf + pkt->actual;
628  len = pkt->length - pkt->actual;
629  len = min(len, rcv_len);
630  total_len = len;
631 
632  /*
633  * update the actual length first here, to decide whether to disable the pipe:
634  * if this pipe stayed in BUF status after all data were popped,
635  * the next interrupt/token would be issued again
636  */
637  pkt->actual += total_len;
638 
639  if ((pkt->actual == pkt->length) || /* receive all data */
640  (total_len < maxp)) { /* short packet */
641  *is_done = 1;
642  usbhsf_rx_irq_ctrl(pipe, 0);
643  usbhs_pipe_disable(pipe); /* disable pipe first */
644  }
645 
646  /*
647  * Buffer clear if Zero-Length packet
648  *
649  * see
650  * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
651  */
652  if (0 == rcv_len) {
653  pkt->zero = 1;
654  usbhsf_fifo_clear(pipe, fifo);
655  goto usbhs_fifo_read_end;
656  }
657 
658  /*
659  * FIXME
660  *
661  * 32-bit access only
662  */
663  if (len >= 4 && !((unsigned long)buf & 0x03)) {
664  ioread32_rep(addr, buf, len / 4);
665  len %= 4;
666  buf += total_len - len;
667  }
668 
669  /* the rest operation */
670  for (i = 0; i < len; i++) {
671  if (!(i & 0x03))
672  data = ioread32(addr);
673 
674  buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
675  }
676 
677 usbhs_fifo_read_end:
678  dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
679  usbhs_pipe_number(pipe),
680  pkt->length, pkt->actual, *is_done, pkt->zero);
681 
682 usbhs_fifo_read_busy:
683  usbhsf_fifo_unselect(pipe, fifo);
684 
685  return ret;
686 }
687 
688 struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
689  .prepare = usbhsf_prepare_pop,
690  .try_run = usbhsf_pio_try_pop,
691 };
692 
693 /*
694  * DCP control stage handler
695  */
696 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
697 {
698  usbhs_dcp_control_transfer_done(pkt->pipe);
699 
700  *is_done = 1;
701 
702  return 0;
703 }
704 
705 struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
706  .prepare = usbhsf_ctrl_stage_end,
707  .try_run = usbhsf_ctrl_stage_end,
708 };
709 
710 /*
711  * DMA fifo functions
712  */
713 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
714  struct usbhs_pkt *pkt)
715 {
716  if (&usbhs_fifo_dma_push_handler == pkt->handler)
717  return fifo->tx_chan;
718 
719  if (&usbhs_fifo_dma_pop_handler == pkt->handler)
720  return fifo->rx_chan;
721 
722  return NULL;
723 }
724 
725 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
726  struct usbhs_pkt *pkt)
727 {
728  struct usbhs_fifo *fifo;
729 
730  /* DMA :: D0FIFO */
731  fifo = usbhsf_get_d0fifo(priv);
732  if (usbhsf_dma_chan_get(fifo, pkt) &&
733  !usbhsf_fifo_is_busy(fifo))
734  return fifo;
735 
736  /* DMA :: D1FIFO */
737  fifo = usbhsf_get_d1fifo(priv);
738  if (usbhsf_dma_chan_get(fifo, pkt) &&
739  !usbhsf_fifo_is_busy(fifo))
740  return fifo;
741 
742  return NULL;
743 }
744 
745 #define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
746 #define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
747 static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
748  struct usbhs_fifo *fifo,
749  u16 dreqe)
750 {
751  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
752 
753  usbhs_bset(priv, fifo->sel, DREQE, dreqe);
754 }
755 
756 #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
757 #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
758 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
759 {
760  struct usbhs_pipe *pipe = pkt->pipe;
761  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
762  struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
763 
764  return info->dma_map_ctrl(pkt, map);
765 }
766 
767 static void usbhsf_dma_complete(void *arg);
768 static void xfer_work(struct work_struct *work)
769 {
770  struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
771  struct usbhs_pipe *pipe = pkt->pipe;
772  struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
773  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
775  struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
776  struct device *dev = usbhs_priv_to_dev(priv);
778 
780 
781  desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
782  pkt->trans, dir,
784  if (!desc)
785  return;
786 
787  desc->callback = usbhsf_dma_complete;
788  desc->callback_param = pipe;
789 
790  if (dmaengine_submit(desc) < 0) {
791  dev_err(dev, "Failed to submit dma descriptor\n");
792  return;
793  }
794 
795  dev_dbg(dev, " %s %d (%d/ %d)\n",
796  fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
797 
798  usbhs_pipe_enable(pipe);
799  usbhsf_dma_start(pipe, fifo);
800  dma_async_issue_pending(chan);
801 }
802 
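/*
 * Added note (not part of fifo.c): xfer_work() above is the usual
 * dmaengine slave sequence,
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_addr, len, dir, flags);
 *	desc->callback = usbhsf_dma_complete;
 *	dmaengine_submit(desc);
 *	usbhsf_dma_start(pipe, fifo);	(sets DREQE in the FIFO's xFIFOSEL)
 *	dma_async_issue_pending(chan);
 *
 * and usbhsf_dma_complete(), defined near the end of this file, feeds the
 * completion back into usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE).
 */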
803 /*
804  * DMA push handler
805  */
806 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
807 {
808  struct usbhs_pipe *pipe = pkt->pipe;
809  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
810  struct usbhs_fifo *fifo;
811  int len = pkt->length - pkt->actual;
812  int ret;
813 
814  if (usbhs_pipe_is_busy(pipe))
815  return 0;
816 
817  /* use PIO if packet is less than pio_dma_border or pipe is DCP */
818  if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
819  usbhs_pipe_is_dcp(pipe))
820  goto usbhsf_pio_prepare_push;
821 
822  if (len & 0x7) /* 8byte alignment */
823  goto usbhsf_pio_prepare_push;
824 
825  if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
826  goto usbhsf_pio_prepare_push;
827 
828  /* get an enabled DMA fifo */
829  fifo = usbhsf_get_dma_fifo(priv, pkt);
830  if (!fifo)
831  goto usbhsf_pio_prepare_push;
832 
833  if (usbhsf_dma_map(pkt) < 0)
834  goto usbhsf_pio_prepare_push;
835 
836  ret = usbhsf_fifo_select(pipe, fifo, 0);
837  if (ret < 0)
838  goto usbhsf_pio_prepare_push_unmap;
839 
840  pkt->trans = len;
841 
842  INIT_WORK(&pkt->work, xfer_work);
843  schedule_work(&pkt->work);
844 
845  return 0;
846 
847 usbhsf_pio_prepare_push_unmap:
848  usbhsf_dma_unmap(pkt);
849 usbhsf_pio_prepare_push:
850  /*
851  * change handler to PIO
852  */
853  pkt->handler = &usbhs_fifo_pio_push_handler;
854 
855  return pkt->handler->prepare(pkt, is_done);
856 }
857 
858 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
859 {
860  struct usbhs_pipe *pipe = pkt->pipe;
861 
862  pkt->actual = pkt->trans;
863 
864  *is_done = !pkt->zero; /* send zero packet ? */
865 
866  usbhsf_dma_stop(pipe, pipe->fifo);
867  usbhsf_dma_unmap(pkt);
868  usbhsf_fifo_unselect(pipe, pipe->fifo);
869 
870  return 0;
871 }
872 
873 struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
874  .prepare = usbhsf_dma_prepare_push,
875  .dma_done = usbhsf_dma_push_done,
876 };
877 
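/*
 * Added note (not part of fifo.c): usbhsf_dma_prepare_push() above falls
 * back to the PIO push handler whenever DMA is not possible or not worth
 * it, i.e. when
 *
 *	- the remaining length is below the pio_dma_border parameter,
 *	- the pipe is the DCP,
 *	- the length or the buffer address is not 8-byte aligned,
 *	- no free D0FIFO/D1FIFO DMA channel is available, or
 *	- mapping the buffer for DMA fails.
 *
 * usbhsf_dma_try_pop() below applies the same kind of checks to the
 * receive direction.
 */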
878 /*
879  * DMA pop handler
880  */
881 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
882 {
883  struct usbhs_pipe *pipe = pkt->pipe;
884  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
885  struct usbhs_fifo *fifo;
886  int len, ret;
887 
888  if (usbhs_pipe_is_busy(pipe))
889  return 0;
890 
891  if (usbhs_pipe_is_dcp(pipe))
892  goto usbhsf_pio_prepare_pop;
893 
894  /* get an enabled DMA fifo */
895  fifo = usbhsf_get_dma_fifo(priv, pkt);
896  if (!fifo)
897  goto usbhsf_pio_prepare_pop;
898 
899  if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
900  goto usbhsf_pio_prepare_pop;
901 
902  ret = usbhsf_fifo_select(pipe, fifo, 0);
903  if (ret < 0)
904  goto usbhsf_pio_prepare_pop;
905 
906  /* use PIO if packet is less than pio_dma_border */
907  len = usbhsf_fifo_rcv_len(priv, fifo);
908  len = min(pkt->length - pkt->actual, len);
909  if (len & 0x7) /* 8byte alignment */
910  goto usbhsf_pio_prepare_pop_unselect;
911 
912  if (len < usbhs_get_dparam(priv, pio_dma_border))
913  goto usbhsf_pio_prepare_pop_unselect;
914 
915  ret = usbhsf_fifo_barrier(priv, fifo);
916  if (ret < 0)
917  goto usbhsf_pio_prepare_pop_unselect;
918 
919  if (usbhsf_dma_map(pkt) < 0)
920  goto usbhsf_pio_prepare_pop_unselect;
921 
922  /* DMA */
923 
924  /*
925  * usbhs_fifo_dma_pop_handler :: prepare
926  * enabled the irq needed to get here,
927  * but it is no longer needed for DMA, so disable it.
928  */
929  usbhsf_rx_irq_ctrl(pipe, 0);
930 
931  pkt->trans = len;
932 
933  INIT_WORK(&pkt->work, xfer_work);
934  schedule_work(&pkt->work);
935 
936  return 0;
937 
938 usbhsf_pio_prepare_pop_unselect:
939  usbhsf_fifo_unselect(pipe, fifo);
940 usbhsf_pio_prepare_pop:
941 
942  /*
943  * change handler to PIO
944  */
945  pkt->handler = &usbhs_fifo_pio_pop_handler;
946 
947  return pkt->handler->try_run(pkt, is_done);
948 }
949 
950 static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
951 {
952  struct usbhs_pipe *pipe = pkt->pipe;
953  int maxp = usbhs_pipe_get_maxpacket(pipe);
954 
955  usbhsf_dma_stop(pipe, pipe->fifo);
956  usbhsf_dma_unmap(pkt);
957  usbhsf_fifo_unselect(pipe, pipe->fifo);
958 
959  pkt->actual += pkt->trans;
960 
961  if ((pkt->actual == pkt->length) || /* receive all data */
962  (pkt->trans < maxp)) { /* short packet */
963  *is_done = 1;
964  } else {
965  /* re-enable */
966  usbhsf_prepare_pop(pkt, is_done);
967  }
968 
969  return 0;
970 }
971 
972 struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
973  .prepare = usbhsf_prepare_pop,
974  .try_run = usbhsf_dma_try_pop,
975  .dma_done = usbhsf_dma_pop_done
976 };
977 
978 /*
979  * DMA setting
980  */
981 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
982 {
983  struct sh_dmae_slave *slave = param;
984 
985  /*
986  * FIXME
987  *
988  * usbhs doesn't recognize id = 0 as valid DMA
989  */
990  if (0 == slave->shdma_slave.slave_id)
991  return false;
992 
993  chan->private = slave;
994 
995  return true;
996 }
997 
998 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
999 {
1000  if (fifo->tx_chan)
1001  dma_release_channel(fifo->tx_chan);
1002  if (fifo->rx_chan)
1003  dma_release_channel(fifo->rx_chan);
1004 
1005  fifo->tx_chan = NULL;
1006  fifo->rx_chan = NULL;
1007 }
1008 
1009 static void usbhsf_dma_init(struct usbhs_priv *priv,
1010  struct usbhs_fifo *fifo)
1011 {
1012  struct device *dev = usbhs_priv_to_dev(priv);
1013  dma_cap_mask_t mask;
1014 
1015  dma_cap_zero(mask);
1016  dma_cap_set(DMA_SLAVE, mask);
1017  fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1018  &fifo->tx_slave);
1019 
1020  dma_cap_zero(mask);
1021  dma_cap_set(DMA_SLAVE, mask);
1022  fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1023  &fifo->rx_slave);
1024 
1025  if (fifo->tx_chan || fifo->rx_chan)
1026  dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1027  fifo->name,
1028  fifo->tx_chan ? "[TX]" : " ",
1029  fifo->rx_chan ? "[RX]" : " ");
1030 }
1031 
1032 /*
1033  * irq functions
1034  */
1035 static int usbhsf_irq_empty(struct usbhs_priv *priv,
1036  struct usbhs_irq_state *irq_state)
1037 {
1038  struct usbhs_pipe *pipe;
1039  struct device *dev = usbhs_priv_to_dev(priv);
1040  int i, ret;
1041 
1042  if (!irq_state->bempsts) {
1043  dev_err(dev, "debug %s !!\n", __func__);
1044  return -EIO;
1045  }
1046 
1047  dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1048 
1049  /*
1050  * search interrupted "pipe"
1051  * not "uep".
1052  */
1053  usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1054  if (!(irq_state->bempsts & (1 << i)))
1055  continue;
1056 
1057  ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1058  if (ret < 0)
1059  dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1060  }
1061 
1062  return 0;
1063 }
1064 
1065 static int usbhsf_irq_ready(struct usbhs_priv *priv,
1066  struct usbhs_irq_state *irq_state)
1067 {
1068  struct usbhs_pipe *pipe;
1069  struct device *dev = usbhs_priv_to_dev(priv);
1070  int i, ret;
1071 
1072  if (!irq_state->brdysts) {
1073  dev_err(dev, "debug %s !!\n", __func__);
1074  return -EIO;
1075  }
1076 
1077  dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1078 
1079  /*
1080  * search interrupted "pipe"
1081  * not "uep".
1082  */
1083  usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1084  if (!(irq_state->brdysts & (1 << i)))
1085  continue;
1086 
1087  ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1088  if (ret < 0)
1089  dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1090  }
1091 
1092  return 0;
1093 }
1094 
1095 static void usbhsf_dma_complete(void *arg)
1096 {
1097  struct usbhs_pipe *pipe = arg;
1098  struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1099  struct device *dev = usbhs_priv_to_dev(priv);
1100  int ret;
1101 
1102  ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1103  if (ret < 0)
1104  dev_err(dev, "dma_complete run_error %d : %d\n",
1105  usbhs_pipe_number(pipe), ret);
1106 }
1107 
1108 /*
1109  * fifo init
1110  */
1111 void usbhs_fifo_init(struct usbhs_priv *priv)
1112 {
1113  struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1114  struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1115  struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1116  struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1117 
1118  mod->irq_empty = usbhsf_irq_empty;
1119  mod->irq_ready = usbhsf_irq_ready;
1120  mod->irq_bempsts = 0;
1121  mod->irq_brdysts = 0;
1122 
1123  cfifo->pipe = NULL;
1124  cfifo->tx_chan = NULL;
1125  cfifo->rx_chan = NULL;
1126 
1127  d0fifo->pipe = NULL;
1128  d0fifo->tx_chan = NULL;
1129  d0fifo->rx_chan = NULL;
1130 
1131  d1fifo->pipe = NULL;
1132  d1fifo->tx_chan = NULL;
1133  d1fifo->rx_chan = NULL;
1134 
1135  usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
1136  usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
1137 }
1138 
1139 void usbhs_fifo_quit(struct usbhs_priv *priv)
1140 {
1141  struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1142 
1143  mod->irq_empty = NULL;
1144  mod->irq_ready = NULL;
1145  mod->irq_bempsts = 0;
1146  mod->irq_brdysts = 0;
1147 
1148  usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
1149  usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
1150 }
1151 
1152 int usbhs_fifo_probe(struct usbhs_priv *priv)
1153 {
1154  struct usbhs_fifo *fifo;
1155 
1156  /* CFIFO */
1157  fifo = usbhsf_get_cfifo(priv);
1158  fifo->name = "CFIFO";
1159  fifo->port = CFIFO;
1160  fifo->sel = CFIFOSEL;
1161  fifo->ctr = CFIFOCTR;
1162 
1163  /* D0FIFO */
1164  fifo = usbhsf_get_d0fifo(priv);
1165  fifo->name = "D0FIFO";
1166  fifo->port = D0FIFO;
1167  fifo->sel = D0FIFOSEL;
1168  fifo->ctr = D0FIFOCTR;
1169  fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
1170  fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
1171 
1172  /* D1FIFO */
1173  fifo = usbhsf_get_d1fifo(priv);
1174  fifo->name = "D1FIFO";
1175  fifo->port = D1FIFO;
1176  fifo->sel = D1FIFOSEL;
1177  fifo->ctr = D1FIFOCTR;
1178  fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
1179  fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
1180 
1181  return 0;
1182 }
1183 
1184 void usbhs_fifo_remove(struct usbhs_priv *priv)
1185 {
1186 }