Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
dma.c
Go to the documentation of this file.
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <[email protected]>
6  * DMA channel linking for 1610 by Samuel Ortiz <[email protected]>
 * Graphics DMA and LCD DMA graphics transformations
8  * by Imre Deak <[email protected]>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <[email protected]>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <[email protected]>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19  * Converted DMA library into DMA platform driver.
20  * - G, Manjunath Kondaiah <[email protected]>
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License version 2 as
24  * published by the Free Software Foundation.
25  *
26  */
27 
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/sched.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/io.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38 
39 #include <plat/cpu.h>
40 #include <plat/dma.h>
41 #include <plat/tc.h>
42 
43 /*
44  * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
45  * channels that an instance of the SDMA IP block can support. Used
46  * to size arrays. (The actual maximum on a particular SoC may be less
47  * than this -- for example, OMAP1 SDMA instances only support 17 logical
48  * DMA channels.)
49  */
50 #define MAX_LOGICAL_DMA_CH_COUNT 32
51 
52 #undef DEBUG
53 
54 #ifndef CONFIG_ARCH_OMAP1
57 };
58 
60 #endif
61 
62 #define OMAP_DMA_ACTIVE 0x01
63 #define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff
64 
65 #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
66 
67 static struct omap_system_dma_plat_info *p;
68 static struct omap_dma_dev_attr *d;
69 
70 static int enable_1510_mode;
71 static u32 errata;
72 
73 static struct omap_dma_global_context_registers {
74  u32 dma_irqenable_l0;
75  u32 dma_ocp_sysconfig;
76  u32 dma_gcr;
77 } omap_dma_global_context;
78 
79 struct dma_link_info {
82 
83  int q_count;
84  int q_tail;
85  int q_head;
86 
89 
90 };
91 
92 static struct dma_link_info *dma_linked_lch;
93 
94 #ifndef CONFIG_ARCH_OMAP1
95 
96 /* Chain handling macros */
97 #define OMAP_DMA_CHAIN_QINIT(chain_id) \
98  do { \
99  dma_linked_lch[chain_id].q_head = \
100  dma_linked_lch[chain_id].q_tail = \
101  dma_linked_lch[chain_id].q_count = 0; \
102  } while (0)
103 #define OMAP_DMA_CHAIN_QFULL(chain_id) \
104  (dma_linked_lch[chain_id].no_of_lchs_linked == \
105  dma_linked_lch[chain_id].q_count)
106 #define OMAP_DMA_CHAIN_QLAST(chain_id) \
107  do { \
108  ((dma_linked_lch[chain_id].no_of_lchs_linked-1) == \
109  dma_linked_lch[chain_id].q_count) \
110  } while (0)
111 #define OMAP_DMA_CHAIN_QEMPTY(chain_id) \
112  (0 == dma_linked_lch[chain_id].q_count)
113 #define __OMAP_DMA_CHAIN_INCQ(end) \
114  ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
115 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \
116  do { \
117  __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
118  dma_linked_lch[chain_id].q_count--; \
119  } while (0)
120 
121 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \
122  do { \
123  __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
124  dma_linked_lch[chain_id].q_count++; \
125  } while (0)
126 #endif
127 
128 static int dma_lch_count;
129 static int dma_chan_count;
130 static int omap_dma_reserve_channels;
131 
132 static spinlock_t dma_chan_lock;
133 static struct omap_dma_lch *dma_chan;
134 
135 static inline void disable_lnk(int lch);
136 static void omap_disable_channel_irq(int lch);
137 static inline void omap_enable_channel_irq(int lch);
138 
139 #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
140  __func__);
141 
142 #ifdef CONFIG_ARCH_OMAP15XX
143 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	/* enable_1510_mode is a file-static flag initialized elsewhere in
	 * this file; nonzero means the sDMA block runs 1510-compatible. */
	return enable_1510_mode;
}
148 #else
149 #define omap_dma_in_1510_mode() 0
150 #endif
151 
152 #ifdef CONFIG_ARCH_OMAP1
153 static inline int get_gdma_dev(int req)
154 {
155  u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
156  int shift = ((req - 1) % 5) * 6;
157 
158  return ((omap_readl(reg) >> shift) & 0x3f) + 1;
159 }
160 
161 static inline void set_gdma_dev(int req, int dev)
162 {
163  u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
164  int shift = ((req - 1) % 5) * 6;
165  u32 l;
166 
167  l = omap_readl(reg);
168  l &= ~(0x3f << shift);
169  l |= (dev - 1) << shift;
170  omap_writel(l, reg);
171 }
172 #else
173 #define set_gdma_dev(req, dev) do {} while (0)
174 #define omap_readl(reg) 0
175 #define omap_writel(val, reg) do {} while (0)
176 #endif
177 
178 void omap_set_dma_priority(int lch, int dst_port, int priority)
179 {
180  unsigned long reg;
181  u32 l;
182 
183  if (cpu_class_is_omap1()) {
184  switch (dst_port) {
185  case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */
186  reg = OMAP_TC_OCPT1_PRIOR;
187  break;
188  case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */
189  reg = OMAP_TC_OCPT2_PRIOR;
190  break;
191  case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */
192  reg = OMAP_TC_EMIFF_PRIOR;
193  break;
194  case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */
195  reg = OMAP_TC_EMIFS_PRIOR;
196  break;
197  default:
198  BUG();
199  return;
200  }
201  l = omap_readl(reg);
202  l &= ~(0xf << 8);
203  l |= (priority & 0xf) << 8;
204  omap_writel(l, reg);
205  }
206 
207  if (cpu_class_is_omap2()) {
208  u32 ccr;
209 
210  ccr = p->dma_read(CCR, lch);
211  if (priority)
212  ccr |= (1 << 6);
213  else
214  ccr &= ~(1 << 6);
215  p->dma_write(ccr, CCR, lch);
216  }
217 }
219 
/*
 * Program the basic transfer parameters for logical channel @lch:
 * element data type (CSDP), element/frame counts (CEN/CFN) and the
 * hardware synchronization trigger and mode (CCR, plus CCR2 on OMAP1).
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* CSDP bits [1:0] hold the element data type. */
	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	p->dma_write(l, CSDP, lch);

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* CCR bit 5 selects frame synchronization. */
		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		/* CCR2 bit 2 selects block synchronization. */
		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* Trigger number is split: bits above 4 are shifted up into
		 * the upper field, the low 5 bits stay at [4:0]. */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;	/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	/* Elements per frame and frames per transfer. */
	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
282 
284 {
286 
287  if (cpu_class_is_omap1()) {
288  u16 w;
289 
290  w = p->dma_read(CCR2, lch);
291  w &= ~0x03;
292 
293  switch (mode) {
295  w |= 0x01;
296  break;
298  w |= 0x02;
299  break;
300  case OMAP_DMA_COLOR_DIS:
301  break;
302  default:
303  BUG();
304  }
305  p->dma_write(w, CCR2, lch);
306 
307  w = p->dma_read(LCH_CTRL, lch);
308  w &= ~0x0f;
309  /* Default is channel type 2D */
310  if (mode) {
311  p->dma_write(color, COLOR, lch);
312  w |= 1; /* Channel type G */
313  }
314  p->dma_write(w, LCH_CTRL, lch);
315  }
316 
317  if (cpu_class_is_omap2()) {
318  u32 val;
319 
320  val = p->dma_read(CCR, lch);
321  val &= ~((1 << 17) | (1 << 16));
322 
323  switch (mode) {
325  val |= 1 << 16;
326  break;
328  val |= 1 << 17;
329  break;
330  case OMAP_DMA_COLOR_DIS:
331  break;
332  default:
333  BUG();
334  }
335  p->dma_write(val, CCR, lch);
336 
337  color &= 0xffffff;
338  p->dma_write(color, COLOR, lch);
339  }
340 }
342 
344 {
345  if (cpu_class_is_omap2()) {
346  u32 csdp;
347 
348  csdp = p->dma_read(CSDP, lch);
349  csdp &= ~(0x3 << 16);
350  csdp |= (mode << 16);
351  p->dma_write(csdp, CSDP, lch);
352  }
353 }
355 
357 {
358  if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
359  u32 l;
360 
361  l = p->dma_read(LCH_CTRL, lch);
362  l &= ~0x7;
363  l |= mode;
364  p->dma_write(l, LCH_CTRL, lch);
365  }
366 }
368 
369 /* Note that src_port is only for omap1 */
370 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
371  unsigned long src_start,
372  int src_ei, int src_fi)
373 {
374  u32 l;
375 
376  if (cpu_class_is_omap1()) {
377  u16 w;
378 
379  w = p->dma_read(CSDP, lch);
380  w &= ~(0x1f << 2);
381  w |= src_port << 2;
382  p->dma_write(w, CSDP, lch);
383  }
384 
385  l = p->dma_read(CCR, lch);
386  l &= ~(0x03 << 12);
387  l |= src_amode << 12;
388  p->dma_write(l, CCR, lch);
389 
390  p->dma_write(src_start, CSSA, lch);
391 
392  p->dma_write(src_ei, CSEI, lch);
393  p->dma_write(src_fi, CSFI, lch);
394 }
396 
398 {
400  params->elem_count, params->frame_count,
401  params->sync_mode, params->trigger,
402  params->src_or_dst_synch);
403  omap_set_dma_src_params(lch, params->src_port,
404  params->src_amode, params->src_start,
405  params->src_ei, params->src_fi);
406 
407  omap_set_dma_dest_params(lch, params->dst_port,
408  params->dst_amode, params->dst_start,
409  params->dst_ei, params->dst_fi);
410  if (params->read_prio || params->write_prio)
411  omap_dma_set_prio_lch(lch, params->read_prio,
412  params->write_prio);
413 }
415 
416 void omap_set_dma_src_index(int lch, int eidx, int fidx)
417 {
418  if (cpu_class_is_omap2())
419  return;
420 
421  p->dma_write(eidx, CSEI, lch);
422  p->dma_write(fidx, CSFI, lch);
423 }
425 
427 {
428  u32 l;
429 
430  l = p->dma_read(CSDP, lch);
431  l &= ~(1 << 6);
432  if (enable)
433  l |= (1 << 6);
434  p->dma_write(l, CSDP, lch);
435 }
437 
438 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
439 {
440  unsigned int burst = 0;
441  u32 l;
442 
443  l = p->dma_read(CSDP, lch);
444  l &= ~(0x03 << 7);
445 
446  switch (burst_mode) {
448  break;
450  if (cpu_class_is_omap2())
451  burst = 0x1;
452  else
453  burst = 0x2;
454  break;
456  if (cpu_class_is_omap2()) {
457  burst = 0x2;
458  break;
459  }
460  /*
461  * not supported by current hardware on OMAP1
462  * w |= (0x03 << 7);
463  * fall through
464  */
466  if (cpu_class_is_omap2()) {
467  burst = 0x3;
468  break;
469  }
470  /*
471  * OMAP1 don't support burst 16
472  * fall through
473  */
474  default:
475  BUG();
476  }
477 
478  l |= (burst << 7);
479  p->dma_write(l, CSDP, lch);
480 }
482 
483 /* Note that dest_port is only for OMAP1 */
484 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
485  unsigned long dest_start,
486  int dst_ei, int dst_fi)
487 {
488  u32 l;
489 
490  if (cpu_class_is_omap1()) {
491  l = p->dma_read(CSDP, lch);
492  l &= ~(0x1f << 9);
493  l |= dest_port << 9;
494  p->dma_write(l, CSDP, lch);
495  }
496 
497  l = p->dma_read(CCR, lch);
498  l &= ~(0x03 << 14);
499  l |= dest_amode << 14;
500  p->dma_write(l, CCR, lch);
501 
502  p->dma_write(dest_start, CDSA, lch);
503 
504  p->dma_write(dst_ei, CDEI, lch);
505  p->dma_write(dst_fi, CDFI, lch);
506 }
508 
509 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
510 {
511  if (cpu_class_is_omap2())
512  return;
513 
514  p->dma_write(eidx, CDEI, lch);
515  p->dma_write(fidx, CDFI, lch);
516 }
518 
520 {
521  u32 l;
522 
523  l = p->dma_read(CSDP, lch);
524  l &= ~(1 << 13);
525  if (enable)
526  l |= 1 << 13;
527  p->dma_write(l, CSDP, lch);
528 }
530 
531 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
532 {
533  unsigned int burst = 0;
534  u32 l;
535 
536  l = p->dma_read(CSDP, lch);
537  l &= ~(0x03 << 14);
538 
539  switch (burst_mode) {
541  break;
543  if (cpu_class_is_omap2())
544  burst = 0x1;
545  else
546  burst = 0x2;
547  break;
549  if (cpu_class_is_omap2())
550  burst = 0x2;
551  else
552  burst = 0x3;
553  break;
555  if (cpu_class_is_omap2()) {
556  burst = 0x3;
557  break;
558  }
559  /*
560  * OMAP1 don't support burst 16
561  * fall through
562  */
563  default:
564  printk(KERN_ERR "Invalid DMA burst mode\n");
565  BUG();
566  return;
567  }
568  l |= (burst << 14);
569  p->dma_write(l, CSDP, lch);
570 }
572 
573 static inline void omap_enable_channel_irq(int lch)
574 {
575  /* Clear CSR */
576  if (cpu_class_is_omap1())
577  p->dma_read(CSR, lch);
578  else
580 
581  /* Enable some nice interrupts. */
582  p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
583 }
584 
585 static inline void omap_disable_channel_irq(int lch)
586 {
587  /* disable channel interrupts */
588  p->dma_write(0, CICR, lch);
589  /* Clear CSR */
590  if (cpu_class_is_omap1())
591  p->dma_read(CSR, lch);
592  else
594 }
595 
597 {
598  dma_chan[lch].enabled_irqs |= bits;
599 }
601 
603 {
604  dma_chan[lch].enabled_irqs &= ~bits;
605 }
607 
/*
 * Program CLNK_CTRL for @lch so its successor channel (next_lch, or
 * next_linked_ch for OMAP2 chains) is started when @lch completes.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* On OMAP1 clear the STOP_LNK bit (bit 14) first. */
	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* Chain links (next_linked_ch) override plain links on OMAP2+. */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	p->dma_write(l, CLNK_CTRL, lch);
}
629 
630 static inline void disable_lnk(int lch)
631 {
632  u32 l;
633 
634  l = p->dma_read(CLNK_CTRL, lch);
635 
636  /* Disable interrupts */
637  omap_disable_channel_irq(lch);
638 
639  if (cpu_class_is_omap1()) {
640  /* Set the STOP_LNK bit */
641  l |= 1 << 14;
642  }
643 
644  if (cpu_class_is_omap2()) {
645  /* Clear the ENABLE_LNK bit */
646  l &= ~(1 << 15);
647  }
648 
649  p->dma_write(l, CLNK_CTRL, lch);
650  dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
651 }
652 
653 static inline void omap2_enable_irq_lch(int lch)
654 {
655  u32 val;
656  unsigned long flags;
657 
658  if (!cpu_class_is_omap2())
659  return;
660 
661  spin_lock_irqsave(&dma_chan_lock, flags);
662  /* clear IRQ STATUS */
663  p->dma_write(1 << lch, IRQSTATUS_L0, lch);
664  /* Enable interrupt */
665  val = p->dma_read(IRQENABLE_L0, lch);
666  val |= 1 << lch;
667  p->dma_write(val, IRQENABLE_L0, lch);
668  spin_unlock_irqrestore(&dma_chan_lock, flags);
669 }
670 
671 static inline void omap2_disable_irq_lch(int lch)
672 {
673  u32 val;
674  unsigned long flags;
675 
676  if (!cpu_class_is_omap2())
677  return;
678 
679  spin_lock_irqsave(&dma_chan_lock, flags);
680  /* Disable interrupt */
681  val = p->dma_read(IRQENABLE_L0, lch);
682  val &= ~(1 << lch);
683  p->dma_write(val, IRQENABLE_L0, lch);
684  /* clear IRQ STATUS */
685  p->dma_write(1 << lch, IRQSTATUS_L0, lch);
686  spin_unlock_irqrestore(&dma_chan_lock, flags);
687 }
688 
689 int omap_request_dma(int dev_id, const char *dev_name,
690  void (*callback)(int lch, u16 ch_status, void *data),
691  void *data, int *dma_ch_out)
692 {
693  int ch, free_ch = -1;
694  unsigned long flags;
695  struct omap_dma_lch *chan;
696 
697  spin_lock_irqsave(&dma_chan_lock, flags);
698  for (ch = 0; ch < dma_chan_count; ch++) {
699  if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
700  free_ch = ch;
701  if (dev_id == 0)
702  break;
703  }
704  }
705  if (free_ch == -1) {
706  spin_unlock_irqrestore(&dma_chan_lock, flags);
707  return -EBUSY;
708  }
709  chan = dma_chan + free_ch;
710  chan->dev_id = dev_id;
711 
712  if (p->clear_lch_regs)
713  p->clear_lch_regs(free_ch);
714 
715  if (cpu_class_is_omap2())
716  omap_clear_dma(free_ch);
717 
718  spin_unlock_irqrestore(&dma_chan_lock, flags);
719 
720  chan->dev_name = dev_name;
721  chan->callback = callback;
722  chan->data = data;
723  chan->flags = 0;
724 
725 #ifndef CONFIG_ARCH_OMAP1
726  if (cpu_class_is_omap2()) {
727  chan->chain_id = -1;
728  chan->next_linked_ch = -1;
729  }
730 #endif
731 
733 
734  if (cpu_class_is_omap1())
736  else if (cpu_class_is_omap2())
739 
740  if (cpu_is_omap16xx()) {
741  /* If the sync device is set, configure it dynamically. */
742  if (dev_id != 0) {
743  set_gdma_dev(free_ch + 1, dev_id);
744  dev_id = free_ch + 1;
745  }
746  /*
747  * Disable the 1510 compatibility mode and set the sync device
748  * id.
749  */
750  p->dma_write(dev_id | (1 << 10), CCR, free_ch);
751  } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
752  p->dma_write(dev_id, CCR, free_ch);
753  }
754 
755  if (cpu_class_is_omap2()) {
756  omap_enable_channel_irq(free_ch);
757  omap2_enable_irq_lch(free_ch);
758  }
759 
760  *dma_ch_out = free_ch;
761 
762  return 0;
763 }
765 
766 void omap_free_dma(int lch)
767 {
768  unsigned long flags;
769 
770  if (dma_chan[lch].dev_id == -1) {
771  pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
772  lch);
773  return;
774  }
775 
776  /* Disable interrupt for logical channel */
777  if (cpu_class_is_omap2())
778  omap2_disable_irq_lch(lch);
779 
780  /* Disable all DMA interrupts for the channel. */
781  omap_disable_channel_irq(lch);
782 
783  /* Make sure the DMA transfer is stopped. */
784  p->dma_write(0, CCR, lch);
785 
786  /* Clear registers */
787  if (cpu_class_is_omap2())
788  omap_clear_dma(lch);
789 
790  spin_lock_irqsave(&dma_chan_lock, flags);
791  dma_chan[lch].dev_id = -1;
792  dma_chan[lch].next_lch = -1;
793  dma_chan[lch].callback = NULL;
794  spin_unlock_irqrestore(&dma_chan_lock, flags);
795 }
797 
808 void
809 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
810 {
811  u32 reg;
812 
813  if (!cpu_class_is_omap2()) {
814  printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
815  return;
816  }
817 
818  if (max_fifo_depth == 0)
819  max_fifo_depth = 1;
820  if (arb_rate == 0)
821  arb_rate = 1;
822 
823  reg = 0xff & max_fifo_depth;
824  reg |= (0x3 & tparams) << 12;
825  reg |= (arb_rate & 0xff) << 16;
826 
827  p->dma_write(reg, GCR, 0);
828 }
830 
840 int
841 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
842  unsigned char write_prio)
843 {
844  u32 l;
845 
846  if (unlikely((lch < 0 || lch >= dma_lch_count))) {
847  printk(KERN_ERR "Invalid channel id\n");
848  return -EINVAL;
849  }
850  l = p->dma_read(CCR, lch);
851  l &= ~((1 << 6) | (1 << 26));
853  l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
854  else
855  l |= ((read_prio & 0x1) << 6);
856 
857  p->dma_write(l, CCR, lch);
858 
859  return 0;
860 }
862 
863 /*
864  * Clears any DMA state so the DMA engine is ready to restart with new buffers
865  * through omap_start_dma(). Any buffers in flight are discarded.
866  */
void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* Run the SoC-specific channel-reset hook with local interrupts
	 * off — presumably to avoid racing the DMA interrupt handler on
	 * this CPU; confirm against the per-SoC clear_dma implementations. */
	local_irq_save(flags);
	p->clear_dma(lch);
	local_irq_restore(flags);
}
876 
877 void omap_start_dma(int lch)
878 {
879  u32 l;
880 
881  /*
882  * The CPC/CDAC register needs to be initialized to zero
883  * before starting dma transfer.
884  */
885  if (cpu_is_omap15xx())
886  p->dma_write(0, CPC, lch);
887  else
888  p->dma_write(0, CDAC, lch);
889 
890  if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
891  int next_lch, cur_lch;
892  char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
893 
894  dma_chan_link_map[lch] = 1;
895  /* Set the link register of the first channel */
896  enable_lnk(lch);
897 
898  memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
899  cur_lch = dma_chan[lch].next_lch;
900  do {
901  next_lch = dma_chan[cur_lch].next_lch;
902 
903  /* The loop case: we've been here already */
904  if (dma_chan_link_map[cur_lch])
905  break;
906  /* Mark the current channel */
907  dma_chan_link_map[cur_lch] = 1;
908 
909  enable_lnk(cur_lch);
910  omap_enable_channel_irq(cur_lch);
911 
912  cur_lch = next_lch;
913  } while (next_lch != -1);
915  p->dma_write(lch, CLNK_CTRL, lch);
916 
917  omap_enable_channel_irq(lch);
918 
919  l = p->dma_read(CCR, lch);
920 
923  l |= OMAP_DMA_CCR_EN;
924 
925  /*
926  * As dma_write() uses IO accessors which are weakly ordered, there
927  * is no guarantee that data in coherent DMA memory will be visible
928  * to the DMA device. Add a memory barrier here to ensure that any
929  * such data is visible prior to enabling DMA.
930  */
931  mb();
932  p->dma_write(l, CCR, lch);
933 
934  dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
935 }
937 
938 void omap_stop_dma(int lch)
939 {
940  u32 l;
941 
942  /* Disable all interrupts on the channel */
943  omap_disable_channel_irq(lch);
944 
945  l = p->dma_read(CCR, lch);
948  int i = 0;
949  u32 sys_cf;
950 
951  /* Configure No-Standby */
952  l = p->dma_read(OCP_SYSCONFIG, lch);
953  sys_cf = l;
956  p->dma_write(l , OCP_SYSCONFIG, 0);
957 
958  l = p->dma_read(CCR, lch);
959  l &= ~OMAP_DMA_CCR_EN;
960  p->dma_write(l, CCR, lch);
961 
962  /* Wait for sDMA FIFO drain */
963  l = p->dma_read(CCR, lch);
964  while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
966  udelay(5);
967  i++;
968  l = p->dma_read(CCR, lch);
969  }
970  if (i >= 100)
971  pr_err("DMA drain did not complete on lch %d\n", lch);
972  /* Restore OCP_SYSCONFIG */
973  p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
974  } else {
975  l &= ~OMAP_DMA_CCR_EN;
976  p->dma_write(l, CCR, lch);
977  }
978 
979  /*
980  * Ensure that data transferred by DMA is visible to any access
981  * after DMA has been disabled. This is important for coherent
982  * DMA regions.
983  */
984  mb();
985 
986  if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
987  int next_lch, cur_lch = lch;
988  char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
989 
990  memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
991  do {
992  /* The loop case: we've been here already */
993  if (dma_chan_link_map[cur_lch])
994  break;
995  /* Mark the current channel */
996  dma_chan_link_map[cur_lch] = 1;
997 
998  disable_lnk(cur_lch);
999 
1000  next_lch = dma_chan[cur_lch].next_lch;
1001  cur_lch = next_lch;
1002  } while (next_lch != -1);
1003  }
1004 
1005  dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
1006 }
1008 
1009 /*
1010  * Allows changing the DMA callback function or data. This may be needed if
1011  * the driver shares a single DMA channel for multiple dma triggers.
1012  */
1014  void (*callback)(int lch, u16 ch_status, void *data),
1015  void *data)
1016 {
1017  unsigned long flags;
1018 
1019  if (lch < 0)
1020  return -ENODEV;
1021 
1022  spin_lock_irqsave(&dma_chan_lock, flags);
1023  if (dma_chan[lch].dev_id == -1) {
1024  printk(KERN_ERR "DMA callback for not set for free channel\n");
1025  spin_unlock_irqrestore(&dma_chan_lock, flags);
1026  return -EINVAL;
1027  }
1028  dma_chan[lch].callback = callback;
1029  dma_chan[lch].data = data;
1030  spin_unlock_irqrestore(&dma_chan_lock, flags);
1031 
1032  return 0;
1033 }
1035 
1036 /*
1037  * Returns current physical source address for the given DMA channel.
1038  * If the channel is running the caller must disable interrupts prior calling
1039  * this function and process the returned value before re-enabling interrupt to
1040  * prevent races with the interrupt handler. Note that in continuous mode there
1041  * is a chance for CSSA_L register overflow between the two reads resulting
1042  * in incorrect return value.
1043  */
1045 {
1046  dma_addr_t offset = 0;
1047 
1048  if (cpu_is_omap15xx())
1049  offset = p->dma_read(CPC, lch);
1050  else
1051  offset = p->dma_read(CSAC, lch);
1052 
1053  if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1054  offset = p->dma_read(CSAC, lch);
1055 
1056  if (!cpu_is_omap15xx()) {
1057  /*
1058  * CDAC == 0 indicates that the DMA transfer on the channel has
1059  * not been started (no data has been transferred so far).
1060  * Return the programmed source start address in this case.
1061  */
1062  if (likely(p->dma_read(CDAC, lch)))
1063  offset = p->dma_read(CSAC, lch);
1064  else
1065  offset = p->dma_read(CSSA, lch);
1066  }
1067 
1068  if (cpu_class_is_omap1())
1069  offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1070 
1071  return offset;
1072 }
1074 
1075 /*
1076  * Returns current physical destination address for the given DMA channel.
1077  * If the channel is running the caller must disable interrupts prior calling
1078  * this function and process the returned value before re-enabling interrupt to
1079  * prevent races with the interrupt handler. Note that in continuous mode there
1080  * is a chance for CDSA_L register overflow between the two reads resulting
1081  * in incorrect return value.
1082  */
1084 {
1085  dma_addr_t offset = 0;
1086 
1087  if (cpu_is_omap15xx())
1088  offset = p->dma_read(CPC, lch);
1089  else
1090  offset = p->dma_read(CDAC, lch);
1091 
1092  /*
1093  * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1094  * read before the DMA controller finished disabling the channel.
1095  */
1096  if (!cpu_is_omap15xx() && offset == 0) {
1097  offset = p->dma_read(CDAC, lch);
1098  /*
1099  * CDAC == 0 indicates that the DMA transfer on the channel has
1100  * not been started (no data has been transferred so far).
1101  * Return the programmed destination start address in this case.
1102  */
1103  if (unlikely(!offset))
1104  offset = p->dma_read(CDSA, lch);
1105  }
1106 
1107  if (cpu_class_is_omap1())
1108  offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1109 
1110  return offset;
1111 }
1113 
1115 {
1116  return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1117 }
1119 
1121 {
1122  int lch;
1123 
1124  if (cpu_class_is_omap1())
1125  if (omap_lcd_dma_running())
1126  return 1;
1127 
1128  for (lch = 0; lch < dma_chan_count; lch++)
1129  if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1130  return 1;
1131 
1132  return 0;
1133 }
1134 
1135 /*
1136  * lch_queue DMA will start right after lch_head one is finished.
1137  * For this DMA link to start, you still need to start (see omap_start_dma)
1138  * the first one. That will fire up the entire queue.
1139  */
1140 void omap_dma_link_lch(int lch_head, int lch_queue)
1141 {
1142  if (omap_dma_in_1510_mode()) {
1143  if (lch_head == lch_queue) {
1144  p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1145  CCR, lch_head);
1146  return;
1147  }
1148  printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1149  BUG();
1150  return;
1151  }
1152 
1153  if ((dma_chan[lch_head].dev_id == -1) ||
1154  (dma_chan[lch_queue].dev_id == -1)) {
1155  pr_err("omap_dma: trying to link non requested channels\n");
1156  dump_stack();
1157  }
1158 
1159  dma_chan[lch_head].next_lch = lch_queue;
1160 }
1162 
1163 /*
1164  * Once the DMA queue is stopped, we can destroy it.
1165  */
1166 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1167 {
1168  if (omap_dma_in_1510_mode()) {
1169  if (lch_head == lch_queue) {
1170  p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1171  CCR, lch_head);
1172  return;
1173  }
1174  printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1175  BUG();
1176  return;
1177  }
1178 
1179  if (dma_chan[lch_head].next_lch != lch_queue ||
1180  dma_chan[lch_head].next_lch == -1) {
1181  pr_err("omap_dma: trying to unlink non linked channels\n");
1182  dump_stack();
1183  }
1184 
1185  if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1186  (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1187  pr_err("omap_dma: You need to stop the DMA channels before unlinking\n");
1188  dump_stack();
1189  }
1190 
1191  dma_chan[lch_head].next_lch = -1;
1192 }
1194 
1195 #ifndef CONFIG_ARCH_OMAP1
/* Create chain of DMA channels */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		/* Two-node circular list: each channel points at the other. */
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		/* Insert lch_queue right after lch_head and fix up the
		 * prev pointer of lch_head's old successor. */
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	/* Mirror the software links into the next-channel field (low 5
	 * bits) of CLNK_CTRL on both channels. */
	l = p->dma_read(CLNK_CTRL, lch_head);
	l &= ~(0x1f);
	l |= lch_queue;
	p->dma_write(l, CLNK_CTRL, lch_head);

	l = p->dma_read(CLNK_CTRL, lch_queue);
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	p->dma_write(l, CLNK_CTRL, lch_queue);
}
1229 
1245 int omap_request_dma_chain(int dev_id, const char *dev_name,
1246  void (*callback) (int lch, u16 ch_status,
1247  void *data),
1248  int *chain_id, int no_of_chans, int chain_mode,
1250 {
1251  int *channels;
1252  int i, err;
1253 
1254  /* Is the chain mode valid ? */
1255  if (chain_mode != OMAP_DMA_STATIC_CHAIN
1256  && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1257  printk(KERN_ERR "Invalid chain mode requested\n");
1258  return -EINVAL;
1259  }
1260 
1261  if (unlikely((no_of_chans < 1
1262  || no_of_chans > dma_lch_count))) {
1263  printk(KERN_ERR "Invalid Number of channels requested\n");
1264  return -EINVAL;
1265  }
1266 
1267  /*
1268  * Allocate a queue to maintain the status of the channels
1269  * in the chain
1270  */
1271  channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1272  if (channels == NULL) {
1273  printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1274  return -ENOMEM;
1275  }
1276 
1277  /* request and reserve DMA channels for the chain */
1278  for (i = 0; i < no_of_chans; i++) {
1279  err = omap_request_dma(dev_id, dev_name,
1280  callback, NULL, &channels[i]);
1281  if (err < 0) {
1282  int j;
1283  for (j = 0; j < i; j++)
1284  omap_free_dma(channels[j]);
1285  kfree(channels);
1286  printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1287  return err;
1288  }
1289  dma_chan[channels[i]].prev_linked_ch = -1;
1290  dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1291 
1292  /*
1293  * Allowing client drivers to set common parameters now,
1294  * so that later only relevant (src_start, dest_start
1295  * and element count) can be set
1296  */
1297  omap_set_dma_params(channels[i], &params);
1298  }
1299 
1300  *chain_id = channels[0];
1301  dma_linked_lch[*chain_id].linked_dmach_q = channels;
1302  dma_linked_lch[*chain_id].chain_mode = chain_mode;
1303  dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1304  dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1305 
1306  for (i = 0; i < no_of_chans; i++)
1307  dma_chan[channels[i]].chain_id = *chain_id;
1308 
1309  /* Reset the Queue pointers */
1310  OMAP_DMA_CHAIN_QINIT(*chain_id);
1311 
1312  /* Set up the chain */
1313  if (no_of_chans == 1)
1314  create_dma_lch_chain(channels[0], channels[0]);
1315  else {
1316  for (i = 0; i < (no_of_chans - 1); i++)
1317  create_dma_lch_chain(channels[i], channels[i + 1]);
1318  }
1319 
1320  return 0;
1321 }
1323 
1336 {
1337  int *channels;
1338  u32 i;
1339 
1340  /* Check for input params */
1341  if (unlikely((chain_id < 0
1342  || chain_id >= dma_lch_count))) {
1343  printk(KERN_ERR "Invalid chain id\n");
1344  return -EINVAL;
1345  }
1346 
1347  /* Check if the chain exists */
1348  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1349  printk(KERN_ERR "Chain doesn't exists\n");
1350  return -EINVAL;
1351  }
1352  channels = dma_linked_lch[chain_id].linked_dmach_q;
1353 
1354  for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1355  /*
1356  * Allowing client drivers to set common parameters now,
1357  * so that later only relevant (src_start, dest_start
1358  * and element count) can be set
1359  */
1360  omap_set_dma_params(channels[i], &params);
1361  }
1362 
1363  return 0;
1364 }
1366 
1376 {
1377  int *channels;
1378  u32 i;
1379 
1380  /* Check for input params */
1381  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1382  printk(KERN_ERR "Invalid chain id\n");
1383  return -EINVAL;
1384  }
1385 
1386  /* Check if the chain exists */
1387  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1388  printk(KERN_ERR "Chain doesn't exists\n");
1389  return -EINVAL;
1390  }
1391 
1392  channels = dma_linked_lch[chain_id].linked_dmach_q;
1393  for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1394  dma_chan[channels[i]].next_linked_ch = -1;
1395  dma_chan[channels[i]].prev_linked_ch = -1;
1396  dma_chan[channels[i]].chain_id = -1;
1397  dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1398  omap_free_dma(channels[i]);
1399  }
1400 
1401  kfree(channels);
1402 
1403  dma_linked_lch[chain_id].linked_dmach_q = NULL;
1404  dma_linked_lch[chain_id].chain_mode = -1;
1405  dma_linked_lch[chain_id].chain_state = -1;
1406 
1407  return (0);
1408 }
1410 
1420 {
1421  /* Check for input params */
1422  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1423  printk(KERN_ERR "Invalid chain id\n");
1424  return -EINVAL;
1425  }
1426 
1427  /* Check if the chain exists */
1428  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1429  printk(KERN_ERR "Chain doesn't exists\n");
1430  return -EINVAL;
1431  }
1432  pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1433  dma_linked_lch[chain_id].q_count);
1434 
1435  if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1436  return OMAP_DMA_CHAIN_INACTIVE;
1437 
1438  return OMAP_DMA_CHAIN_ACTIVE;
1439 }
1441 
1456 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1457  int elem_count, int frame_count, void *callbk_data)
1458 {
1459  int *channels;
1460  u32 l, lch;
1461  int start_dma = 0;
1462 
1463  /*
1464  * if buffer size is less than 1 then there is
1465  * no use of starting the chain
1466  */
1467  if (elem_count < 1) {
1468  printk(KERN_ERR "Invalid buffer size\n");
1469  return -EINVAL;
1470  }
1471 
1472  /* Check for input params */
1473  if (unlikely((chain_id < 0
1474  || chain_id >= dma_lch_count))) {
1475  printk(KERN_ERR "Invalid chain id\n");
1476  return -EINVAL;
1477  }
1478 
1479  /* Check if the chain exists */
1480  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1481  printk(KERN_ERR "Chain doesn't exist\n");
1482  return -EINVAL;
1483  }
1484 
1485  /* Check if all the channels in chain are in use */
1486  if (OMAP_DMA_CHAIN_QFULL(chain_id))
1487  return -EBUSY;
1488 
1489  /* Frame count may be negative in case of indexed transfers */
1490  channels = dma_linked_lch[chain_id].linked_dmach_q;
1491 
1492  /* Get a free channel */
1493  lch = channels[dma_linked_lch[chain_id].q_tail];
1494 
1495  /* Store the callback data */
1496  dma_chan[lch].data = callbk_data;
1497 
1498  /* Increment the q_tail */
1499  OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1500 
1501  /* Set the params to the free channel */
1502  if (src_start != 0)
1503  p->dma_write(src_start, CSSA, lch);
1504  if (dest_start != 0)
1505  p->dma_write(dest_start, CDSA, lch);
1506 
1507  /* Write the buffer size */
1508  p->dma_write(elem_count, CEN, lch);
1509  p->dma_write(frame_count, CFN, lch);
1510 
1511  /*
1512  * If the chain is dynamically linked,
1513  * then we may have to start the chain if its not active
1514  */
1515  if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1516 
1517  /*
1518  * In Dynamic chain, if the chain is not started,
1519  * queue the channel
1520  */
1521  if (dma_linked_lch[chain_id].chain_state ==
1523  /* Enable the link in previous channel */
1524  if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1525  DMA_CH_QUEUED)
1526  enable_lnk(dma_chan[lch].prev_linked_ch);
1527  dma_chan[lch].state = DMA_CH_QUEUED;
1528  }
1529 
1530  /*
1531  * Chain is already started, make sure its active,
1532  * if not then start the chain
1533  */
1534  else {
1535  start_dma = 1;
1536 
1537  if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1538  DMA_CH_STARTED) {
1539  enable_lnk(dma_chan[lch].prev_linked_ch);
1540  dma_chan[lch].state = DMA_CH_QUEUED;
1541  start_dma = 0;
1542  if (0 == ((1 << 7) & p->dma_read(
1543  CCR, dma_chan[lch].prev_linked_ch))) {
1544  disable_lnk(dma_chan[lch].
1545  prev_linked_ch);
1546  pr_debug("\n prev ch is stopped\n");
1547  start_dma = 1;
1548  }
1549  }
1550 
1551  else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1552  == DMA_CH_QUEUED) {
1553  enable_lnk(dma_chan[lch].prev_linked_ch);
1554  dma_chan[lch].state = DMA_CH_QUEUED;
1555  start_dma = 0;
1556  }
1557  omap_enable_channel_irq(lch);
1558 
1559  l = p->dma_read(CCR, lch);
1560 
1561  if ((0 == (l & (1 << 24))))
1562  l &= ~(1 << 25);
1563  else
1564  l |= (1 << 25);
1565  if (start_dma == 1) {
1566  if (0 == (l & (1 << 7))) {
1567  l |= (1 << 7);
1568  dma_chan[lch].state = DMA_CH_STARTED;
1569  pr_debug("starting %d\n", lch);
1570  p->dma_write(l, CCR, lch);
1571  } else
1572  start_dma = 0;
1573  } else {
1574  if (0 == (l & (1 << 7)))
1575  p->dma_write(l, CCR, lch);
1576  }
1577  dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1578  }
1579  }
1580 
1581  return 0;
1582 }
1584 
1594 {
1595  int *channels;
1596  u32 l, i;
1597 
1598  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1599  printk(KERN_ERR "Invalid chain id\n");
1600  return -EINVAL;
1601  }
1602 
1603  channels = dma_linked_lch[chain_id].linked_dmach_q;
1604 
1605  if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1606  printk(KERN_ERR "Chain is already started\n");
1607  return -EBUSY;
1608  }
1609 
1610  if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1611  for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1612  i++) {
1613  enable_lnk(channels[i]);
1614  omap_enable_channel_irq(channels[i]);
1615  }
1616  } else {
1617  omap_enable_channel_irq(channels[0]);
1618  }
1619 
1620  l = p->dma_read(CCR, channels[0]);
1621  l |= (1 << 7);
1622  dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1623  dma_chan[channels[0]].state = DMA_CH_STARTED;
1624 
1625  if ((0 == (l & (1 << 24))))
1626  l &= ~(1 << 25);
1627  else
1628  l |= (1 << 25);
1629  p->dma_write(l, CCR, channels[0]);
1630 
1631  dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1632 
1633  return 0;
1634 }
1636 
1646 {
1647  int *channels;
1648  u32 l, i;
1649  u32 sys_cf = 0;
1650 
1651  /* Check for input params */
1652  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1653  printk(KERN_ERR "Invalid chain id\n");
1654  return -EINVAL;
1655  }
1656 
1657  /* Check if the chain exists */
1658  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1659  printk(KERN_ERR "Chain doesn't exists\n");
1660  return -EINVAL;
1661  }
1662  channels = dma_linked_lch[chain_id].linked_dmach_q;
1663 
1665  sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1666  l = sys_cf;
1667  /* Middle mode reg set no Standby */
1668  l &= ~((1 << 12)|(1 << 13));
1669  p->dma_write(l, OCP_SYSCONFIG, 0);
1670  }
1671 
1672  for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1673 
1674  /* Stop the Channel transmission */
1675  l = p->dma_read(CCR, channels[i]);
1676  l &= ~(1 << 7);
1677  p->dma_write(l, CCR, channels[i]);
1678 
1679  /* Disable the link in all the channels */
1680  disable_lnk(channels[i]);
1681  dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1682 
1683  }
1684  dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1685 
1686  /* Reset the Queue pointers */
1687  OMAP_DMA_CHAIN_QINIT(chain_id);
1688 
1690  p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1691 
1692  return 0;
1693 }
1695 
1696 /* Get the index of the ongoing DMA in chain */
1708 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1709 {
1710  int lch;
1711  int *channels;
1712 
1713  /* Check for input params */
1714  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1715  printk(KERN_ERR "Invalid chain id\n");
1716  return -EINVAL;
1717  }
1718 
1719  /* Check if the chain exists */
1720  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1721  printk(KERN_ERR "Chain doesn't exists\n");
1722  return -EINVAL;
1723  }
1724  if ((!ei) || (!fi))
1725  return -EINVAL;
1726 
1727  channels = dma_linked_lch[chain_id].linked_dmach_q;
1728 
1729  /* Get the current channel */
1730  lch = channels[dma_linked_lch[chain_id].q_head];
1731 
1732  *ei = p->dma_read(CCEN, lch);
1733  *fi = p->dma_read(CCFN, lch);
1734 
1735  return 0;
1736 }
1738 
1749 {
1750  int lch;
1751  int *channels;
1752 
1753  /* Check for input params */
1754  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1755  printk(KERN_ERR "Invalid chain id\n");
1756  return -EINVAL;
1757  }
1758 
1759  /* Check if the chain exists */
1760  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1761  printk(KERN_ERR "Chain doesn't exists\n");
1762  return -EINVAL;
1763  }
1764 
1765  channels = dma_linked_lch[chain_id].linked_dmach_q;
1766 
1767  /* Get the current channel */
1768  lch = channels[dma_linked_lch[chain_id].q_head];
1769 
1770  return p->dma_read(CDAC, lch);
1771 }
1773 
1783 {
1784  int lch;
1785  int *channels;
1786 
1787  /* Check for input params */
1788  if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1789  printk(KERN_ERR "Invalid chain id\n");
1790  return -EINVAL;
1791  }
1792 
1793  /* Check if the chain exists */
1794  if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1795  printk(KERN_ERR "Chain doesn't exists\n");
1796  return -EINVAL;
1797  }
1798 
1799  channels = dma_linked_lch[chain_id].linked_dmach_q;
1800 
1801  /* Get the current channel */
1802  lch = channels[dma_linked_lch[chain_id].q_head];
1803 
1804  return p->dma_read(CSAC, lch);
1805 }
1807 #endif /* ifndef CONFIG_ARCH_OMAP1 */
1808 
1809 /*----------------------------------------------------------------------------*/
1810 
1811 #ifdef CONFIG_ARCH_OMAP1
1812 
1813 static int omap1_dma_handle_ch(int ch)
1814 {
1815  u32 csr;
1816 
1817  if (enable_1510_mode && ch >= 6) {
1818  csr = dma_chan[ch].saved_csr;
1819  dma_chan[ch].saved_csr = 0;
1820  } else
1821  csr = p->dma_read(CSR, ch);
1822  if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1823  dma_chan[ch + 6].saved_csr = csr >> 7;
1824  csr &= 0x7f;
1825  }
1826  if ((csr & 0x3f) == 0)
1827  return 0;
1828  if (unlikely(dma_chan[ch].dev_id == -1)) {
1829  pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
1830  ch, csr);
1831  return 0;
1832  }
1833  if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1834  pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
1835  if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1836  pr_warn("DMA synchronization event drop occurred with device %d\n",
1837  dma_chan[ch].dev_id);
1838  if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1839  dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1840  if (likely(dma_chan[ch].callback != NULL))
1841  dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1842 
1843  return 1;
1844 }
1845 
1846 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1847 {
1848  int ch = ((int) dev_id) - 1;
1849  int handled = 0;
1850 
1851  for (;;) {
1852  int handled_now = 0;
1853 
1854  handled_now += omap1_dma_handle_ch(ch);
1855  if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1856  handled_now += omap1_dma_handle_ch(ch + 6);
1857  if (!handled_now)
1858  break;
1859  handled += handled_now;
1860  }
1861 
1862  return handled ? IRQ_HANDLED : IRQ_NONE;
1863 }
1864 
1865 #else
1866 #define omap1_dma_irq_handler NULL
1867 #endif
1868 
1869 #ifdef CONFIG_ARCH_OMAP2PLUS
1870 
1871 static int omap2_dma_handle_ch(int ch)
1872 {
1873  u32 status = p->dma_read(CSR, ch);
1874 
1875  if (!status) {
1876  if (printk_ratelimit())
1877  pr_warn("Spurious DMA IRQ for lch %d\n", ch);
1878  p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1879  return 0;
1880  }
1881  if (unlikely(dma_chan[ch].dev_id == -1)) {
1882  if (printk_ratelimit())
1883  pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
1884  status, ch);
1885  return 0;
1886  }
1887  if (unlikely(status & OMAP_DMA_DROP_IRQ))
1888  pr_info("DMA synchronization event drop occurred with device %d\n",
1889  dma_chan[ch].dev_id);
1890  if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1891  printk(KERN_INFO "DMA transaction error with device %d\n",
1892  dma_chan[ch].dev_id);
1894  u32 ccr;
1895 
1896  ccr = p->dma_read(CCR, ch);
1897  ccr &= ~OMAP_DMA_CCR_EN;
1898  p->dma_write(ccr, CCR, ch);
1899  dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1900  }
1901  }
1902  if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1903  printk(KERN_INFO "DMA secure error with device %d\n",
1904  dma_chan[ch].dev_id);
1905  if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1906  printk(KERN_INFO "DMA misaligned error with device %d\n",
1907  dma_chan[ch].dev_id);
1908 
1909  p->dma_write(status, CSR, ch);
1910  p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1911  /* read back the register to flush the write */
1912  p->dma_read(IRQSTATUS_L0, ch);
1913 
1914  /* If the ch is not chained then chain_id will be -1 */
1915  if (dma_chan[ch].chain_id != -1) {
1916  int chain_id = dma_chan[ch].chain_id;
1917  dma_chan[ch].state = DMA_CH_NOTSTARTED;
1918  if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1919  dma_chan[dma_chan[ch].next_linked_ch].state =
1921  if (dma_linked_lch[chain_id].chain_mode ==
1923  disable_lnk(ch);
1924 
1925  if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1926  OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1927 
1928  status = p->dma_read(CSR, ch);
1929  p->dma_write(status, CSR, ch);
1930  }
1931 
1932  if (likely(dma_chan[ch].callback != NULL))
1933  dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1934 
1935  return 0;
1936 }
1937 
1938 /* STATUS register count is from 1-32 while our is 0-31 */
1939 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1940 {
1941  u32 val, enable_reg;
1942  int i;
1943 
1944  val = p->dma_read(IRQSTATUS_L0, 0);
1945  if (val == 0) {
1946  if (printk_ratelimit())
1947  printk(KERN_WARNING "Spurious DMA IRQ\n");
1948  return IRQ_HANDLED;
1949  }
1950  enable_reg = p->dma_read(IRQENABLE_L0, 0);
1951  val &= enable_reg; /* Dispatch only relevant interrupts */
1952  for (i = 0; i < dma_lch_count && val != 0; i++) {
1953  if (val & 1)
1954  omap2_dma_handle_ch(i);
1955  val >>= 1;
1956  }
1957 
1958  return IRQ_HANDLED;
1959 }
1960 
/* Shared-line irqaction installed with setup_irq() in the probe path. */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	/* NOTE(review): IRQF_DISABLED is deprecated and a no-op by 3.7. */
	.flags = IRQF_DISABLED
};

#else
/* No OMAP2+ support built in: keep an empty placeholder for the probe. */
static struct irqaction omap24xx_dma_irq;
#endif
1970 
1971 /*----------------------------------------------------------------------------*/
1972 
1974 {
1975  omap_dma_global_context.dma_irqenable_l0 =
1976  p->dma_read(IRQENABLE_L0, 0);
1977  omap_dma_global_context.dma_ocp_sysconfig =
1978  p->dma_read(OCP_SYSCONFIG, 0);
1979  omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
1980 }
1981 
1983 {
1984  int ch;
1985 
1986  p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
1987  p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
1988  OCP_SYSCONFIG, 0);
1989  p->dma_write(omap_dma_global_context.dma_irqenable_l0,
1990  IRQENABLE_L0, 0);
1991 
1993  p->dma_write(0x3 , IRQSTATUS_L0, 0);
1994 
1995  for (ch = 0; ch < dma_chan_count; ch++)
1996  if (dma_chan[ch].dev_id != -1)
1997  omap_clear_dma(ch);
1998 }
1999 
2000 static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2001 {
2002  int ch, ret = 0;
2003  int dma_irq;
2004  char irq_name[4];
2005  int irq_rel;
2006 
2007  p = pdev->dev.platform_data;
2008  if (!p) {
2009  dev_err(&pdev->dev,
2010  "%s: System DMA initialized without platform data\n",
2011  __func__);
2012  return -EINVAL;
2013  }
2014 
2015  d = p->dma_attr;
2016  errata = p->errata;
2017 
2018  if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2019  && (omap_dma_reserve_channels <= dma_lch_count))
2020  d->lch_count = omap_dma_reserve_channels;
2021 
2022  dma_lch_count = d->lch_count;
2023  dma_chan_count = dma_lch_count;
2024  dma_chan = d->chan;
2025  enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;
2026 
2027  if (cpu_class_is_omap2()) {
2028  dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2029  dma_lch_count, GFP_KERNEL);
2030  if (!dma_linked_lch) {
2031  ret = -ENOMEM;
2032  goto exit_dma_lch_fail;
2033  }
2034  }
2035 
2036  spin_lock_init(&dma_chan_lock);
2037  for (ch = 0; ch < dma_chan_count; ch++) {
2038  omap_clear_dma(ch);
2039  if (cpu_class_is_omap2())
2040  omap2_disable_irq_lch(ch);
2041 
2042  dma_chan[ch].dev_id = -1;
2043  dma_chan[ch].next_lch = -1;
2044 
2045  if (ch >= 6 && enable_1510_mode)
2046  continue;
2047 
2048  if (cpu_class_is_omap1()) {
2049  /*
2050  * request_irq() doesn't like dev_id (ie. ch) being
2051  * zero, so we have to kludge around this.
2052  */
2053  sprintf(&irq_name[0], "%d", ch);
2054  dma_irq = platform_get_irq_byname(pdev, irq_name);
2055 
2056  if (dma_irq < 0) {
2057  ret = dma_irq;
2058  goto exit_dma_irq_fail;
2059  }
2060 
2061  /* INT_DMA_LCD is handled in lcd_dma.c */
2062  if (dma_irq == INT_DMA_LCD)
2063  continue;
2064 
2065  ret = request_irq(dma_irq,
2066  omap1_dma_irq_handler, 0, "DMA",
2067  (void *) (ch + 1));
2068  if (ret != 0)
2069  goto exit_dma_irq_fail;
2070  }
2071  }
2072 
2073  if (cpu_class_is_omap2() && !cpu_is_omap242x())
2076 
2077  if (cpu_class_is_omap2()) {
2078  strcpy(irq_name, "0");
2079  dma_irq = platform_get_irq_byname(pdev, irq_name);
2080  if (dma_irq < 0) {
2081  dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2082  goto exit_dma_lch_fail;
2083  }
2084  ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2085  if (ret) {
2086  dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
2087  dma_irq, ret);
2088  goto exit_dma_lch_fail;
2089  }
2090  }
2091 
2092  /* reserve dma channels 0 and 1 in high security devices */
2093  if (cpu_is_omap34xx() &&
2095  pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
2096  dma_chan[0].dev_id = 0;
2097  dma_chan[1].dev_id = 1;
2098  }
2099  p->show_dma_caps();
2100  return 0;
2101 
2102 exit_dma_irq_fail:
2103  dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
2104  dma_irq, ret);
2105  for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2106  dma_irq = platform_get_irq(pdev, irq_rel);
2107  free_irq(dma_irq, (void *)(irq_rel + 1));
2108  }
2109 
2110 exit_dma_lch_fail:
2111  kfree(p);
2112  kfree(d);
2113  kfree(dma_chan);
2114  return ret;
2115 }
2116 
2117 static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2118 {
2119  int dma_irq;
2120 
2121  if (cpu_class_is_omap2()) {
2122  char irq_name[4];
2123  strcpy(irq_name, "0");
2124  dma_irq = platform_get_irq_byname(pdev, irq_name);
2125  remove_irq(dma_irq, &omap24xx_dma_irq);
2126  } else {
2127  int irq_rel = 0;
2128  for ( ; irq_rel < dma_chan_count; irq_rel++) {
2129  dma_irq = platform_get_irq(pdev, irq_rel);
2130  free_irq(dma_irq, (void *)(irq_rel + 1));
2131  }
2132  }
2133  kfree(p);
2134  kfree(d);
2135  kfree(dma_chan);
2136  return 0;
2137 }
2138 
/* Platform driver glue: matches the "omap_dma_system" platform device. */
static struct platform_driver omap_system_dma_driver = {
	.probe = omap_system_dma_probe,
	.remove = __devexit_p(omap_system_dma_remove),
	.driver = {
		.name = "omap_dma_system"
	},
};
2146 
/* Register at arch_initcall time, ahead of ordinary device initcalls. */
static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);
2152 
/* Module unload hook: drop the platform driver registration. */
static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}

MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
2162 
2163 /*
2164  * Reserve the omap SDMA channels using cmdline bootarg
2165  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2166  */
2167 static int __init omap_dma_cmdline_reserve_ch(char *str)
2168 {
2169  if (get_option(&str, &omap_dma_reserve_channels) != 1)
2170  omap_dma_reserve_channels = 0;
2171  return 1;
2172 }
2173 
/* Hook the early-param parser for "omap_dma_reserve_ch=". */
__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2175 
2176