#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define MAX_LOGICAL_DMA_CH_COUNT        32
#ifndef CONFIG_ARCH_OMAP1

#define OMAP_DMA_ACTIVE                 0x01
#define OMAP2_DMA_CSR_CLEAR_MASK        0xffffffff

#define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
static int enable_1510_mode;
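/*
 * Global controller registers saved on the way into off-mode and written
 * back by the context save/restore helpers near the end of this file.
 */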
static struct omap_dma_global_context_registers {
        u32 dma_irqenable_l0;
        u32 dma_ocp_sysconfig;
        u32 dma_gcr;
} omap_dma_global_context;
#ifndef CONFIG_ARCH_OMAP1
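/*
 * Chained-transfer bookkeeping: each chain keeps a circular queue over its
 * linked logical channels. q_head/q_tail index into the chain and q_count
 * tracks how many queued transfers are outstanding.
 */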
#define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
        do {                                                            \
                dma_linked_lch[chain_id].q_head =                       \
                dma_linked_lch[chain_id].q_tail =                       \
                dma_linked_lch[chain_id].q_count = 0;                   \
        } while (0)

#define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
        (dma_linked_lch[chain_id].no_of_lchs_linked ==                  \
         dma_linked_lch[chain_id].q_count)

#define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
        ((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==            \
         dma_linked_lch[chain_id].q_count)

#define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
        (0 == dma_linked_lch[chain_id].q_count)

/* NOTE: expands to an expression that picks up chain_id at the call site */
#define __OMAP_DMA_CHAIN_INCQ(end)                                      \
        ((end) = ((end) + 1) % dma_linked_lch[chain_id].no_of_lchs_linked)

#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
        do {                                                            \
                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
                dma_linked_lch[chain_id].q_count--;                     \
        } while (0)

#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
        do {                                                            \
                __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
                dma_linked_lch[chain_id].q_count++;                     \
        } while (0)
static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;

static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

#define REVISIT_24XX()  printk(KERN_ERR "FIXME: no %s on 24xx\n",       \
                               __func__)
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
        return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode()         0
#endif
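/*
 * On OMAP1 the FUNC_MUX_ARM registers route a global DMA request line to a
 * device: five 6-bit device fields per 32-bit register, hence the
 * divide/modulo-by-5 arithmetic below.
 */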
#ifdef CONFIG_ARCH_OMAP1
static inline int get_gdma_dev(int req)
{
        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
        int shift = ((req - 1) % 5) * 6;

        return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

static inline void set_gdma_dev(int req, int dev)
{
        u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
        int shift = ((req - 1) % 5) * 6;
        u32 l;

        l = omap_readl(reg);
        l &= ~(0x3f << shift);
        l |= (dev - 1) << shift;
        omap_writel(l, reg);
}
#else
#define set_gdma_dev(req, dev)  do {} while (0)
#define omap_readl(reg)         0
#define omap_writel(val, reg)   do {} while (0)
#endif
        l |= (priority & 0xf) << 8;
                                  int dma_trigger, int src_or_dst_synch)
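/*
 * CCR synchro control: the 7-bit trigger number is split, bits 4:0 land
 * in CCR[4:0] and bits 6:5 (via the << 14 below) in CCR[20:19].
 */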
                val &= ~((1 << 23) | (3 << 19) | 0x1f);
                val |= (dma_trigger & ~0x1f) << 14;
                val |= dma_trigger & 0x1f;

                } else if (src_or_dst_synch) {
                val &= ~((1 << 17) | (1 << 16));        /* clear constant-fill and transparent-copy enables */
                csdp &= ~(0x3 << 16);
                csdp |= (mode << 16);                   /* CSDP[17:16]: write mode */
                             unsigned long src_start,
                             int src_ei, int src_fi)
        l |= src_amode << 12;           /* CCR[13:12]: source addressing mode */
        unsigned int burst = 0;

        switch (burst_mode) {
                              unsigned long dest_start,
                              int dst_ei, int dst_fi)
        l |= dest_amode << 14;          /* CCR[15:14]: destination addressing mode */
        unsigned int burst = 0;

        switch (burst_mode) {
static inline void omap_enable_channel_irq(int lch)

static inline void omap_disable_channel_irq(int lch)

static inline void enable_lnk(int lch)
                l = dma_chan[lch].next_lch | (1 << 15); /* CLNK_CTRL: ENABLE_LNK */
#ifndef CONFIG_ARCH_OMAP1

static inline void disable_lnk(int lch)

        omap_disable_channel_irq(lch);
static inline void omap2_enable_irq_lch(int lch)

        spin_unlock_irqrestore(&dma_chan_lock, flags);

static inline void omap2_disable_irq_lch(int lch)

        spin_unlock_irqrestore(&dma_chan_lock, flags);
int omap_request_dma(int dev_id, const char *dev_name,
                     void (*callback)(int lch, u16 ch_status, void *data),
                     void *data, int *dma_ch_out)
        int ch, free_ch = -1;

        for (ch = 0; ch < dma_chan_count; ch++) {
                if (free_ch == -1 && dma_chan[ch].dev_id == -1) {

        spin_unlock_irqrestore(&dma_chan_lock, flags);

        chan = dma_chan + free_ch;

        spin_unlock_irqrestore(&dma_chan_lock, flags);

#ifndef CONFIG_ARCH_OMAP1

        dev_id = free_ch + 1;

        omap_enable_channel_irq(free_ch);
        omap2_enable_irq_lch(free_ch);

        *dma_ch_out = free_ch;
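/*
 * Typical client usage, a minimal sketch (MY_DMA_DEV_ID, my_dma_cb and
 * "done" are hypothetical, not part of this file): request a logical
 * channel, program and start it, then free it once the callback fires.
 *
 *      static void my_dma_cb(int lch, u16 ch_status, void *data)
 *      {
 *              complete(data);
 *      }
 *
 *      int lch;
 *
 *      if (omap_request_dma(MY_DMA_DEV_ID, "mydev", my_dma_cb,
 *                           &done, &lch) == 0) {
 *              ... set transfer/src/dest params, omap_start_dma(lch),
 *              wait_for_completion(&done) ...
 *              omap_free_dma(lch);
 *      }
 */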
        if (dma_chan[lch].dev_id == -1) {
                pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
                       lch);
        omap2_disable_irq_lch(lch);

        omap_disable_channel_irq(lch);

        dma_chan[lch].dev_id = -1;

        spin_unlock_irqrestore(&dma_chan_lock, flags);
        if (max_fifo_depth == 0)
                max_fifo_depth = 1;
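/*
 * GCR fields programmed below: [7:0] max channel FIFO depth,
 * [13:12] high-priority thread reservation, [23:16] arbitration rate.
 */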
        reg = 0xff & max_fifo_depth;
        reg |= (0x3 & tparams) << 12;
        reg |= (arb_rate & 0xff) << 16;
                      unsigned char write_prio)

        if (unlikely((lch < 0 || lch >= dma_lch_count))) {

        l &= ~((1 << 6) | (1 << 26));   /* CCR: read (6) and write (26) priority */
        if (d->dev_caps & IS_RW_PRIORITY)
                l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
        else
                l |= ((read_prio & 0x1) << 6);
        dma_chan_link_map[lch] = 1;

        memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));

                next_lch = dma_chan[cur_lch].next_lch;

                if (dma_chan_link_map[cur_lch])

                dma_chan_link_map[cur_lch] = 1;

                omap_enable_channel_irq(cur_lch);

        } while (next_lch != -1);

        omap_enable_channel_irq(lch);
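/*
 * Note the ordering above: when a channel heads a link list, IRQs are
 * enabled on every channel in the chain before the head itself is started.
 */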
        omap_disable_channel_irq(lch);

                pr_err("DMA drain did not complete on lch %d\n", lch);

        memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));

                if (dma_chan_link_map[cur_lch])

                dma_chan_link_map[cur_lch] = 1;

                disable_lnk(cur_lch);

                next_lch = dma_chan[cur_lch].next_lch;

        } while (next_lch != -1);
        unsigned long flags;

        if (dma_chan[lch].dev_id == -1) {

        spin_unlock_irqrestore(&dma_chan_lock, flags);

        spin_unlock_irqrestore(&dma_chan_lock, flags);

        for (lch = 0; lch < dma_chan_count; lch++)
        if (lch_head == lch_queue) {

        if ((dma_chan[lch_head].dev_id == -1) ||
            (dma_chan[lch_queue].dev_id == -1)) {
                pr_err("omap_dma: trying to link non requested channels\n");

        dma_chan[lch_head].next_lch = lch_queue;
        if (lch_head == lch_queue) {

        if (dma_chan[lch_head].next_lch != lch_queue ||
            dma_chan[lch_head].next_lch == -1) {
                pr_err("omap_dma: trying to unlink non linked channels\n");

            (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
                pr_err("omap_dma: You need to stop the DMA channels before unlinking\n");
#ifndef CONFIG_ARCH_OMAP1

static void create_dma_lch_chain(int lch_head, int lch_queue)
                        || no_of_chans > dma_lch_count))) {

        if (channels == NULL) {

        for (i = 0; i < no_of_chans; i++) {

                        for (j = 0; j < i; j++)

        *chain_id = channels[0];

        for (i = 0; i < no_of_chans; i++)
                dma_chan[channels[i]].chain_id = *chain_id;

        if (no_of_chans == 1)
                create_dma_lch_chain(channels[0], channels[0]);

        for (i = 0; i < (no_of_chans - 1); i++)
                create_dma_lch_chain(channels[i], channels[i + 1]);
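/*
 * Each channel is chained to its successor; a single-channel chain is
 * chained to itself so the circular-queue macros above keep working.
 */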
                        || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
                 dma_linked_lch[chain_id].q_count);
                        int elem_count, int frame_count, void *callbk_data)
        if (elem_count < 1) {

                        || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        dma_chan[lch].data = callbk_data;

        if (dest_start != 0)

        if (dma_linked_lch[chain_id].chain_state ==
                        DMA_CHAIN_STARTED) {

                        disable_lnk(dma_chan[lch].prev_linked_ch);
                        pr_debug("\n prev ch is stopped\n");

        omap_enable_channel_irq(lch);

        if (0 == (l & (1 << 24)))

        if (start_dma == 1) {
                if (0 == (l & (1 << 7))) {      /* CCR bit 7: channel enable */
        if (0 == (l & (1 << 7)))
        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

                enable_lnk(channels[i]);
                omap_enable_channel_irq(channels[i]);

        omap_enable_channel_irq(channels[0]);

        if (0 == (l & (1 << 24)))

        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        l &= ~((1 << 12) | (1 << 13));

                disable_lnk(channels[i]);
        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {

        if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {

        if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
#ifdef CONFIG_ARCH_OMAP1
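/*
 * In OMAP1510-compatible mode, status for channels 6..8 shows up in the
 * upper CSR bits of channels 0..2, which is why the IRQ handler below
 * also services ch + 6.
 */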
static int omap1_dma_handle_ch(int ch)

        if (enable_1510_mode && ch >= 6) {

        if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {

        if ((csr & 0x3f) == 0)
        if (unlikely(dma_chan[ch].dev_id == -1)) {
                pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
                        ch, csr);

                pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);

                pr_warn("DMA synchronization event drop occurred with device %d\n",
                        dma_chan[ch].dev_id);
        int ch = ((int) dev_id) - 1;

        int handled_now = 0;

        handled_now += omap1_dma_handle_ch(ch);
        if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
                handled_now += omap1_dma_handle_ch(ch + 6);

        handled += handled_now;
#define omap1_dma_irq_handler   NULL

#ifdef CONFIG_ARCH_OMAP2PLUS
static int omap2_dma_handle_ch(int ch)

                if (printk_ratelimit())
                        pr_warn("Spurious DMA IRQ for lch %d\n", ch);

        if (unlikely(dma_chan[ch].dev_id == -1)) {
                if (printk_ratelimit())
                        pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
                                status, ch);

        if (unlikely(status & OMAP_DMA_DROP_IRQ))
                pr_info("DMA synchronization event drop occurred with device %d\n",
                        dma_chan[ch].dev_id);

                        dma_chan[ch].dev_id);

                        dma_chan[ch].dev_id);

                        dma_chan[ch].dev_id);
static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)

        if (printk_ratelimit())

        for (i = 0; i < dma_lch_count && val != 0; i++) {

                omap2_dma_handle_ch(i);

static struct irqaction omap24xx_dma_irq = {
        .handler = omap2_dma_irq_handler,

static struct irqaction omap24xx_dma_irq;
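/*
 * Off-mode context handling: only the global IRQENABLE_L0, OCP_SYSCONFIG
 * and GCR registers are saved here and written back on restore.
 */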
        omap_dma_global_context.dma_irqenable_l0 =
                p->dma_read(IRQENABLE_L0, 0);
        omap_dma_global_context.dma_ocp_sysconfig =
                p->dma_read(OCP_SYSCONFIG, 0);
        omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);

        p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
                     OCP_SYSCONFIG, 0);
        p->dma_write(omap_dma_global_context.dma_irqenable_l0,
                     IRQENABLE_L0, 0);
        for (ch = 0; ch < dma_chan_count; ch++)
                if (dma_chan[ch].dev_id != -1)
        p = pdev->dev.platform_data;

                        "%s: System DMA initialized without platform data\n",

        if (omap_dma_reserve_channels
                        && (omap_dma_reserve_channels <= dma_lch_count))
                d->lch_count = omap_dma_reserve_channels;

        dma_chan_count = dma_lch_count;

        if (!dma_linked_lch) {
                goto exit_dma_lch_fail;
        for (ch = 0; ch < dma_chan_count; ch++) {

                omap2_disable_irq_lch(ch);

                dma_chan[ch].dev_id = -1;

                if (ch >= 6 && enable_1510_mode)

                        sprintf(&irq_name[0], "%d", ch);

                        goto exit_dma_irq_fail;

                goto exit_dma_irq_fail;
                dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
                goto exit_dma_lch_fail;

        ret = setup_irq(dma_irq, &omap24xx_dma_irq);

                dev_err(&pdev->dev,
                        "set_up failed for IRQ %d for DMA (error %d)\n",

                goto exit_dma_lch_fail;
        pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");

                dev_err(&pdev->dev,
                        "unable to request IRQ %d for DMA (error %d)\n",

        for (irq_rel = 0; irq_rel < ch; irq_rel++) {

                free_irq(dma_irq, (void *)(irq_rel + 1));

        for ( ; irq_rel < dma_chan_count; irq_rel++) {

                free_irq(dma_irq, (void *)(irq_rel + 1));
        .probe          = omap_system_dma_probe,
                .name   = "omap_dma_system"

static int __init omap_system_dma_init(void)

static void __exit omap_system_dma_exit(void)
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
        if (get_option(&str, &omap_dma_reserve_channels) != 1)
                omap_dma_reserve_channels = 0;
        return 1;
}

__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);