#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/hardware.h>
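/*
 * Driver for the DMA engine found on Freescale i.MX1/21/27 SoCs (DMAv1),
 * exposed through the dmaengine framework.
 */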
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1
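/*
 * The controller has only two 2D configuration register sets (A and B),
 * shared by all channels, so concurrent interleaved transfers must claim
 * one of these slots.
 */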
#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)
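/*
 * IMX_DMA_MEMSIZE_* and IMX_DMA_TYPE_* mirror the CCR SSIZ (bits 4-5) and
 * SMOD (bits 10-11) encodings below, so they can be OR'd directly into a
 * channel control word when building the slave configuration.
 */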
#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)
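/*
 * Software error codes accumulated by the error interrupt handler; the
 * first four correspond one-to-one to the DBTOSR/DRTOSR/DSESR/DBOSR
 * status registers below, while TIMEOUT marks a driver watchdog timeout.
 */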
#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
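/*
 * The 2D size registers come in two sets (A/B) shared by all channels;
 * everything from DMA_SAR on is per-channel, each channel owning a
 * 0x40-byte window at 0x80 + (channel << 6).
 */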
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */
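/*
 * Note that DMA_RTOR and DMA_BUCR deliberately share offset 0x98: the
 * hardware interprets that register as a request timeout when request
 * mode (CCR_REN) is enabled, and as bus utilization control otherwise.
 */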
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)
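/*
 * The destination fields in CCR (DMOD, DSIZ) sit exactly two bits above
 * their source counterparts (SMOD, SSIZ), so a source-side configuration
 * can be turned into the destination-side one with a plain "<< 2".
 */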
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
static void imxdma_enable_hw(struct imxdma_desc *d)
	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
	int channel = imxdmac->channel;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
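/*
 * Watchdog for hardware scatter-gather chaining: if a chained transfer
 * stalls, the timer stops the channel and defers completion handling to
 * the channel tasklet.
 */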
static void imxdma_watchdog(unsigned long data)
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
	unsigned int err_mask;
	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
		if (!(err_mask & (1 << i)))
			continue;
		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ? " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
	spin_unlock(&imxdma->lock);
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
			if (imxdma_hw_chain(imxdmac)) {
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));
				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);
		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
		imxdma_err_handler(irq, dev_id);
	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
			dma_irq_handle_channel(&imxdma->channel[i]);
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			spin_unlock_irqrestore(&imxdma->lock, flags);
		spin_unlock_irqrestore(&imxdma->lock, flags);
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
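		/*
		 * Which register set was claimed is signalled to the channel
		 * through the CCR_MSEL_B bit in its mem/port control words.
		 */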
		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);
			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
static void imxdma_tasklet(unsigned long data)
	spin_lock(&imxdma->lock);
	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);
	if (!list_empty(&imxdmac->ld_queue)) {
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
out:
	spin_unlock(&imxdma->lock);
	unsigned int mode = 0;
		imxdma_disable_hw(imxdmac);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
	return dma_cookie_status(chan, cookie, txstate);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
		desc->desc.tx_submit = imxdma_tx_submit;
static void imxdma_free_chan_resources(struct dma_chan *chan)
	imxdma_disable_hw(imxdmac);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	INIT_LIST_HEAD(&imxdmac->ld_free);
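/*
 * For context, a typical dmaengine client would drive the prep callbacks
 * below roughly like this (illustrative sketch only; "chan", "sgl",
 * "sg_len", "fifo_phys" and "done_fn" are hypothetical client-side names):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */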
		unsigned long flags, void *context)
	int i, dma_length = 0;
	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;
	desc->len = dma_length;
		unsigned long flags, void *context)
	unsigned int periods = buf_len / period_len;
	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);
	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;
	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
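	/*
	 * The extra entry closes the ring: its page_link points back to the
	 * first entry with bit 0 set (the scatterlist chain marker), so
	 * imxdma_sg_next() keeps cycling through the period entries.
	 */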
	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);
	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;
	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		" src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);
	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
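	/*
	 * 2D geometry: x is the row length in bytes, y the number of rows
	 * (frames), and w the full line stride (row plus inter-chunk gap),
	 * giving a total transfer length of x * y bytes.
	 */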
static void imxdma_issue_pending(struct dma_chan *chan)
		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	spin_unlock_irqrestore(&imxdma->lock, flags);
	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
		ret = PTR_ERR(imxdma->dma_ipg);
		ret = PTR_ERR(imxdma->dma_ahb);
	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);
		dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
		dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
	INIT_LIST_HEAD(&imxdma->dma_device.channels);
			ret = request_irq(MX2x_INT_DMACH0 + i,
					  dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 MX2x_INT_DMACH0 + i, i);
			imxdmac->watchdog.function = &imxdma_watchdog;
		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		dma_cookie_init(&imxdmac->chan);
	imxdma->dev = &pdev->dev;
	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
	platform_set_drvdata(pdev, imxdma);
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
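	/*
	 * The channel count register (CNTR) is only 24 bits wide (see
	 * CNTR_CNT above), so a single segment must not exceed 0xffffff.
	 */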
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
static int __init imxdma_module_init(void)