28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/slab.h>
/*
 * Per-channel logging helpers: prefix every message with the channel
 * name so output from a multi-channel controller can be attributed.
 * NOTE: 'chan' is evaluated twice by each macro — do not pass an
 * expression with side effects.
 */
#define chan_dbg(chan, fmt, arg...) \
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
/* Shared error string reported when a link-descriptor allocation fails. */
static const char msg_ld_oom[] = "No free memory for link descriptor";
109 return DMA_TO_CPU(chan, desc->
hw.src_addr, 64) & ~snoop_bits;
129 return DMA_TO_CPU(chan, desc->
hw.dst_addr, 64) & ~snoop_bits;
245 for (i = 0; i < 100; i++) {
246 if (dma_is_idle(chan))
252 if (!dma_is_idle(chan))
253 chan_err(chan,
"DMA halt timeout!\n");
267 static void fsl_chan_set_src_loop_size(
struct fsldma_chan *chan,
int size)
299 static void fsl_chan_set_dst_loop_size(
struct fsldma_chan *chan,
int size)
332 static void fsl_chan_set_request_count(
struct fsldma_chan *chan,
int size)
339 mode |= (__ilog2(size) << 24) & 0x0f000000;
371 static void fsl_chan_toggle_ext_start(
struct fsldma_chan *chan,
int enable)
418 cookie = dma_cookie_assign(&child->
async_tx);
422 append_ld_queue(chan, desc);
424 spin_unlock_irqrestore(&chan->
desc_lock, flags);
442 chan_dbg(chan,
"out of memory for link descriptor\n");
446 memset(desc, 0,
sizeof(*desc));
447 INIT_LIST_HEAD(&desc->
tx_list);
449 desc->
async_tx.tx_submit = fsl_dma_tx_submit;
452 #ifdef FSL_DMA_LD_DEBUG
453 chan_dbg(chan,
"LD %p allocated\n", desc);
467 static int fsl_dma_alloc_chan_resources(
struct dma_chan *dchan)
483 chan_err(chan,
"unable to allocate descriptor pool\n");
498 static void fsldma_free_desc_list(
struct fsldma_chan *chan,
505 #ifdef FSL_DMA_LD_DEBUG
506 chan_dbg(chan,
"LD %p free\n", desc);
512 static void fsldma_free_desc_list_reverse(
struct fsldma_chan *chan,
519 #ifdef FSL_DMA_LD_DEBUG
520 chan_dbg(chan,
"LD %p free\n", desc);
530 static void fsl_dma_free_chan_resources(
struct dma_chan *dchan)
535 chan_dbg(chan,
"free all channel resources\n");
537 fsldma_free_desc_list(chan, &chan->
ld_pending);
538 fsldma_free_desc_list(chan, &chan->
ld_running);
539 spin_unlock_irqrestore(&chan->
desc_lock, flags);
546 fsl_dma_prep_interrupt(
struct dma_chan *dchan,
unsigned long flags)
556 new = fsl_dma_alloc_descriptor(chan);
562 new->async_tx.cookie = -
EBUSY;
563 new->async_tx.flags =
flags;
569 set_ld_eol(chan,
new);
571 return &
new->async_tx;
575 fsl_dma_prep_memcpy(
struct dma_chan *dchan,
577 size_t len,
unsigned long flags)
594 new = fsl_dma_alloc_descriptor(chan);
602 set_desc_cnt(chan, &new->hw, copy);
603 set_desc_src(chan, &new->hw, dma_src);
604 set_desc_dst(chan, &new->hw, dma_dst);
611 new->async_tx.cookie = 0;
612 async_tx_ack(&new->async_tx);
623 new->async_tx.flags =
flags;
624 new->async_tx.cookie = -
EBUSY;
627 set_ld_eol(chan,
new);
635 fsldma_free_desc_list_reverse(chan, &first->
tx_list);
640 struct scatterlist *dst_sg,
unsigned int dst_nents,
641 struct scatterlist *src_sg,
unsigned int src_nents,
646 size_t dst_avail, src_avail;
651 if (dst_nents == 0 || src_nents == 0)
654 if (dst_sg ==
NULL || src_sg ==
NULL)
670 len =
min_t(
size_t, src_avail, dst_avail);
679 new = fsl_dma_alloc_descriptor(chan);
685 set_desc_cnt(chan, &new->hw, len);
686 set_desc_src(chan, &new->hw, src);
687 set_desc_dst(chan, &new->hw, dst);
694 new->async_tx.cookie = 0;
695 async_tx_ack(&new->async_tx);
707 if (dst_avail == 0) {
723 if (src_avail == 0) {
739 new->async_tx.flags =
flags;
740 new->async_tx.cookie = -
EBUSY;
743 set_ld_eol(chan,
new);
751 fsldma_free_desc_list_reverse(chan, &first->
tx_list);
782 static int fsl_dma_device_control(
struct dma_chan *dchan,
803 fsldma_free_desc_list(chan, &chan->
ld_pending);
804 fsldma_free_desc_list(chan, &chan->
ld_running);
807 spin_unlock_irqrestore(&chan->
desc_lock, flags);
851 static void fsldma_cleanup_descriptor(
struct fsldma_chan *chan,
858 u32 len = get_desc_cnt(chan, desc);
862 #ifdef FSL_DMA_LD_DEBUG
863 chan_dbg(chan,
"LD %p callback\n", desc);
887 #ifdef FSL_DMA_LD_DEBUG
888 chan_dbg(chan,
"LD %p free\n", desc);
900 static void fsl_chan_xfer_ld_queue(
struct fsldma_chan *chan)
919 chan_dbg(chan,
"DMA controller still busy\n");
932 chan_dbg(chan,
"idle, starting controller\n");
953 set_cdar(chan, desc->
async_tx.phys);
964 static void fsl_dma_memcpy_issue_pending(
struct dma_chan *dchan)
970 fsl_chan_xfer_ld_queue(chan);
971 spin_unlock_irqrestore(&chan->
desc_lock, flags);
987 ret = dma_cookie_status(dchan, cookie, txstate);
988 spin_unlock_irqrestore(&chan->
desc_lock, flags);
1005 chan_dbg(chan,
"irq: stat = 0x%x\n", stat);
1013 chan_err(chan,
"Transfer Error!\n");
1021 chan_dbg(chan,
"irq: Programming Error INT\n");
1022 stat &= ~FSL_DMA_SR_PE;
1023 if (get_bcr(chan) != 0)
1024 chan_err(chan,
"Programming Error!\n");
1032 chan_dbg(chan,
"irq: End-of-Chain link INT\n");
1033 stat &= ~FSL_DMA_SR_EOCDI;
1042 chan_dbg(chan,
"irq: End-of-link INT\n");
1043 stat &= ~FSL_DMA_SR_EOLNI;
1047 if (!dma_is_idle(chan))
1048 chan_err(chan,
"irq: controller not idle!\n");
1052 chan_err(chan,
"irq: unhandled sr 0x%08x\n", stat);
1059 tasklet_schedule(&chan->
tasklet);
1064 static void dma_do_tasklet(
unsigned long data)
1069 unsigned long flags;
1081 dma_cookie_complete(&desc->
async_tx);
1083 chan_dbg(chan,
"completed_cookie=%d\n", cookie);
1090 list_splice_tail_init(&chan->
ld_running, &ld_cleanup);
1101 fsl_chan_xfer_ld_queue(chan);
1102 spin_unlock_irqrestore(&chan->
desc_lock, flags);
1111 fsldma_cleanup_descriptor(chan, desc);
1117 static irqreturn_t fsldma_ctrl_irq(
int irq,
void *data)
1121 unsigned int handled = 0;
1128 dev_dbg(fdev->
dev,
"IRQ: gsr 0x%.8x\n", gsr);
1131 chan = fdev->
chan[
i];
1137 fsldma_chan_irq(irq, chan);
1154 dev_dbg(fdev->
dev,
"free per-controller IRQ\n");
1160 chan = fdev->
chan[
i];
1162 chan_dbg(chan,
"free per-channel IRQ\n");
1176 dev_dbg(fdev->
dev,
"request per-controller IRQ\n");
1178 "fsldma-controller", fdev);
1184 chan = fdev->
chan[
i];
1189 chan_err(chan,
"interrupts property missing in device tree\n");
1194 chan_dbg(chan,
"request per-channel IRQ\n");
1196 "fsldma-chan", chan);
1198 chan_err(chan,
"unable to request per-channel IRQ\n");
1206 for (; i >= 0; i--) {
1207 chan = fdev->
chan[
i];
1234 dev_err(fdev->
dev,
"no free memory for DMA channels!\n");
1242 dev_err(fdev->
dev,
"unable to ioremap registers\n");
1249 dev_err(fdev->
dev,
"unable to find 'reg' property\n");
1250 goto out_iounmap_regs;
1264 chan->
id = ((
res.start - 0x100) & 0xfff) >> 7;
1265 if (chan->
id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1266 dev_err(fdev->
dev,
"too many channels for device\n");
1268 goto out_iounmap_regs;
1271 fdev->
chan[chan->
id] = chan;
1297 dma_cookie_init(&chan->
common);
1306 dev_info(fdev->
dev,
"#%d (%s), irq %d\n", chan->
id, compatible,
1319 static void fsl_dma_chan_remove(
struct fsldma_chan *chan)
1335 dev_err(&op->
dev,
"No enough memory for 'priv'\n");
1341 INIT_LIST_HEAD(&fdev->
common.channels);
1346 dev_err(&op->
dev,
"unable to ioremap registers\n");
1358 fdev->
common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1359 fdev->
common.device_free_chan_resources = fsl_dma_free_chan_resources;
1360 fdev->
common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
1361 fdev->
common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1362 fdev->
common.device_prep_dma_sg = fsl_dma_prep_sg;
1363 fdev->
common.device_tx_status = fsl_tx_status;
1364 fdev->
common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1365 fdev->
common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1366 fdev->
common.device_control = fsl_dma_device_control;
1380 fsl_dma_chan_probe(fdev, child,
1382 "fsl,eloplus-dma-channel");
1386 fsl_dma_chan_probe(fdev, child,
1388 "fsl,elo-dma-channel");
1399 err = fsldma_request_irqs(fdev);
1401 dev_err(fdev->
dev,
"unable to request IRQs\n");
1423 fsldma_free_irqs(fdev);
1427 fsl_dma_chan_remove(fdev->
chan[i]);
1438 { .compatible =
"fsl,eloplus-dma", },
1439 { .compatible =
"fsl,elo-dma", },
1445 .name =
"fsl-elo-dma",
1447 .of_match_table = fsldma_of_ids,
1449 .probe = fsldma_of_probe,
1450 .remove = fsldma_of_remove,
1457 static __init int fsldma_init(
void)
1459 pr_info(
"Freescale Elo / Elo Plus DMA driver\n");
1463 static void __exit fsldma_exit(
void)