#include <linux/module.h>
#include <linux/slab.h>

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

static unsigned int init_nr_desc_per_channel = 64;
		 "initial descriptors per channel (default: 64)");
	desc->txd.tx_submit = atc_tx_submit;

		if (async_tx_test_ack(&desc->txd)) {
				"desc %p not ACKed\n", desc);
	spin_unlock_irqrestore(&atchan->lock, flags);
		"scanned %u descriptors on freelist\n", i);

			spin_unlock_irqrestore(&atchan->lock, flags);
					"not enough descriptors available\n");
		dev_vdbg(chan2dev(&atchan->chan_common),
				"moving child desc %p to freelist\n",
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);

		(*prev)->lli.dscr = desc->txd.phys;
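/*
 * Hardware LLI chaining: the previous descriptor's DSCR field is pointed at
 * the physical address of the new descriptor, so the controller walks the
 * linked list on its own once the channel has been started.
 */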
	if (atc_chan_is_enabled(atchan)) {
			"BUG: Attempted to start non-idle channel\n");
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
	vdbg_dump_regs(atchan);
	vdbg_dump_regs(atchan);

		"descriptor %u complete\n", txd->cookie);
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	if (!atc_chan_is_cyclic(atchan)) {

static void atc_complete_all(struct at_dma_chan *atchan)
	BUG_ON(atc_chan_is_enabled(atchan));
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
		atc_chain_complete(atchan, desc);

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
		atc_chain_complete(atchan, desc);

	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
		atc_chain_complete(atchan, atc_first_active(atchan));
		atc_dostart(atchan, atc_first_active(atchan));
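/*
 * advance_work: when active_list is empty or holds a single descriptor,
 * everything submitted has retired, so the whole chain is completed and
 * whatever sits on the software queue is started next; otherwise only the
 * finished head is retired and the channel is restarted on the new first
 * active descriptor.
 */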
static void atc_handle_error(struct at_dma_chan *atchan)
	bad_desc = atc_first_active(atchan);
		atc_dostart(atchan, atc_first_active(atchan));
			"Bad descriptor submitted for DMA!\n");
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
		atc_dump_lli(atchan, &child->lli);
	atc_chain_complete(atchan, bad_desc);
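/*
 * Error path: the bad descriptor is unlinked from active_list, the channel
 * is restarted on the next pending descriptor, the offending cookie and
 * LLIs are dumped, and the descriptor is then completed as if it had
 * succeeded, since this dmaengine interface has no per-descriptor error
 * report.
 */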
	struct at_desc			*first = atc_first_active(atchan);

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",

static void atc_tasklet(unsigned long data)
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	pending = status & imr;

			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
				tasklet_schedule(&atchan->tasklet);

	struct at_desc		*desc = txd_to_at_desc(tx);
	cookie = dma_cookie_assign(tx);
		atc_dostart(atchan, desc);
	spin_unlock_irqrestore(&atchan->lock, flags);
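/*
 * tx_submit: once the cookie is assigned, an idle channel (empty
 * active_list) is started right away with atc_dostart(); otherwise the
 * descriptor is appended to the software queue and picked up later by
 * atc_advance_work().
 */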
		size_t len, unsigned long flags)
	unsigned int		src_width;
	unsigned int		dst_width;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");

	if (!((src | dest | len) & 3)) {
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		src_width = dst_width = 1;
		src_width = dst_width = 0;
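/*
 * Width selection: src_width/dst_width hold log2 of the access size
 * (2 = 32-bit word, 1 = halfword, 0 = byte), picked from the common
 * alignment of src, dest and len; the same shift converts byte counts into
 * ctrla transfer counts in the chunking loop below.  Illustrative sketch
 * only, not part of the driver:
 */
#if 0
static unsigned int xfer_width(dma_addr_t src, dma_addr_t dest, size_t len)
{
	if (!((src | dest | len) & 3))
		return 2;	/* word accesses */
	if (!((src | dest | len) & 1))
		return 1;	/* halfword accesses */
	return 0;		/* byte accesses */
}
#endif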
	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
		desc = atc_desc_get(atchan);
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;
		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);

	atc_desc_put(atchan, first);
		unsigned long flags, void *context)
	unsigned int		reg_width;
	unsigned int		mem_width;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");

			desc = atc_desc_get(atchan);
					"prep_slave_sg: sg(%d) data length is zero\n", i);
			desc->lli.ctrla = ctrla
			desc->lli.ctrlb = ctrlb;
			atc_desc_chain(&first, &prev, desc);

			desc = atc_desc_get(atchan);
					"prep_slave_sg: sg(%d) data length is zero\n", i);
			desc->lli.ctrla = ctrla
			desc->lli.ctrlb = ctrlb;
			atc_desc_chain(&first, &prev, desc);

	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);

	if (unlikely(period_len & ((1 << reg_width) - 1)))
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
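/*
 * Cyclic parameter check: both the period length and the buffer address
 * must be a multiple of the slave register width (1 << reg_width bytes),
 * otherwise the request is rejected before any descriptor is built.
 */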
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		| period_len >> reg_width;

		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;

		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
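/*
 * Per-period descriptor setup: for MEM_TO_DEV the source address steps
 * through the buffer one period at a time while the destination stays on
 * the peripheral register; for DEV_TO_MEM it is the destination address
 * that walks the buffer.
 */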
		unsigned long flags, void *context)
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");

	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
			period_len, direction))

	for (i = 0; i < periods; i++) {
		desc = atc_desc_get(atchan);
		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
				reg_width, period_len, direction))
		atc_desc_chain(&first, &prev, desc);

	prev->lli.dscr = first->txd.phys;
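/*
 * Close the chain into a ring: the last LLI points back to the first one,
 * so the cyclic transfer wraps around with no CPU intervention.
 */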
	first->len = buf_len;

	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);

static int set_runtime_config(struct dma_chan *chan,

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
		spin_unlock_irqrestore(&atchan->lock, flags);
		if (!atc_chan_is_paused(atchan))
		spin_unlock_irqrestore(&atchan->lock, flags);
			atc_chain_complete(atchan, desc);
		spin_unlock_irqrestore(&atchan->lock, flags);
atc_tx_status(struct dma_chan *chan,
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	ret = dma_cookie_status(chan, cookie, txstate);
		atc_cleanup_descriptors(atchan);
		ret = dma_cookie_status(chan, cookie, txstate);
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);
static void atc_issue_pending(struct dma_chan *chan)
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	if (atc_chan_is_cyclic(atchan))
	if (!atc_chan_is_enabled(atchan)) {
		atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
static int atc_alloc_chan_resources(struct dma_chan *chan)
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
				"Only %d initial descriptors\n", i);

	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

		"alloc_chan_resources: allocated %d descriptors\n",
static void atc_free_chan_resources(struct dma_chan *chan)
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
	BUG_ON(atc_chan_is_enabled(atchan));

		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
		.data = &at91sam9rl_config,
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,

		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
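/*
 * The controller is matched either through the OF table above (device-tree
 * boot) or through this platform_device_id table (legacy board files);
 * at_dma_get_driver_data() below returns the at_dma_platform_data picked by
 * whichever table matched.
 */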
	if (pdev->dev.of_node) {

static void at_dma_off(struct at_dma *atdma)

	plat_dat = at_dma_get_driver_data(pdev);
	size = sizeof(struct at_dma);
	size = resource_size(io);

	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	platform_set_drvdata(pdev, atdma);

		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		goto err_pool_create;

		atchan->mask = 1 << i;
		INIT_LIST_HEAD(&atchan->queue);
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;

	atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
	atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
	atdma->dma_common.device_control = atc_control;
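/*
 * dmaengine wiring: the alloc/free/tx_status/issue_pending callbacks are
 * always installed; in the full driver the prep_dma_memcpy, prep_slave_sg,
 * prep_dma_cyclic and device_control hooks are only set when the matching
 * DMA_MEMCPY / DMA_SLAVE capabilities are present in cap_mask.  A generic
 * consumer-side sketch (illustrative only; "chan", "buf", "len" and
 * "phys_fifo" are assumed to exist, this is not code from this driver):
 */
#if 0
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= phys_fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *txd;

	dmaengine_slave_config(chan, &cfg);
	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (txd) {
		dmaengine_submit(txd);		/* ends up in atc_tx_submit() */
		dma_async_issue_pending(chan);	/* kicks atc_issue_pending() */
	}
#endif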
	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",

	platform_set_drvdata(pdev, NULL);

	struct at_dma		*atdma = platform_get_drvdata(pdev);
	platform_set_drvdata(pdev, NULL);

		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

	struct at_dma	*atdma = platform_get_drvdata(pdev);
	at_dma_off(platform_get_drvdata(pdev));

static int at_dma_prepare(struct device *dev)
	struct at_dma *atdma = platform_get_drvdata(pdev);
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
	if (!atc_chan_is_paused(atchan)) {
		"cyclic channel not paused, should be done by channel user\n");
	vdbg_dump_regs(atchan);
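/*
 * Suspending a cyclic channel: the client is expected to have paused it
 * already; if not, the driver warns and pauses it itself, then saves the
 * current descriptor pointer so atc_resume_cyclic() can restart the cycle
 * where it stopped.
 */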
static int at_dma_suspend_noirq(struct device *dev)
	struct at_dma *atdma = platform_get_drvdata(pdev);
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);

static void atc_resume_cyclic(struct at_dma_chan *atchan)
	vdbg_dump_regs(atchan);

static int at_dma_resume_noirq(struct device *dev)
	struct at_dma *atdma = platform_get_drvdata(pdev);
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,

	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.pm		= &at_dma_dev_pm_ops,

static int __init at_dma_init(void)

static void __exit at_dma_exit(void)