#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/slab.h>
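/*
 * DWC_DEFAULT_CTLLO() builds the baseline CTL_LO value for a transfer on
 * @_chan: the burst sizes come from the slave configuration when a slave is
 * attached (falling back to a fixed default otherwise), and the source and
 * destination master interfaces are selected via dwc_get_sms()/dwc_get_dms().
 */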
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
		int _dms = dwc_get_dms(__slave);		\
		int _sms = dwc_get_sms(__slave);		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
#define NR_DESCS_PER_CHANNEL	64
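/*
 * chan2dev()/chan2parent(): map a dma_chan to the struct device used for
 * diagnostics, and to its parent (the controller device) used for DMA
 * mapping, respectively.
 */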
	return &chan->dev->device;

	return chan->dev->device.parent;
		if (async_tx_test_ack(&desc->txd)) {
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
		dwc_sync_desc_for_cpu(dwc, desc);

					"moving child desc %p to freelist\n",
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
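/*
 * dwc_initialize(): one-time channel setup.  Programs the channel
 * configuration registers and unmasks the channel's transfer-complete and
 * error interrupts; guarded by dwc->initialized so it runs only once until
 * the channel is torn down again.
 */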
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (dwc->initialized == true)

	dwc->initialized = true;
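/*
 * dwc_fast_fls(): return the largest power-of-two alignment (as a byte
 * shift) shared by the ORed source address, destination address and length.
 * Used below to pick the widest bus width that is still safe for a transfer.
 */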
static inline unsigned int dwc_fast_fls(unsigned long long v)
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)

		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,

	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long was_soft_llp;

			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

				"BUG: Attempted to start new LLP transfer "
				"inside ongoing one\n");

		dwc_do_single_block(dwc, first);
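/*
 * dwc_descriptor_complete(): mark a descriptor and its children complete,
 * move them back to the free list, unmap the buffers for non-slave
 * transfers and, if @callback_required is set, invoke the client callback
 * after dropping dwc->lock.
 */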
		bool callback_required)

	dma_cookie_complete(txd);
	if (callback_required) {

	dwc_sync_desc_for_cpu(dwc, desc);

		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
						desc->len, DMA_TO_DEVICE);
						desc->len, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
364 "BUG: XFER bit set, but channel not idle!\n");
367 dwc_chan_disable(dw, dwc);
375 if (!list_empty(&dwc->
queue)) {
377 dwc_dostart(dwc, dwc_first_active(dwc));
380 spin_unlock_irqrestore(&dwc->
lock, flags);
383 dwc_descriptor_complete(dwc, desc,
true);
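/*
 * dwc_scan_descriptors(): find out how far the hardware has progressed.
 * If the XFER interrupt fired for this channel, everything on the active
 * list is done; otherwise the current LLP register is compared against each
 * descriptor (and its children) so that only the descriptors the controller
 * has already moved past are completed.
 */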
	if (status_xfer & dwc->mask) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);

		if (desc->lli.llp == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);

			if (child->lli.llp == llp) {
				spin_unlock_irqrestore(&dwc->lock, flags);

		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);

		"BUG: All descriptors done, but channel not idle!\n");

	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));

	spin_unlock_irqrestore(&dwc->lock, flags);
461 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
	dwc_scan_descriptors(dw, dwc);

	bad_desc = dwc_first_active(dwc);

		dwc_dostart(dwc, dwc_first_active(dwc));

			"Bad descriptor submitted for DMA!\n");
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	dwc_descriptor_complete(dwc, bad_desc, true);
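/*
 * dwc_handle_cyclic(): per-period servicing for cyclic transfers.  Each new
 * period invokes the registered period callback; error (or otherwise
 * unexpected transfer-complete) interrupts stop the DMA and dump the channel
 * state, since a cyclic transfer is normally torn down only by the client.
 */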
		u32 status_err, u32 status_xfer)

		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
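/*
 * dw_dma_tasklet() is the bottom half of dw_dma_interrupt(): it reads the
 * raw error/transfer status once and dispatches each channel to the cyclic,
 * error or descriptor-scan handler, then re-enables the interrupts that the
 * hard IRQ handler masked before scheduling the tasklet.
 */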
static void dw_dma_tasklet(unsigned long data)

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i)) {

					dwc_do_single_block(dwc, desc);

					spin_unlock_irqrestore(&dwc->lock, flags);

			spin_unlock_irqrestore(&dwc->lock, flags);

			dwc_scan_descriptors(dw, dwc);

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,

			"BUG: Unexpected interrupts pending: 0x%x\n",

	tasklet_schedule(&dw->tasklet);
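/*
 * dwc_tx_submit(): dmaengine ->tx_submit hook.  Assigns a cookie and either
 * starts the descriptor right away if the channel is idle, or appends it to
 * dwc->queue to be started when the current transfer finishes.
 */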
	struct dw_desc *desc = txd_to_dw_desc(tx);

	cookie = dma_cookie_assign(tx);

		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,

		dwc_dostart(dwc, dwc_first_active(dwc));

		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,

	spin_unlock_irqrestore(&dwc->lock, flags);
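/*
 * dwc_prep_dma_memcpy(): build an LLI chain for a memory-to-memory copy.
 * The transfer width is the widest bus width allowed by the alignment of
 * src, dest and len, and the length is split into chunks no larger than the
 * channel's maximum block size, linked through lli.llp.
 */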
		size_t len, unsigned long flags)

	unsigned int src_width;
	unsigned int dst_width;

			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,

		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);

	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
			dwc->dw->data_width[dwc_get_dms(dws)]);

	src_width = dst_width = min_t(unsigned int, data_width,
			dwc_fast_fls(src | dest | len));

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,

		desc = dwc_desc_get(dwc);

		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

			prev->lli.llp = desc->txd.phys;
					prev->txd.phys, sizeof(prev->lli),

			prev->txd.phys, sizeof(prev->lli),

	dwc_desc_put(dwc, first);
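/*
 * dwc_prep_slave_sg(): build LLI chains for slave (device) transfers.  For
 * mem-to-dev the register width and address come from the destination side
 * of the slave config and the memory width from each sg entry's alignment;
 * for dev-to-mem the roles are swapped.  Entries larger than the channel's
 * block size are split across several descriptors via the *_fill_desc loops.
 */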
		unsigned long flags, void *context)

	unsigned int reg_width;
	unsigned int mem_width;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

		data_width = dwc->dw->data_width[dwc_get_sms(dws)];

			mem_width = min_t(unsigned int,
					data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
					"not enough descriptors available\n");

			desc->lli.ctlhi = dlen >> mem_width;

				prev->lli.llp = desc->txd.phys;

				goto slave_sg_todev_fill_desc;

		data_width = dwc->dw->data_width[dwc_get_dms(dws)];

			mem_width = min_t(unsigned int,
					data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
					"not enough descriptors available\n");

			desc->lli.ctlhi = dlen >> reg_width;

				prev->lli.llp = desc->txd.phys;

				goto slave_sg_fromdev_fill_desc;

	if (flags & DMA_PREP_INTERRUPT)

			prev->txd.phys, sizeof(prev->lli),

	dwc_desc_put(dwc, first);
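/*
 * convert_burst(): translate a dmaengine maxburst value (in items) into the
 * SRC/DST_MSIZE register encoding: 1 -> 0, 4 -> 1, 8 -> 2, and so on.
 */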
static inline void convert_burst(u32 *maxburst)

		*maxburst = fls(*maxburst) - 2;
	unsigned long flags;

		spin_unlock_irqrestore(&dwc->lock, flags);

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_chan_disable(dw, dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);

			dwc_descriptor_complete(dwc, desc, false);
dwc_tx_status(struct dma_chan *chan,

	ret = dma_cookie_status(chan, cookie, txstate);

		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);

		dma_set_residue(txstate, dwc_first_active(dwc)->len);
static void dwc_issue_pending(struct dma_chan *chan)

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
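/*
 * dwc_alloc_chan_resources(): called when a client claims the channel.
 * Preallocates up to NR_DESCS_PER_CHANNEL descriptors, maps each LLI for
 * the device and parks them on the free list; returns how many descriptors
 * are available.
 */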
static int dwc_alloc_chan_resources(struct dma_chan *chan)

	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");

	dma_cookie_init(chan);

		spin_unlock_irqrestore(&dwc->lock, flags);

				"only allocated %d descriptors\n", i);

		INIT_LIST_HEAD(&desc->tx_list);
		desc->txd.tx_submit = dwc_tx_submit;
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
static void dwc_free_chan_resources(struct dma_chan *chan)

	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,

	spin_unlock_irqrestore(&dwc->lock, flags);

		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
				sizeof(desc->lli), DMA_TO_DEVICE);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");

			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);

	spin_unlock_irqrestore(&dwc->lock, flags);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
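/*
 * dw_dma_cyclic_prep(): set up a cyclic transfer over a circular buffer.
 * The buffer is split into buf_len / period_len periods, one descriptor per
 * period, and the last descriptor's LLP points back at the first so the
 * hardware loops until the transfer is stopped.  The period length and
 * buffer address must be aligned to the register width, and a period must
 * fit within the channel's maximum block size.
 */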
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned long flags;

		spin_unlock_irqrestore(&dwc->lock, flags);
				"channel doesn't support LLP transfers\n");

		spin_unlock_irqrestore(&dwc->lock, flags);
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);

		spin_unlock_irqrestore(&dwc->lock, flags);
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);

	retval = ERR_PTR(-EINVAL);

	periods = buf_len / period_len;

	if (period_len > (dwc->block_size << reg_width))
	if (unlikely(period_len & ((1 << reg_width) - 1)))
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))

	retval = ERR_PTR(-ENOMEM);

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
			goto out_err_desc_get;

		switch (direction) {
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.dar = buf_addr + (period_len * i);

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

			last->lli.llp = desc->txd.phys;
					last->txd.phys, sizeof(last->lli),

	last->lli.llp = cdesc->desc[0]->txd.phys;
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

		dwc_desc_put(dwc, cdesc->desc[i]);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	unsigned long flags;

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);
static void dw_dma_off(struct dw_dma *dw)

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
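/*
 * dw_probe(): platform probe.  Reads the hardware parameters (or takes them
 * from platform data), enables the clock, sets up one dw_dma_chan per
 * channel with its register block and block-size limit, fills in the
 * dmaengine callbacks and registers the DMA device.
 */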
	unsigned int dw_params;
	unsigned int nr_channels;
	unsigned int max_blk_size = 0;

	pdata = dev_get_platdata(&pdev->dev);

	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,

	platform_set_drvdata(pdev, dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		int r = nr_channels - i - 1;

		dma_cookie_init(&dwc->chan);

		list_add(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];

		INIT_LIST_HEAD(&dwc->queue);

			unsigned int dwc_params;

				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;

	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

			dev_name(&pdev->dev), nr_channels);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

static int dw_suspend_noirq(struct device *dev)

	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

static int dw_resume_noirq(struct device *dev)

	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,

	.shutdown = dw_shutdown,

	.pm = &dw_dev_pm_ops,

static int __init dw_init(void)

static void __exit dw_exit(void)