14 #include <linux/module.h>
16 #include <linux/slab.h>
/*
 * Per-channel register accessors for the 64-bit flavour of the TXx9
 * DMAC: read/write the named member of the channel's register block
 * (__dma_regs(dc)) using the raw, non-barriered MMIO primitives.
 * NOTE(review): __raw_* accessors do no byte-swapping or ordering —
 * presumably safe here because the DMAC is native-endian on-chip;
 * confirm against the platform's io.h conventions.
 */
38 #define channel64_readq(dc, name) \
39 __raw_readq(&(__dma_regs(dc)->name))
40 #define channel64_writeq(dc, name, val) \
41 __raw_writeq((val), &(__dma_regs(dc)->name))
42 #define channel64_readl(dc, name) \
43 __raw_readl(&(__dma_regs(dc)->name))
44 #define channel64_writel(dc, name, val) \
45 __raw_writel((val), &(__dma_regs(dc)->name))
/*
 * Per-channel register accessors for the 32-bit flavour of the DMAC.
 * Same pattern as the channel64_* macros, but through the 32-bit
 * register layout (__dma_regs32(dc)); only 32-bit accesses exist here.
 */
47 #define channel32_readl(dc, name) \
48 __raw_readl(&(__dma_regs32(dc)->name))
49 #define channel32_writel(dc, name, val) \
50 __raw_writel((val), &(__dma_regs32(dc)->name))
/*
 * 64-bit quantities only exist on the 64-bit controller, so the
 * generic channel_readq/writeq map straight to the channel64_* forms
 * with no runtime dispatch (unlike channel_readl/writel below).
 */
52 #define channel_readq(dc, name) channel64_readq(dc, name)
53 #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
54 #define channel_readl(dc, name) \
56 channel64_readl(dc, name) : channel32_readl(dc, name))
57 #define channel_writel(dc, name, val) \
59 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))
63 if (
sizeof(__dma_regs(dc)->
CHAR) ==
sizeof(
u64))
71 if (
sizeof(__dma_regs(dc)->
CHAR) ==
sizeof(
u64))
77 static void channel64_clear_CHAR(
const struct txx9dmac_chan *dc)
79 #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR)
90 return channel64_read_CHAR(dc);
98 channel64_write_CHAR(dc, val);
/*
 * Global (controller-wide, not per-channel) register accessors for the
 * 64-bit DMAC register layout (__txx9dmac_regs(ddev)), again via the
 * raw non-barriered MMIO primitives.
 */
115 #define dma64_readl(ddev, name) \
116 __raw_readl(&(__txx9dmac_regs(ddev)->name))
117 #define dma64_writel(ddev, name, val) \
118 __raw_writel((val), &(__txx9dmac_regs(ddev)->name))
/* Global register accessors for the 32-bit DMAC register layout. */
120 #define dma32_readl(ddev, name) \
121 __raw_readl(&(__txx9dmac_regs32(ddev)->name))
122 #define dma32_writel(ddev, name, val) \
123 __raw_writel((val), &(__txx9dmac_regs32(ddev)->name))
/*
 * Generic global register accessors: pick the 64- or 32-bit layout at
 * runtime based on __is_dmac64(ddev), so common code can stay agnostic
 * of which controller variant it is driving.
 */
125 #define dma_readl(ddev, name) \
126 (__is_dmac64(ddev) ? \
127 dma64_readl(ddev, name) : dma32_readl(ddev, name))
128 #define dma_writel(ddev, name, val) \
129 (__is_dmac64(ddev) ? \
130 dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))
134 return &chan->
dev->device;
138 return chan->
dev->device.parent;
/*
 * Upper bound used when splitting a memcpy into per-descriptor chunks
 * (see the xfer_count loop in prep_dma_memcpy). 0x04000000 = 64 MiB;
 * presumably the hardware CNTR field limit — confirm against the TXx9
 * DMAC manual.
 */
162 #define TXX9_DMA_MAX_COUNT 0x04000000
/*
 * Number of descriptors pre-allocated per channel when resources are
 * first requested (alloc_chan_resources logs "only allocated %d
 * descriptors" if it falls short of this target).
 */
164 #define TXX9_DMA_INITIAL_DESC_COUNT 64
185 if (!list_empty(&desc->
tx_list))
198 desc = kzalloc(
sizeof(*desc), flags);
201 INIT_LIST_HEAD(&desc->
tx_list);
203 desc->
txd.tx_submit = txx9dmac_tx_submit;
217 spin_lock_bh(&dc->
lock);
219 if (async_tx_test_ack(&desc->
txd)) {
224 dev_dbg(chan2dev(&dc->
chan),
"desc %p not ACKed\n", desc);
227 spin_unlock_bh(&dc->
lock);
229 dev_vdbg(chan2dev(&dc->
chan),
"scanned %u descriptors on freelist\n",
234 spin_lock_bh(&dc->
lock);
236 spin_unlock_bh(&dc->
lock);
239 "not enough descriptors available\n");
244 static void txx9dmac_sync_desc_for_cpu(
struct txx9dmac_chan *dc,
251 dma_sync_single_for_cpu(chan2parent(&dc->
chan),
252 child->
txd.
phys, ddev->descsize,
254 dma_sync_single_for_cpu(chan2parent(&dc->chan),
255 desc->
txd.phys, ddev->descsize,
269 txx9dmac_sync_desc_for_cpu(dc, desc);
271 spin_lock_bh(&dc->lock);
274 "moving child desc %
p to freelist\
n",
276 list_splice_init(&desc->
tx_list, &dc->free_list);
277 dev_vdbg(chan2dev(&dc->chan), "moving desc %
p to freelist\n",
279 list_add(&desc->desc_node, &dc->free_list);
280 spin_unlock_bh(&dc->lock);
290 " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
291 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
292 (
u64)channel64_read_CHAR(dc),
302 " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
303 " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
318 channel64_clear_CHAR(dc);
341 first->
txd.cookie, first);
345 "BUG: Attempted to start non-idle channel\n");
346 txx9dmac_dump_regs(dc);
371 channel64_write_CHAR(dc, first->
txd.phys);
389 if (txx9_dma_have_SMPCHN()) {
411 dev_vdbg(chan2dev(&dc->
chan),
"descriptor %u %p complete\n",
414 dma_cookie_complete(txd);
418 txx9dmac_sync_desc_for_cpu(dc, desc);
420 list_move(&desc->desc_node, &dc->
free_list);
425 dmaaddr = is_dmac64(dc) ?
435 dmaaddr = is_dmac64(dc) ?
439 dmaaddr, desc->
len, DMA_TO_DEVICE);
442 dmaaddr, desc->
len, DMA_TO_DEVICE);
461 BUG_ON(!list_empty(list));
463 desc = txx9dmac_first_queued(dc);
465 desc_write_CHAR(dc, prev, desc->
txd.phys);
470 prev = txx9dmac_last_child(desc);
471 list_move_tail(&desc->desc_node, list);
474 !txx9dmac_chan_INTENT(dc))
476 }
while (!list_empty(&dc->
queue));
489 if (!list_empty(&dc->
queue)) {
491 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
495 txx9dmac_descriptor_complete(dc, desc);
502 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN
503 dev_crit(chan2dev(&dc->chan),
504 " desc: ch%#llx s%#llx d%#llx c%#x\n",
505 (
u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
507 dev_crit(chan2dev(&dc->chan),
508 " desc: ch%#llx s%#llx d%#llx c%#x"
509 " si%#x di%#x cc%#x cs%#x\n",
510 (
u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
511 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
515 #ifdef TXX9_DMA_USE_SIMPLE_CHAIN
516 dev_crit(chan2dev(&dc->chan),
517 " desc: ch%#x s%#x d%#x c%#x\n",
518 d->CHAR, d->SAR, d->DAR, d->CNTR);
520 dev_crit(chan2dev(&dc->chan),
521 " desc: ch%#x s%#x d%#x c%#x"
522 " si%#x di%#x cc%#x cs%#x\n",
523 d->CHAR, d->SAR, d->DAR, d->CNTR,
524 d->SAIR, d->DAIR, d->CCR, d->CSR);
540 dev_crit(chan2dev(&dc->
chan),
"Abnormal Chain Completion\n");
541 txx9dmac_dump_regs(dc);
543 bad_desc = txx9dmac_first_active(dc);
544 list_del_init(&bad_desc->desc_node);
555 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
557 dev_crit(chan2dev(&dc->
chan),
558 "Bad descriptor submitted for DMA! (cookie: %d)\n",
559 bad_desc->
txd.cookie);
560 txx9dmac_dump_desc(dc, &bad_desc->
hwdesc);
562 txx9dmac_dump_desc(dc, &child->
hwdesc);
564 txx9dmac_descriptor_complete(dc, bad_desc);
575 chain = channel64_read_CHAR(dc);
586 txx9dmac_complete_all(dc);
592 dev_vdbg(chan2dev(&dc->chan),
"scan_descriptors: char=%#llx\n",
596 if (desc_read_CHAR(dc, desc) == chain) {
604 if (desc_read_CHAR(dc, child) == chain) {
615 txx9dmac_descriptor_complete(dc, desc);
619 txx9dmac_handle_error(dc, csr);
624 "BUG: All descriptors done, but channel not idle!\n");
627 txx9dmac_reset_chan(dc);
629 if (!list_empty(&dc->queue)) {
630 txx9dmac_dequeue(dc, &dc->active_list);
631 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
635 static void txx9dmac_chan_tasklet(
unsigned long data)
643 dev_vdbg(chan2dev(&dc->
chan),
"tasklet: status=%x\n", csr);
645 spin_lock(&dc->
lock);
648 txx9dmac_scan_descriptors(dc);
649 spin_unlock(&dc->
lock);
659 dev_vdbg(chan2dev(&dc->
chan),
"interrupt: status=%#x\n",
662 tasklet_schedule(&dc->
tasklet);
672 static void txx9dmac_tasklet(
unsigned long data)
683 dev_vdbg(ddev->
chan[0]->dma.dev,
"tasklet: mcr=%x\n", mcr);
685 if ((mcr >> (24 + i)) & 0x11) {
690 spin_lock(&dc->
lock);
693 txx9dmac_scan_descriptors(dc);
694 spin_unlock(&dc->
lock);
702 static irqreturn_t txx9dmac_interrupt(
int irq,
void *dev_id)
706 dev_vdbg(ddev->
chan[0]->dma.dev,
"interrupt: status=%#x\n",
709 tasklet_schedule(&ddev->
tasklet);
727 spin_lock_bh(&dc->
lock);
728 cookie = dma_cookie_assign(tx);
730 dev_vdbg(chan2dev(tx->
chan),
"tx_submit: queued %u %p\n",
731 desc->
txd.cookie, desc);
734 spin_unlock_bh(&dc->
lock);
741 size_t len,
unsigned long flags)
751 dev_vdbg(chan2dev(chan),
"prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
752 (
u64)dest, (
u64)src, len, flags);
755 dev_dbg(chan2dev(chan),
"prep_dma_memcpy: length is zero!\n");
761 for (offset = 0; offset <
len; offset += xfer_count) {
767 if (__is_dmac64(ddev)) {
768 if (xfer_count > 0x100 &&
769 (xfer_count & 0xff) >= 0xfa &&
770 (xfer_count & 0xff) <= 0xff)
773 if (xfer_count > 0x80 &&
774 (xfer_count & 0x7f) >= 0x7e &&
775 (xfer_count & 0x7f) <= 0x7f)
779 desc = txx9dmac_desc_get(dc);
781 txx9dmac_desc_put(dc, first);
785 if (__is_dmac64(ddev)) {
788 desc->
hwdesc.CNTR = xfer_count;
789 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
795 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
809 desc_write_CHAR(dc, prev, desc->
txd.phys);
820 txx9dmac_desc_set_INTENT(ddev, prev);
822 desc_write_CHAR(dc, prev, 0);
836 unsigned long flags,
void *
context)
846 dev_vdbg(chan2dev(chan),
"prep_dma_slave\n");
863 desc = txx9dmac_desc_get(dc);
865 txx9dmac_desc_put(dc, first);
871 if (__is_dmac64(ddev)) {
897 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
903 desc_write_CHAR(dc, prev, desc->
txd.phys);
914 if (flags & DMA_PREP_INTERRUPT)
915 txx9dmac_desc_set_INTENT(ddev, prev);
917 desc_write_CHAR(dc, prev, 0);
939 dev_vdbg(chan2dev(chan),
"terminate_all\n");
940 spin_lock_bh(&dc->
lock);
942 txx9dmac_reset_chan(dc);
945 list_splice_init(&dc->
queue, &list);
948 spin_unlock_bh(&dc->
lock);
952 txx9dmac_descriptor_complete(dc, desc);
964 ret = dma_cookie_status(chan, cookie, txstate);
966 spin_lock_bh(&dc->
lock);
967 txx9dmac_scan_descriptors(dc);
968 spin_unlock_bh(&dc->
lock);
970 ret = dma_cookie_status(chan, cookie, txstate);
983 prev = txx9dmac_last_child(prev);
984 txx9dmac_dequeue(dc, &list);
986 desc_write_CHAR(dc, prev, desc->
txd.phys);
992 channel_read_CHAR(dc) == prev->
txd.phys)
994 channel_write_CHAR(dc, desc->
txd.phys);
998 static void txx9dmac_issue_pending(
struct dma_chan *chan)
1002 spin_lock_bh(&dc->
lock);
1005 txx9dmac_scan_descriptors(dc);
1006 if (!list_empty(&dc->
queue)) {
1009 txx9dmac_dostart(dc, txx9dmac_first_active(dc));
1010 }
else if (txx9_dma_have_SMPCHN()) {
1013 if (!(prev->
txd.flags & DMA_PREP_INTERRUPT) ||
1014 txx9dmac_chan_INTENT(dc))
1015 txx9dmac_chain_dynamic(dc, prev);
1019 spin_unlock_bh(&dc->
lock);
1022 static int txx9dmac_alloc_chan_resources(
struct dma_chan *chan)
1029 dev_vdbg(chan2dev(chan),
"alloc_chan_resources\n");
1033 dev_dbg(chan2dev(chan),
"DMA channel not idle?\n");
1037 dma_cookie_init(chan);
1040 txx9dmac_chan_set_SMPCHN(dc);
1043 if (chan->
device->device_prep_dma_memcpy) {
1053 txx9dmac_chan_set_INTENT(dc);
1056 spin_lock_bh(&dc->
lock);
1059 spin_unlock_bh(&dc->
lock);
1064 "only allocated %d descriptors\n", i);
1065 spin_lock_bh(&dc->
lock);
1068 txx9dmac_desc_put(dc, desc);
1070 spin_lock_bh(&dc->
lock);
1073 spin_unlock_bh(&dc->
lock);
1076 "alloc_chan_resources allocated %d descriptors\n", i);
1081 static void txx9dmac_free_chan_resources(
struct dma_chan *chan)
1088 dev_dbg(chan2dev(chan),
"free_chan_resources (descs allocated=%u)\n",
1096 spin_lock_bh(&dc->
lock);
1097 list_splice_init(&dc->
free_list, &list);
1099 spin_unlock_bh(&dc->
lock);
1102 dev_vdbg(chan2dev(chan),
" freeing descriptor %p\n", desc);
1108 dev_vdbg(chan2dev(chan),
"free_chan_resources done\n");
1133 dc->
dma.dev = &pdev->
dev;
1134 dc->
dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
1135 dc->
dma.device_free_chan_resources = txx9dmac_free_chan_resources;
1136 dc->
dma.device_control = txx9dmac_control;
1137 dc->
dma.device_tx_status = txx9dmac_tx_status;
1138 dc->
dma.device_issue_pending = txx9dmac_issue_pending;
1140 dc->
dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
1143 dc->
dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
1148 INIT_LIST_HEAD(&dc->
dma.channels);
1149 dc->
ddev = platform_get_drvdata(dmac_dev);
1150 if (dc->
ddev->irq < 0) {
1157 err = devm_request_irq(&pdev->
dev, dc->
irq,
1158 txx9dmac_chan_interrupt, 0, dev_name(&pdev->
dev), dc);
1166 dma_cookie_init(&dc->
chan);
1169 dc->
ch_regs = &__txx9dmac_regs(dc->
ddev)->CHAN[ch];
1171 dc->
ch_regs = &__txx9dmac_regs32(dc->
ddev)->CHAN[ch];
1175 INIT_LIST_HEAD(&dc->
queue);
1178 txx9dmac_reset_chan(dc);
1180 platform_set_drvdata(pdev, dc);
1185 dev_dbg(&pdev->
dev,
"TXx9 DMA Channel (dma%d%s%s)\n",
1221 dev_name(&pdev->
dev)))
1228 if (__is_dmac64(ddev))
1237 if (ddev->
irq >= 0) {
1239 (
unsigned long)ddev);
1240 err = devm_request_irq(&pdev->
dev, ddev->
irq,
1241 txx9dmac_interrupt, 0, dev_name(&pdev->
dev), ddev);
1251 platform_set_drvdata(pdev, ddev);
1257 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1267 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1272 static int txx9dmac_suspend_noirq(
struct device *
dev)
1275 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1281 static int txx9dmac_resume_noirq(
struct device *
dev)
1284 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
1296 static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
1297 .suspend_noirq = txx9dmac_suspend_noirq,
1298 .resume_noirq = txx9dmac_resume_noirq,
1302 .remove =
__exit_p(txx9dmac_chan_remove),
1304 .name =
"txx9dmac-chan",
1309 .remove =
__exit_p(txx9dmac_remove),
1310 .shutdown = txx9dmac_shutdown,
1313 .pm = &txx9dmac_dev_pm_ops,
1317 static int __init txx9dmac_init(
void)
1324 txx9dmac_chan_probe);
1332 static void __exit txx9dmac_exit(
void)