#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/device.h>
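
/*
 * Register bit definitions for the Marvell MMP/PXA peripheral DMA
 * controller: DCSR is the per-channel control/status register, DRCMR
 * maps a peripheral request line onto a channel, DDADR holds the next
 * descriptor address and DCMD describes one transfer.
 */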
#define DCSR_RUN        (1 << 31)  /* Run Bit (read / write) */
#define DCSR_NODESC     (1 << 30)  /* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN  (1 << 29)  /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND    (1 << 8)   /* Request Pending (read-only) */
#define DCSR_STOPSTATE  (1 << 3)   /* Stop State (read-only) */
#define DCSR_ENDINTR    (1 << 2)   /* End Interrupt (read / write) */
#define DCSR_STARTINTR  (1 << 1)   /* Start Interrupt (read / write) */
#define DCSR_BUSERR     (1 << 0)   /* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN   (1 << 28)  /* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN   (1 << 27)  /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN  (1 << 26)  /* STOP on an EOR */
#define DCSR_SETCMPST   (1 << 25)  /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST   (1 << 24)  /* Clear Descriptor Compare Status */
#define DCSR_CMPST      (1 << 10)  /* The Descriptor Compare Status */
#define DCSR_EORINTR    (1 << 9)   /* The end of Receive */

#define DRCMR_MAPVLD    (1 << 7)   /* Map Valid (read / write) */
#define DRCMR_CHLNUM    0x1f       /* mask for Channel Number (read / write) */

#define DDADR_DESCADDR  0xfffffff0 /* Address of next descriptor (mask) */
#define DDADR_STOP      (1 << 0)   /* Stop (read / write) */

#define DCMD_INCSRCADDR (1 << 31)  /* Source Address Increment Setting */
#define DCMD_INCTRGADDR (1 << 30)  /* Target Address Increment Setting */
#define DCMD_FLOWSRC    (1 << 29)  /* Flow Control by the source */
#define DCMD_FLOWTRG    (1 << 28)  /* Flow Control by the target */
#define DCMD_STARTIRQEN (1 << 22)  /* Start Interrupt Enable */
#define DCMD_ENDIRQEN   (1 << 21)  /* End Interrupt Enable */
#define DCMD_ENDIAN     (1 << 18)  /* Device Endian-ness */
#define DCMD_BURST8     (1 << 16)  /* 8 byte burst */
#define DCMD_BURST16    (2 << 16)  /* 16 byte burst */
#define DCMD_BURST32    (3 << 16)  /* 32 byte burst */
#define DCMD_WIDTH1     (1 << 14)  /* 1 byte width */
#define DCMD_WIDTH2     (2 << 14)  /* 2 byte width */
#define DCMD_WIDTH4     (3 << 14)  /* 4 byte width */
#define DCMD_LENGTH     0x01fff    /* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT          3
#define PDMA_MAX_DESC_BYTES     0x1000  /* maximum bytes per hardware descriptor */
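
/*
 * Convert the dmaengine core objects embedded in this driver's structures
 * back to the enclosing driver-private types.
 */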
#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
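
        /*
         * Compute the DRCMR register offset for this request line:
         * requests 0-63 live at offset 0x0100, requests 64 and above
         * at 0x1100, one 32-bit register per request line.
         */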
        reg = phy->vchan->drcmr;
        reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);

        if (dint & BIT(phy->idx)) {

        if (clear_chan_irq(phy) == 0) {
                tasklet_schedule(&phy->vchan->tasklet);
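
/*
 * Controller-level interrupt handler: the channels may share a single
 * IRQ, so walk the channels with an interrupt pending and hand each one
 * to mmp_pdma_chan_handler().
 */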
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)

        ret = mmp_pdma_chan_handler(irq, phy);
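
        /*
         * Physical channels are grouped four to a priority level, and the
         * grouping repeats every 16 channels; walk the priority groups in
         * order and take the first free channel that matches.
         */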
        for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {

                if (prio != ((i & 0xf) >> 2))

        dev_dbg(chan->dev, "DMA controller still busy\n");

        chan->phy = lookup_phy(chan);

        cookie = dma_cookie_assign(&child->async_tx);

        append_pending_queue(chan, desc);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dev_err(chan->dev, "out of memory for link descriptor\n");

        memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);

        desc->async_tx.tx_submit = mmp_pdma_tx_submit;
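
/* Allocate the per-channel descriptor pool (a dma_pool) that hardware descriptors are carved from. */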
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)

        dev_err(chan->dev, "unable to allocate descriptor pool\n");

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)

        spin_unlock_irqrestore(&chan->desc_lock, flags);
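
/*
 * Build a chain of hardware descriptors for a memory-to-memory copy:
 * each descriptor moves at most PDMA_MAX_DESC_BYTES and descriptors are
 * linked through their DDADR (next-descriptor address) field.
 */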
mmp_pdma_prep_memcpy(struct dma_chan *dchan,

                size_t len, unsigned long flags)

        new->desc.dsadr = dma_src;
        new->desc.dtadr = dma_dst;

        prev->desc.ddadr = new->async_tx.phys;

        new->async_tx.cookie = 0;
        async_tx_ack(&new->async_tx);

        mmp_pdma_free_desc_list(chan, &first->tx_list);

                unsigned long flags, void *context)

        if ((sgl == NULL) || (sg_len == 0))

        new->desc.dsadr = addr;

        new->desc.dtadr = addr;

        prev->desc.ddadr = new->async_tx.phys;

        new->async_tx.cookie = 0;
        async_tx_ack(&new->async_tx);

        mmp_pdma_free_desc_list(chan, &first->tx_list);

        u32 maxburst = 0, addr = 0;

        disable_chan(chan->phy);

        spin_unlock_irqrestore(&chan->desc_lock, flags);
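
        /* translate the requested burst size into the DCMD burst-size bits */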
        else if (maxburst == 16)

        else if (maxburst == 32)

        ret = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

static void mmp_pdma_issue_pending(struct dma_chan *dchan)

        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
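
/*
 * Completion tasklet: scheduled from the channel interrupt handler, it
 * completes finished descriptors, runs their callbacks and restarts
 * whatever is still pending on the channel.
 */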
static void dma_do_tasklet(unsigned long data)

        dma_cookie_complete(&desc->async_tx);

        dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);

        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        ret = devm_request_irq(pdev->dev, irq,

        dev_err(pdev->dev, "channel request irq fail!\n");
        { .compatible = "marvell,pdma-1.0", },

        int dma_channels = 0, irq_num = 0;

        of_property_read_u32(pdev->dev->of_node,
                        "#dma-channels", &dma_channels);

        for (i = 0; i < dma_channels; i++) {

        INIT_LIST_HEAD(&pdev->device.channels);

        if (irq_num != dma_channels) {

                ret = devm_request_irq(pdev->dev, irq,

        for (i = 0; i < dma_channels; i++) {

                ret = mmp_pdma_chan_init(pdev, i, irq);
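
        /* hook this driver's operations into the dmaengine dma_device */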
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
        pdev->device.device_tx_status = mmp_pdma_tx_status;
        pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
        pdev->device.device_control = mmp_pdma_control;
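
        /* if the platform provided a coherent DMA mask, reuse it for the streaming DMA mask as well */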
        if (pdev->dev->coherent_dma_mask)

                .of_match_table = mmp_pdma_dt_ids,

        .id_table = mmp_pdma_id_table,
        .probe = mmp_pdma_probe,