#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
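
/* DMA Registers */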
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)
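
/* the following exist only if has_ext_regs is set */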
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
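
/* Descriptor mode bits */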
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)

#define CPDMA_TEARDOWN_VALUE	0xfffffffc
static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan
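
/*
 * Register/descriptor accessors: controller registers and hardware
 * descriptors live in iomapped memory, hence the __raw_* I/O helpers.
 */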
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
	spin_unlock_irqrestore(&pool->lock, flags);
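
	/* cpdma_desc_alloc: proceed only if a free descriptor slot was found in the pool */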
	if (index < pool->num_desc) {

	spin_unlock_irqrestore(&pool->lock, flags);
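
	/* cpdma_desc_free: derive the descriptor's pool index from its offset within the iomapped pool */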
	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;
	spin_unlock_irqrestore(&pool->lock, flags);
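
	/* cpdma_ctlr_create: set up the descriptor pool from the platform-supplied memory parameters */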
	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ctlr->params.has_soft_reset) {

	for (i = 0; i < ctlr->num_chan; i++) {
	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);
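
	/* cpdma_ctlr_dump: log controller state and the DMA register file */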
	dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);

	dev_info(dev, "CPDMA: txcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_TXCONTROL));
	dev_info(dev, "CPDMA: txteardown: %x",
		 dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
	dev_info(dev, "CPDMA: rxcontrol: %x",
		 dma_reg_read(ctlr, CPDMA_RXCONTROL));
	dev_info(dev, "CPDMA: softreset: %x",
		 dma_reg_read(ctlr, CPDMA_SOFTRESET));
	dev_info(dev, "CPDMA: rxteardown: %x",
		 dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
	dev_info(dev, "CPDMA: txintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
	dev_info(dev, "CPDMA: txintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
	dev_info(dev, "CPDMA: txintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
	dev_info(dev, "CPDMA: txintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
	dev_info(dev, "CPDMA: macinvector: %x",
		 dma_reg_read(ctlr, CPDMA_MACINVECTOR));
	dev_info(dev, "CPDMA: maceoivector: %x",
		 dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
	dev_info(dev, "CPDMA: rxintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
	dev_info(dev, "CPDMA: rxintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
	dev_info(dev, "CPDMA: rxintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
	dev_info(dev, "CPDMA: rxintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
	dev_info(dev, "CPDMA: dmaintstatraw: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
	dev_info(dev, "CPDMA: dmaintstatmasked: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
	dev_info(dev, "CPDMA: dmaintmaskset: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
	dev_info(dev, "CPDMA: dmaintmaskclear: %x",
		 dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));

	if (!ctlr->params.has_ext_regs) {
		dev_info(dev, "CPDMA: dmacontrol: %x",
			 dma_reg_read(ctlr, CPDMA_DMACONTROL));
		dev_info(dev, "CPDMA: dmastatus: %x",
			 dma_reg_read(ctlr, CPDMA_DMASTATUS));
		dev_info(dev, "CPDMA: rxbuffofs: %x",
			 dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	cpdma_desc_pool_destroy(ctlr->pool);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	spin_unlock_irqrestore(&chan->lock, flags);
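
	/* cpdma_chan_dump: log channel identity, state, and statistics counters */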
	dev_info(dev, "channel %d (%s %d) state %s",
		 chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
		 chan_linear(chan), cpdma_state_str[chan->state]);
	dev_info(dev, "\tstats head_enqueue: %d\n",
		 chan->stats.head_enqueue);
	dev_info(dev, "\tstats tail_enqueue: %d\n",
		 chan->stats.tail_enqueue);
	dev_info(dev, "\tstats pad_enqueue: %d\n",
		 chan->stats.pad_enqueue);
	dev_info(dev, "\tstats misqueued: %d\n",
		 chan->stats.misqueued);
	dev_info(dev, "\tstats desc_alloc_fail: %d\n",
		 chan->stats.desc_alloc_fail);
	dev_info(dev, "\tstats pad_alloc_fail: %d\n",
		 chan->stats.pad_alloc_fail);
	dev_info(dev, "\tstats runt_receive_buff: %d\n",
		 chan->stats.runt_receive_buff);
	dev_info(dev, "\tstats runt_transmit_buff: %d\n",
		 chan->stats.runt_transmit_buff);
	dev_info(dev, "\tstats empty_dequeue: %d\n",
		 chan->stats.empty_dequeue);
	dev_info(dev, "\tstats busy_dequeue: %d\n",
		 chan->stats.busy_dequeue);
	dev_info(dev, "\tstats good_dequeue: %d\n",
		 chan->stats.good_dequeue);
	dev_info(dev, "\tstats requeue: %d\n",
		 chan->stats.requeue);
	dev_info(dev, "\tstats teardown_dequeue: %d\n",
		 chan->stats.teardown_dequeue);
	spin_unlock_irqrestore(&chan->lock, flags);
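
	/*
	 * __cpdma_chan_submit: the counters record whether the descriptor
	 * started an empty queue (head_enqueue), was chained to the tail
	 * (tail_enqueue), or needed a restart after the hardware hit
	 * end-of-queue while we were appending (misqueued).
	 */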
	desc_dma = desc_phys(pool, desc);

	chan->stats.head_enqueue++;

	chan->stats.tail_enqueue++;

	chan->stats.misqueued++;
	desc = cpdma_desc_alloc(ctlr->pool, 1);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
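
	/* pad runt packets up to the hardware's minimum transmit size */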
	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	__cpdma_chan_submit(chan, desc);
	spin_unlock_irqrestore(&chan->lock, flags);
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
	token = (void *)desc_read(desc, sw_token);
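
	/* return the descriptor to the pool before calling the completion handler */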
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
static int __cpdma_chan_process(struct cpdma_chan *chan)
	chan->stats.empty_dequeue++;
	desc_dma = desc_phys(pool, desc);
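
	/* the low 11 bits of the descriptor status word hold the packet length */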
	outlen = status & 0x7ff;
	chan->stats.busy_dequeue++;

	chan->stats.good_dequeue++;

	chan->stats.requeue++;

	spin_unlock_irqrestore(&chan->lock, flags);
	__cpdma_chan_free(chan, desc, outlen, status);

	spin_unlock_irqrestore(&chan->lock, flags);
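
	/* cpdma_chan_process: complete finished descriptors, bounded by the caller's quota */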
	while (used < quota) {
		ret = __cpdma_chan_process(chan);
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);

	ret = __cpdma_chan_process(chan);
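
	/*
	 * Teardown path: descriptors still queued when the channel is
	 * stopped are completed with -ENOSYS, dropping the channel lock
	 * around the callback.
	 */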
	chan->head = desc_from_phys(pool, next_dma);

	chan->stats.teardown_dequeue++;

	spin_unlock_irqrestore(&chan->lock, flags);
	__cpdma_chan_free(chan, desc, 0, -ENOSYS);
	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);

	spin_unlock_irqrestore(&chan->lock, flags);
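
/* field access permissions for the cpdma control get/set helpers below */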
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
	if (!ctlr->params.has_ext_regs)

	if (control < 0 || control >= ARRAY_SIZE(controls))

	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (!ctlr->params.has_ext_regs)

	if (control < 0 || control >= ARRAY_SIZE(controls))
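
	/* read-modify-write: mask the new value to the field width, then shift it into position */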
	val |= (value & info->mask) << info->shift;

	spin_unlock_irqrestore(&ctlr->lock, flags);