12 #include <linux/sched.h>
13 #include <linux/device.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
21 #include <linux/slab.h>
/*
 * Transfer size limits: names suggest the maximum length programmable
 * into one hardware buffer descriptor, and the granularity used when
 * splitting larger requests — confirm against the driver body.
 */
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

/*
 * Offsets of the set / clear / read views of the DMA control/status
 * register within a physical channel's register block.
 */
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c

/* DCSR bit masks (A/B refer to the two hardware buffer slots). */
#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

/* DDAR (device address register) configuration bits. */
#define DDAR_RW		(1 << 0)
#define DDAR_E		(1 << 1)
#define DDAR_BS		(1 << 2)
#define DDAR_DW		(1 << 3)

/*
 * DDAR device selector values (bits 7:4), one per serial peripheral
 * transmit/receive pair.
 */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
116 #ifdef CONFIG_PM_SLEEP
160 dev_vdbg(p->
dev->slave.dev,
"pchan %u: txd %p[%x]: starting: DDAR:%x\n",
191 if (txn && txn->
ddar == txd->
ddar) {
193 sa11x0_dma_start_desc(p, txn);
218 writel_relaxed(sg->
addr, base + dbsx);
219 writel_relaxed(sg->
len, base + dbtx);
222 dev_dbg(p->
dev->slave.dev,
"pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
235 vchan_cookie_complete(&txd->
vd);
241 tasklet_schedule(&p->
dev->task);
244 vchan_cyclic_callback(&txd->
vd);
251 sa11x0_dma_start_sg(p, c);
272 dev_err(d->
slave.dev,
"pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
295 sa11x0_dma_complete(p, c);
297 sa11x0_dma_complete(p, c);
299 spin_unlock_irqrestore(&c->
vc.lock, flags);
313 sa11x0_dma_start_desc(p, txd);
327 sa11x0_dma_start_sg(p, c);
328 sa11x0_dma_start_sg(p, c);
332 static void sa11x0_dma_tasklet(
unsigned long arg)
337 unsigned pch, pch_alloc = 0;
342 spin_lock_irq(&c->
vc.lock);
345 sa11x0_dma_start_txd(c);
355 spin_unlock_irq(&c->
vc.lock);
358 spin_lock_irq(&d->
lock);
365 list_del_init(&c->
node);
367 pch_alloc |= 1 << pch;
375 spin_unlock_irq(&d->
lock);
378 if (pch_alloc & (1 << pch)) {
382 spin_lock_irq(&c->
vc.lock);
385 sa11x0_dma_start_txd(c);
386 spin_unlock_irq(&c->
vc.lock);
394 static int sa11x0_dma_alloc_chan_resources(
struct dma_chan *
chan)
399 static void sa11x0_dma_free_chan_resources(
struct dma_chan *
chan)
406 list_del_init(&c->
node);
407 spin_unlock_irqrestore(&d->
lock, flags);
409 vchan_free_chan_resources(&c->
vc);
438 ret = dma_cookie_status(&c->
vc.chan, cookie, state);
475 for (i = 0; i < txd->
sglen; i++) {
477 i, txd->
sg[i].addr, txd->
sg[i].len);
478 if (addr >= txd->
sg[i].addr &&
479 addr < txd->sg[i].
addr + txd->
sg[i].len) {
482 len = txd->
sg[
i].len -
483 (addr - txd->
sg[
i].addr);
491 for (; i < txd->
sglen; i++) {
493 i, txd->
sg[i].addr, txd->
sg[i].len);
494 bytes += txd->
sg[
i].len;
499 spin_unlock_irqrestore(&c->
vc.lock, flags);
511 static void sa11x0_dma_issue_pending(
struct dma_chan *chan)
518 if (vchan_issue_pending(&c->
vc)) {
521 if (list_empty(&c->
node)) {
523 tasklet_schedule(&d->
task);
526 spin_unlock(&d->
lock);
530 spin_unlock_irqrestore(&c->
vc.lock, flags);
540 unsigned i,
j = sglen;
545 dev_err(chan->
device->dev,
"vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
561 dev_dbg(chan->
device->dev,
"vchan %p: bad buffer alignment: %08x\n",
567 txd = kzalloc(
sizeof(*txd) + j *
sizeof(txd->
sg[0]),
GFP_ATOMIC);
597 txd->
sg[
j].len = tlen;
609 dev_dbg(chan->
device->dev,
"vchan %p: txd %p: size %u nr %u\n",
612 return vchan_tx_prep(&c->
vc, &txd->
vd, flags);
625 dev_err(chan->
device->dev,
"vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
631 sglen = size * sgperiod /
period;
637 txd = kzalloc(
sizeof(*txd) + sglen *
sizeof(txd->
sg[0]),
GFP_ATOMIC);
643 for (i = k = 0; i < size /
period; i++) {
644 size_t tlen, len =
period;
646 for (j = 0; j < sgperiod; j++, k++) {
655 txd->
sg[
k].len = tlen;
693 (maxburst != 4 && maxburst != 8))
701 dev_dbg(c->
vc.chan.device->dev,
"vchan %p: dma_slave_config addr %x width %u burst %u\n",
702 &c->
vc, addr, width, maxburst);
704 c->
ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
727 vchan_get_all_descriptors(&c->
vc, &
head);
750 spin_unlock(&d->
lock);
751 tasklet_schedule(&d->
task);
753 spin_unlock_irqrestore(&c->
vc.lock, flags);
769 list_del_init(&c->
node);
770 spin_unlock(&d->
lock);
773 spin_unlock_irqrestore(&c->
vc.lock, flags);
786 }
else if (!list_empty(&c->
vc.desc_issued)) {
789 spin_unlock(&d->
lock);
792 spin_unlock_irqrestore(&c->
vc.lock, flags);
809 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
812 CD(Ser0UDCRc, DDAR_RW),
814 CD(Ser1SDLCRc, DDAR_RW),
816 CD(Ser1UARTRc, DDAR_RW),
818 CD(Ser2ICPRc, DDAR_RW),
820 CD(Ser3UARTRc, DDAR_RW),
822 CD(Ser4MCP0Rc, DDAR_RW),
824 CD(Ser4MCP1Rc, DDAR_RW),
826 CD(Ser4SSPRc, DDAR_RW),
843 for (i = 0; i < dmadev->
chancnt; i++) {
848 dev_err(dev,
"no memory for channel %u\n", i);
855 INIT_LIST_HEAD(&c->
node);
857 c->
vc.desc_free = sa11x0_dma_free_desc;
872 return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->
dev), data);
883 static void sa11x0_dma_free_channels(
struct dma_device *dmadev)
933 ret = sa11x0_dma_request_irq(pdev, i, p);
937 sa11x0_dma_free_irq(pdev, i, &d->
phy[i]);
945 d->
slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
946 d->
slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
947 ret = sa11x0_dma_init_dmadev(&d->
slave, &pdev->
dev);
949 dev_warn(d->
slave.dev,
"failed to register slave async device: %d\n",
954 platform_set_drvdata(pdev, d);
958 sa11x0_dma_free_channels(&d->
slave);
960 sa11x0_dma_free_irq(pdev, i, &d->
phy[i]);
977 sa11x0_dma_free_channels(&d->
slave);
979 sa11x0_dma_free_irq(pdev, pch, &d->
phy[pch]);
987 #ifdef CONFIG_PM_SLEEP
988 static int sa11x0_dma_suspend(
struct device *dev)
995 u32 dcsr, saved_dcsr;
1003 saved_dcsr &= DCSR_RUN |
DCSR_IE;
1018 p->dcsr = saved_dcsr;
1026 static int sa11x0_dma_resume(
struct device *dev)
1059 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1060 .suspend_noirq = sa11x0_dma_suspend,
1061 .resume_noirq = sa11x0_dma_resume,
1062 .freeze_noirq = sa11x0_dma_suspend,
1063 .thaw_noirq = sa11x0_dma_resume,
1064 .poweroff_noirq = sa11x0_dma_suspend,
1065 .restore_noirq = sa11x0_dma_resume,
1070 .name =
"sa11x0-dma",
1072 .pm = &sa11x0_dma_pm_ops,
1074 .probe = sa11x0_dma_probe,
1080 if (chan->
device->dev->driver == &sa11x0_dma_driver.
driver) {
1082 const char *p =
param;
1090 static int __init sa11x0_dma_init(
void)
1096 static void __exit sa11x0_dma_exit(
void)