#include <linux/pci.h>
#include <linux/module.h>
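
/* PCI device IDs for the DMA controllers this driver supports */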
#define INTEL_MID_DMAC1_ID	0x0814
#define INTEL_MID_DMAC2_ID	0x0813
#define INTEL_MID_GP_DMAC2_ID	0x0827
#define INTEL_MFLD_DMAC1_ID	0x0830
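
/*
 * Langwell peripheral interrupt mask window: physical base and size of
 * the mapping, plus the status and mask register offsets inside it.
 */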
#define LNW_PERIPHRAL_MASK_BASE	0xFFAE8008
#define LNW_PERIPHRAL_MASK_SIZE	0x10
#define LNW_PERIPHRAL_STATUS	0x0
#define LNW_PERIPHRAL_MASK	0x8
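
/*
 * INFO() packs the per-controller probe parameters (channel count,
 * channel base, maximum block size, peripheral interrupt mask) into a
 * kernel_ulong_t suitable for the driver_data of a pci_device_id entry.
 */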
#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \
	((kernel_ulong_t)&(struct intel_mid_dma_probe_info) {	\
		.max_chan = (_max_chan),			\
		.ch_base = (_ch_base),				\
		.block_size = (_block_size),			\
		.pimr_mask = (_pimr_mask),			\
	})
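
/*
 * Illustrative PCI ID table entry (parameter values hypothetical):
 *	{ PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x8) },
 */

/*
 * get_ch_index - return the index of the lowest pending channel in
 * *status (bit positions offset by @base), clearing its status bit.
 */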
static int get_ch_index(int *status, unsigned int base)
		if (*status & (1 << (i + base))) {
			*status &= ~(1 << (i + base));
			pr_debug("MDMA: index %d New status %x\n", i, *status);
static int get_block_ts(int len, int tx_width, int block_size)
{
	int byte_width = 0, block_ts = 0;
	block_ts = len / byte_width;
	if (block_ts > block_size)
	dmac1_unmask_periphral_intr(midc);	/* unmask DMAC1's peripheral interrupt */
	spin_lock_bh(&midc->lock);
	if (async_tx_test_ack(&desc->txd)) {
		spin_unlock_bh(&midc->lock);
	spin_lock_bh(&midc->lock);
	spin_unlock_bh(&midc->lock);
		pr_err("ERR_MDMA: channel is busy in start\n");
	pr_debug("MDMA: TX SAR %x, DAR %x, CFGL %x, CFGH %x, CTLH %x, CTLL %x\n",
	void *param_txd = NULL;
	dma_cookie_complete(txd);
	spin_unlock_bh(&midc->lock);
		pr_debug("MDMA: TXD callback set ... calling\n");
		callback_txd(param_txd);
	spin_lock_bh(&midc->lock);
			midc_descriptor_complete(midc, desc);
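
/*
 * midc_lli_fill_sg - walk the scatterlist and fill in the descriptor's
 * hardware linked-list items (LLIs), chaining each entry's llp pointer
 * to the next entry.
 */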
	pr_debug("MDMA: Entered midc_lli_fill_sg\n");
	lli_bloc_desc = desc->lli;
		if (i != sglen - 1) {
			lli_next = lli_next +
				pr_debug("MDMA: LLI is configured in circular mode\n");
				ctl_lo.ctlx.llp_dst_en = 0;
				ctl_lo.ctlx.llp_src_en = 0;
					midc->dma->block_size);
		lli_bloc_desc->sar = sg_phy_addr;
		lli_bloc_desc->dar = sg_phy_addr;
		lli_bloc_desc->llp = lli_next;
	desc->sar = desc->lli->sar;
	desc->dar = desc->lli->dar;
	spin_lock_bh(&midc->lock);
	cookie = dma_cookie_assign(tx);
	midc_dostart(midc, desc);
	spin_unlock_bh(&midc->lock);
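
/* Push any descriptors waiting on the channel's queue to the hardware */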
static void intel_mid_dma_issue_pending(struct dma_chan *chan)
	spin_lock_bh(&midc->lock);
	if (!list_empty(&midc->queue))
		midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_lock_bh(&midc->lock);
	midc_scan_descriptors(to_middma_device(chan->device), midc);
	spin_unlock_bh(&midc->lock);
	ret = dma_cookie_status(chan, cookie, txstate);
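
/* dma_slave_control - apply a DMA_SLAVE_CONFIG request to this channel */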
static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
	pr_debug("MDMA: slave control called\n");
	mid_slave = to_intel_mid_dma_slave(slave);
static int intel_mid_dma_device_control(struct dma_chan *chan,
		enum dma_ctrl_cmd cmd, unsigned long arg)
		return dma_slave_control(chan, arg);
	spin_lock_bh(&midc->lock);
	if (!midc->busy) {
		spin_unlock_bh(&midc->lock);
	disable_dma_interrupt(midc);
	spin_unlock_bh(&midc->lock);
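
/*
 * intel_mid_dma_prep_memcpy - build a single-block transfer descriptor,
 * deriving the CFG and CTL register images from the slave parameters.
 */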
	pr_debug("MDMA: Prep for memcpy\n");
	midc = to_intel_mid_dma_chan(chan);
	pr_debug("MDMA: called for DMA %x CH %d Length %zu\n",
		 midc->dma->pci_id, midc->ch_id, len);
	pr_debug("MDMA: Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n",
		cfg_lo.cfgx.hs_sel_dst = 1;
		cfg_lo.cfgx.hs_sel_src = 1;
		cfg_lo.cfg_lo = 0x00000;
	if (midc->dma->pimr_mask) {
		cfg_hi.cfgx.protctl = 0x0;
		cfg_hi.cfgx.fifo_mode = 1;
		cfg_hi.cfgx.protctl = 0x1;
	ctl_hi.ctlx.block_ts = get_block_ts(len, width,
					midc->dma->block_size);
	pr_debug("MDMA: calc len %d for block size %d\n",
		 ctl_hi.ctlx.block_ts, midc->dma->block_size);
	pr_debug("MDMA: Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n",
	enable_dma_interrupt(midc);
	desc = midc_desc_get(midc);
	desc->cfg_lo = cfg_lo.cfg_lo;
	pr_err("ERR_MDMA: Failed to get desc\n");
	midc_desc_put(midc, desc);
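
/*
 * intel_mid_dma_prep_slave_sg - prepare a scatter-gather transfer:
 * reuse the memcpy prep for the register setup, then allocate an LLI
 * pool and fill it from the scatterlist.
 */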
			unsigned long flags, void *context)
	pr_debug("MDMA: Prep for slave SG\n");
		pr_err("MDMA: Invalid SG length\n");
	midc = to_intel_mid_dma_chan(chan);
	if (!midc->dma->pimr_mask) {
			txd = intel_mid_dma_prep_memcpy(chan,
			pr_warn("MDMA: SG list is not supported by this controller\n");
	pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
		 sg_len, direction, flags);
	txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
		pr_err("MDMA: Prep memcpy failed\n");
	desc = to_intel_mid_dma_desc(txd);
	/* enable linked-list chaining on both source and destination */
	ctl_lo.ctlx.llp_dst_en = 1;
	ctl_lo.ctlx.llp_src_en = 1;
	desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool",
		pr_err("MID_DMA: LLI pool create failed\n");
		pr_err("MID_DMA: LLI alloc failed\n");
	midc_lli_fill_sg(midc, desc, sgl, sg_len, flags);
	pr_debug("MDMA: Enabled block interrupt\n");
static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
	if (midc->busy) {
		pr_err("ERR_MDMA: trying to free ch in use\n");
	spin_lock_bh(&midc->lock);
	/* free descriptors from each of the channel's lists */
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
		pci_pool_free(mid->dma_pool, desc, desc->txd.phys);
	spin_unlock_bh(&midc->lock);
	pm_runtime_put(&mid->pdev->dev);
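
/*
 * intel_mid_dma_alloc_chan_resources - runtime-resume the device, check
 * that the channel is idle, then preallocate its descriptors.
 */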
static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
	pm_runtime_get_sync(&mid->pdev->dev);
		pr_err("ERR_MDMA: resume failed\n");
		pr_err("ERR_MDMA: ch not idle\n");
		pm_runtime_put(&mid->pdev->dev);
	dma_cookie_init(chan);
	spin_lock_bh(&midc->lock);
	spin_unlock_bh(&midc->lock);
			pr_err("ERR_MDMA: desc failed\n");
			pm_runtime_put(&mid->pdev->dev);
		desc->txd.tx_submit = intel_mid_dma_tx_submit;
		spin_lock_bh(&midc->lock);
		spin_unlock_bh(&midc->lock);
	pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i);
		midc_scan_descriptors(mid, midc);
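
/*
 * dma_tasklet - bottom half: map the raw transfer-complete and error
 * status bits to channel indices, then complete finished descriptors
 * and handle errors under the channel lock.
 */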
static void dma_tasklet(unsigned long data)
		pr_err("ERR_MDMA: tasklet Null param\n");
	status = raw_tfr | raw_block;
	i = get_ch_index(&status, mid->chan_base);
		pr_err("ERR_MDMA: Invalid ch index %x\n", i);
			pr_err("ERR_MDMA: Null param midc\n");
		pr_debug("MDMA: Tx complete interrupt %x, Ch No %d Index %d\n",
			 status, midc->ch_id, i);
		spin_lock_bh(&midc->lock);
		midc_scan_descriptors(mid, midc);
		pr_debug("MDMA: Scan of desc... complete, unmasking\n");
		spin_unlock_bh(&midc->lock);
		i = get_ch_index(&status, mid->chan_base);
			pr_err("ERR_MDMA: Invalid ch index %x\n", i);
			pr_err("ERR_MDMA: Null param midc\n");
		pr_debug("MDMA: Tx complete interrupt %x, Ch No %d Index %d\n",
			 status, midc->ch_id, i);
		spin_lock_bh(&midc->lock);
		midc_handle_error(mid, midc);
		spin_unlock_bh(&midc->lock);
	pr_debug("MDMA: Exiting tasklet...\n");
static void dma_tasklet1(unsigned long data)
	dma_tasklet(data);

static void dma_tasklet2(unsigned long data)
	dma_tasklet(data);
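
/*
 * Top-half IRQ handler: if either transfer or error status is pending,
 * schedule the tasklet to do the real work.
 */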
static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
	u32 tfr_status, err_status;
	int call_tasklet = 0;
	if (!tfr_status && !err_status)
	pr_debug("MDMA: Got an interrupt on irq %d\n", irq);
		pr_debug("MDMA: Calling tasklet %x\n", tfr_status);
		tasklet_schedule(&mid->tasklet);
static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data)
	return intel_mid_dma_interrupt(irq, data);

static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data)
	return intel_mid_dma_interrupt(irq, data);
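
/*
 * mid_setup_dma - create the descriptor pool, initialise every channel,
 * fill in the dmaengine callbacks and register the DMA device.
 */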
	dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev,
		pr_err("ERR_MDMA: pci_pool_create failed\n");
	INIT_LIST_HEAD(&dma->common.channels);
		pr_err("ERR_MDMA: Can't map peripheral intr space\n");
	for (i = 0; i < dma->max_chan; i++) {
		dma_cookie_init(&midch->chan);
		INIT_LIST_HEAD(&midch->queue);
		disable_dma_interrupt(midch);
	dma->common.device_alloc_chan_resources =
					intel_mid_dma_alloc_chan_resources;
	dma->common.device_free_chan_resources =
					intel_mid_dma_free_chan_resources;
	dma->common.device_tx_status = intel_mid_dma_tx_status;
	dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
	dma->common.device_issue_pending = intel_mid_dma_issue_pending;
	dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
	dma->common.device_control = intel_mid_dma_device_control;
		pr_debug("MDMA: Requesting irq shared for DMAC1\n");
		pr_debug("MDMA: Requesting irq for DMAC2\n");
		pr_err("ERR_MDMA: device_register failed: %d\n", err);
		pr_debug("MDMA: setting up tasklet1 for DMAC1\n");
		pr_debug("MDMA: setting up tasklet2 for DMAC2\n");
		pr_err("ERR_MDMA: setup_dma failed: %d\n", err);
static void middma_shutdown(struct pci_dev *pdev)
	pci_pool_destroy(device->dma_pool);
	u32 base_addr, bar_size;
	pr_debug("MDMA: CH %d, base %d, block len %d, Peripheral mask %x\n",
		goto err_enable_device;
		goto err_request_regions;
		goto err_set_dma_mask;
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		goto err_set_dma_mask;
	device = kzalloc(sizeof(*device), GFP_KERNEL);
		pr_err("ERR_MDMA: kzalloc failed probe\n");
		pr_err("ERR_MDMA: ioremap failed\n");
	pci_set_drvdata(pdev, device);
	err = mid_setup_dma(pdev);
	pm_runtime_put_noidle(&pdev->dev);
err_request_regions:
	pr_err("ERR_MDMA: Probe failed %d\n", err);
	pm_runtime_get_noresume(&pdev->dev);
	middma_shutdown(pdev);
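
/*
 * System PM callbacks: dma_suspend checks every channel for use before
 * masking the DMAC1 peripheral interrupt.
 */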
static int dma_suspend(struct device *dev)
	pr_debug("MDMA: dma_suspend called\n");
	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
	dmac1_mask_periphral_intr(device);
	pr_debug("MDMA: dma_resume called\n");
		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
static int dma_runtime_suspend(struct device *dev)

static int dma_runtime_resume(struct device *dev)

static int dma_runtime_idle(struct device *dev)
	for (i = 0; i < device->max_chan; i++) {
		if (device->ch[i].in_use)
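
/* Runtime and system PM operations wired into the PCI driver below */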
static const struct dev_pm_ops intel_mid_dma_pm = {
	.runtime_suspend = dma_runtime_suspend,
	.runtime_resume = dma_runtime_resume,
	.runtime_idle = dma_runtime_idle,
	.suspend = dma_suspend,
static struct pci_driver intel_mid_dma_pci_driver = {
	.name		= "Intel MID DMA",
	.id_table	= intel_mid_dma_ids,
	.probe		= intel_mid_dma_probe,
		.pm = &intel_mid_dma_pm,
static int __init intel_mid_dma_init(void)
	pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n",
	return pci_register_driver(&intel_mid_dma_pci_driver);
static void __exit intel_mid_dma_exit(void)