12 #include <linux/types.h>
16 #include <linux/wait.h>
17 #include <linux/sched.h>
19 #include <linux/device.h>
21 #include <linux/slab.h>
25 #include <linux/module.h>
/*
 * Register map and CCW (command word) bit definitions for the
 * Freescale MXS APBH/APBX DMA engine (i.MX23/i.MX28).
 *
 * Fix: stripped the stray line-number prefixes that had been fused into
 * every directive by a bad extraction, which made the file uncompilable.
 */

/* True when this engine instance is the APBH (vs. APBX) controller. */
#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
/* True for the older i.MX23 APBH block, which has a different register layout. */
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16

/*
 * Per-channel register offsets: the old (i.MX23) APBH uses a different
 * base offset than the newer blocks; channels are spaced 0x70 apart.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)

/* ccw bits definitions */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

/* Shift @value into @field of a CCW word, masked to the field's width. */
#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

/* DMA command codes carried in the BM_CCW_COMMAND field. */
#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3

/*
 * Maximum bytes moved by one CCW; NOTE(review): presumably bounded by a
 * 16-bit transfer-count field in the CCW — confirm against the hw manual.
 */
#define MAX_XFER_BYTES	0xff00

#define MXS_PIO_WORDS	16

/* Size of one channel's CCW command buffer (four pages) and how many
 * struct mxs_dma_ccw entries fit in it. */
#define CCW_BLOCK_SIZE	(4 * PAGE_SIZE)
#define NUM_CCW	(int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw))

/* Channel flag bit; NOTE(review): looks like it marks a cyclic (looping)
 * descriptor chain — confirm against the prep_dma_cyclic path. */
#define MXS_DMA_SG_LOOP		(1 << 0)

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff
167 .name =
"imx23-dma-apbh",
170 .name =
"imx23-dma-apbx",
173 .name =
"imx28-dma-apbh",
176 .name =
"imx28-dma-apbx",
184 { .compatible =
"fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
185 { .compatible =
"fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
186 { .compatible =
"fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
187 { .compatible =
"fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
215 static void mxs_dma_reset_chan(
struct mxs_dma_chan *mxs_chan)
218 int chan_id = mxs_chan->
chan.chan_id;
228 static void mxs_dma_enable_chan(
struct mxs_dma_chan *mxs_chan)
231 int chan_id = mxs_chan->
chan.chan_id;
241 static void mxs_dma_disable_chan(
struct mxs_dma_chan *mxs_chan)
246 static void mxs_dma_pause_chan(
struct mxs_dma_chan *mxs_chan)
249 int chan_id = mxs_chan->
chan.chan_id;
262 static void mxs_dma_resume_chan(
struct mxs_dma_chan *mxs_chan)
265 int chan_id = mxs_chan->
chan.chan_id;
280 return dma_cookie_assign(tx);
283 static void mxs_dma_tasklet(
unsigned long data)
287 if (mxs_chan->
desc.callback)
288 mxs_chan->
desc.callback(mxs_chan->
desc.callback_param);
323 "%s: error in channel %d\n", __func__,
326 mxs_dma_reset_chan(mxs_chan);
337 dma_cookie_complete(&mxs_chan->
desc);
340 tasklet_schedule(&mxs_chan->
tasklet);
346 static int mxs_dma_alloc_chan_resources(
struct dma_chan *
chan)
361 if (!mxs_chan->
ccw) {
370 0,
"mxs-dma", mxs_dma);
375 ret = clk_prepare_enable(mxs_dma->
clk);
379 mxs_dma_reset_chan(mxs_chan);
382 mxs_chan->
desc.tx_submit = mxs_dma_tx_submit;
385 async_tx_ack(&mxs_chan->
desc);
398 static void mxs_dma_free_chan_resources(
struct dma_chan *chan)
403 mxs_dma_disable_chan(mxs_chan);
410 clk_disable_unprepare(mxs_dma->
clk);
452 if (sg_len + (append ? idx : 0) >
NUM_CCW) {
454 "maximum number of sg exceeded: %d > %d\n",
468 ccw = &mxs_chan->
ccw[idx - 1];
478 ccw = &mxs_chan->
ccw[idx++];
481 for (j = 0; j < sg_len;)
496 dev_err(mxs_dma->
dma_device.dev,
"maximum bytes for sg entry exceeded: %d > %d\n",
501 ccw = &mxs_chan->
ccw[idx++];
515 if (i + 1 == sg_len) {
526 return &mxs_chan->
desc;
536 unsigned long flags,
void *context)
540 int num_periods = buf_len / period_len;
551 "maximum number of sg exceeded: %d > %d\n",
558 "maximum period size exceeded: %d > %d\n",
563 while (
buf < buf_len) {
566 if (i + 1 == num_periods)
569 ccw->
next = mxs_chan->
ccw_phys +
sizeof(*ccw) * (i + 1);
582 dma_addr += period_len;
589 return &mxs_chan->
desc;
604 mxs_dma_reset_chan(mxs_chan);
605 mxs_dma_disable_chan(mxs_chan);
608 mxs_dma_pause_chan(mxs_chan);
611 mxs_dma_resume_chan(mxs_chan);
632 static void mxs_dma_issue_pending(
struct dma_chan *chan)
636 mxs_dma_enable_chan(mxs_chan);
643 ret = clk_prepare_enable(mxs_dma->
clk);
664 clk_disable_unprepare(mxs_dma->
clk);
677 mxs_dma = kzalloc(
sizeof(*mxs_dma),
GFP_KERNEL);
683 id_entry = of_id->
data;
687 dma_type = (
struct mxs_dma_type *)id_entry->driver_data;
696 goto err_request_region;
700 if (!mxs_dma->
base) {
706 if (IS_ERR(mxs_dma->
clk)) {
707 ret = PTR_ERR(mxs_dma->
clk);
714 INIT_LIST_HEAD(&mxs_dma->
dma_device.channels);
722 dma_cookie_init(&mxs_chan->
chan);
725 (
unsigned long) mxs_chan);
733 ret = mxs_dma_init(mxs_dma);
743 mxs_dma->
dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
744 mxs_dma->
dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
745 mxs_dma->
dma_device.device_tx_status = mxs_dma_tx_status;
746 mxs_dma->
dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
747 mxs_dma->
dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
748 mxs_dma->
dma_device.device_control = mxs_dma_control;
749 mxs_dma->
dma_device.device_issue_pending = mxs_dma_issue_pending;
775 .of_match_table = mxs_dma_dt_ids,
777 .id_table = mxs_dma_ids,
780 static int __init mxs_dma_module_init(
void)