Linux Kernel  3.7.1
tmio_mmc_dma.c
/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
	/* Switch DMA mode on or off - SuperH specific? */
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
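
Aside (illustrative, not part of the file above): the scatterlist checks in both start_dma helpers boil down to a simple mask test. A minimal sketch, assuming a hypothetical alignment_shift of 1, i.e. a 2-byte DMA granularity:

/* Illustration only: mask behaviour for an assumed alignment_shift of 1 */
unsigned int align = (1 << 1) - 1;	/* mask = 0x1 */

/* sg->offset = 0x800, sg->length = 512: both checks pass, DMA is used.   */
/* sg->offset = 0x801: offset & align != 0, so a single short sg entry    */
/*                     is routed through the bounce buffer instead.       */
/* sg->length = 511:   length & align != 0, "multiple" fails and the      */
/*                     driver gives up on DMA and falls back to PIO.      */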

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct tmio_mmc_data *pdata = host->pdata;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		tmio_mmc_enable_dma(host, false);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}
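
For context (not part of the file): tmio_mmc_start_dma() is the single hook the core driver uses to start a transfer; the real call site in the core part of this driver (tmio_mmc_pio.c) carries more setup. A minimal sketch of a caller, assuming the conventions visible above (the core sets host->data, and host->force_pio is consumed by the PIO path); example_start_data is hypothetical:

/* Hypothetical caller, for illustration only. */
static void example_start_data(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->data = data;
	host->force_pio = false;

	/* Degrades to PIO on its own: sets host->force_pio for short
	 * transfers, or releases both channels on a DMA setup failure. */
	tmio_mmc_start_dma(host, data);
}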

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}

/* It might be necessary to make filter MFD specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
	chan->private = arg;
	return true;
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!pdata->dma)
		return;

	if (!host->chan_tx && !host->chan_rx) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_tx);
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
						    pdata->dma->chan_priv_rx);
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}
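
Finally, DMA is opt-in per platform: tmio_mmc_request_dma() does nothing unless pdata->dma is populated. A minimal board-side sketch, assuming the struct tmio_mmc_dma layout from <linux/mfd/tmio.h> whose fields (chan_priv_tx, chan_priv_rx, alignment_shift) are referenced above; the example_* names and placeholder slave configurations are hypothetical:

/* Hypothetical board wiring, illustration only. chan_priv_tx/chan_priv_rx
 * would normally point at the DMA engine driver's slave configuration. */
static int example_slave_tx_cfg;		/* placeholder */
static int example_slave_rx_cfg;		/* placeholder */

static struct tmio_mmc_dma example_dma = {
	.chan_priv_tx	 = &example_slave_tx_cfg,
	.chan_priv_rx	 = &example_slave_rx_cfg,
	.alignment_shift = 1,			/* sg entries must be 2-byte aligned */
};

static struct tmio_mmc_data example_pdata = {
	.dma = &example_dma,
	/* capabilities, ocr_mask, etc. omitted */
};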