Linux Kernel  3.7.1
dmabounce.c
/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top, and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker ([email protected])
 * Re-written by Christopher Hoover <[email protected]>
 * Made generic by Deepak Saxena <[email protected]>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

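/*
 * DO_STATS(x) executes x only when STATS is defined above.  The counters
 * it maintains are exposed read-only through the dmabounce_stats sysfs
 * attribute created in dmabounce_register_dev() below.
 */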
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;

        /* driver-supplied test: does this address/size need bouncing? */
        int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);
        list_add(&buf->node, &device_info->safe_buffers);
        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr <= safe_dma_addr &&
                    b->safe_dma_addr + b->size > safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
                dma_addr_t dma_addr, const char *where)
{
        if (!dev || !dev->archdata.dmabounce)
                return NULL;
        if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "Trying to %s invalid mapping\n", where);
                return NULL;
        }
        return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
        if (!dev || !dev->archdata.dmabounce)
                return 0;

        if (dev->dma_mask) {
                unsigned long limit, mask = *dev->dma_mask;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return -E2BIG;
                }

                /* Figure out if we need to bounce from the DMA mask. */
                if ((dma_addr | (dma_addr + size - 1)) & ~mask)
                        return 1;
        }

        return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

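/*
 * Worked example for the mask check above: with a 24-bit DMA mask
 * (mask = 0x00ffffff), limit = (mask + 1) & ~mask = 0x01000000, i.e. a
 * 16MB window, so any single mapping larger than 16MB is rejected with
 * -E2BIG.  With a full 32-bit mask, mask + 1 overflows to 0 and the size
 * check is skipped.  A buffer at dma_addr = 0x01800000 has bits set
 * outside the 24-bit mask and therefore needs bouncing.
 */
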
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
                                    enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf;

        if (device_info)
                DO_STATS ( device_info->map_op_count++ );

        buf = alloc_safe_buffer(device_info, ptr, size, dir);
        if (buf == NULL) {
                dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                        __func__, ptr);
                return DMA_ERROR_CODE;
        }

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                        __func__, ptr, buf->safe, size);
                memcpy(buf->safe, ptr, size);
        }

        return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
                                size_t size, enum dma_data_direction dir)
{
        BUG_ON(buf->size != size);
        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                void *ptr = buf->ptr;

                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
                        __func__, buf->safe, ptr, size);
                memcpy(ptr, buf->safe, size);

                /*
                 * Since we may have written to a page cache page,
                 * we need to ensure that the data will be coherent
                 * with user mappings.
                 */
                __cpuc_flush_dcache_area(ptr, size);
        }
        free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range. if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        dma_addr_t dma_addr;
        int ret;

        dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
                __func__, page, offset, size, dir);

        dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

        ret = needs_bounce(dev, dma_addr, size);
        if (ret < 0)
                return DMA_ERROR_CODE;

        if (ret == 0) {
                arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
                return dma_addr;
        }

        if (PageHighMem(page)) {
                dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
                return DMA_ERROR_CODE;
        }

        return map_single(dev, page_address(page) + offset, size, dir);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct safe_buffer *buf;

        dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
                __func__, dma_addr, size, dir);

        buf = find_safe_buffer_dev(dev, dma_addr, __func__);
        if (!buf) {
                arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
                return;
        }

        unmap_single(dev, buf, size, dir);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        unsigned long off;

        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
                __func__, addr, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;       /* not a bounced buffer: caller falls back to arm_dma_ops */

        off = addr - buf->safe_dma_addr;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
                        __func__, buf->safe + off, buf->ptr + off, sz);
                memcpy(buf->ptr + off, buf->safe + off, sz);
        }
        return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
                return;

        arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
                size_t sz, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        unsigned long off;

        dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
                __func__, addr, sz, dir);

        buf = find_safe_buffer_dev(dev, addr, __func__);
        if (!buf)
                return 1;

        off = addr - buf->safe_dma_addr;

        BUG_ON(buf->direction != dir);

        dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
                __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
                buf->safe, buf->safe_dma_addr);

        DO_STATS(dev->archdata.dmabounce->bounce_count++);

        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
                dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
                        __func__, buf->ptr + off, buf->safe + off, sz);
                memcpy(buf->safe + off, buf->ptr + off, sz);
        }
        return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (!__dmabounce_sync_for_device(dev, handle, size, dir))
                return;

        arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
        if (dev->archdata.dmabounce)
                return 0;

        return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

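/*
 * Only the single-page map/unmap, single-range syncs and set_dma_mask are
 * overridden here; alloc/free/mmap/get_sgtable and the scatterlist entry
 * points are taken straight from the generic ARM DMA implementation.
 */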
static struct dma_map_ops dmabounce_ops = {
        .alloc                  = arm_dma_alloc,
        .free                   = arm_dma_free,
        .mmap                   = arm_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = dmabounce_map_page,
        .unmap_page             = dmabounce_unmap_page,
        .sync_single_for_cpu    = dmabounce_sync_for_cpu,
        .sync_single_for_device = dmabounce_sync_for_device,
        .map_sg                 = arm_dma_map_sg,
        .unmap_sg               = arm_dma_unmap_sg,
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .set_dma_mask           = dmabounce_set_mask,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
                const char *name, unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}

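/*
 * Registration is done by bus or platform setup code before any DMA
 * mappings are created for the device.  A minimal sketch (the callback,
 * the 64MB window and the 512/4096 pool sizes below are illustrative
 * only, not taken from any real platform):
 *
 *      static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *                                 size_t size)
 *      {
 *              return (addr + size) > (dma_addr_t)SZ_64M;
 *      }
 *
 *      ret = dmabounce_register_dev(dev, 512, 4096, my_needs_bounce);
 *
 * After this call the device uses dmabounce_ops, so mappings flagged by
 * the callback are transparently copied through a safe buffer.
 */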
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                unsigned long large_buffer_size,
                int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                dev_err(dev,
                        "Could not allocate dmabounce_device_info\n");
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);
        device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

        dev->archdata.dmabounce = device_info;
        set_dma_ops(dev, &dmabounce_ops);

        dev_info(dev, "dmabounce: registered device\n");

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

        dev->archdata.dmabounce = NULL;
        set_dma_ops(dev, NULL);

        if (!device_info) {
                dev_warn(dev,
                         "Never registered with dmabounce but attempting "
                         "to unregister!\n");
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                dev_err(dev,
                        "Removing from dmabounce with pending buffers!\n");
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        if (device_info->attr_res == 0)
                device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

        kfree(device_info);

        dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <[email protected]>, Deepak Saxena <[email protected]>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");