#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
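	/*
	 * Per-device statistics kept in the dmabounce bookkeeping structure;
	 * they are only updated when the file is built with STATS defined,
	 * via the DO_STATS() wrapper above.
	 */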
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
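/*
 * Dump the counters above through a read-only sysfs attribute
 * ("dmabounce_stats"): small-pool allocs, large-pool allocs, unpooled
 * (coherent) allocs, total allocs, map operations and actual bounces.
 */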
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
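/* allocate a 'safe' buffer and keep track of it */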
	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	}
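	/*
	 * Sizes beyond the large pool are not pooled at all; the safe copy
	 * is then obtained with dma_alloc_coherent() instead.
	 */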
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
147 "%s: could not alloc dma memory (size=%d)\n",
	device_info->total_allocs++;
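/* determine if a buffer is from our "safe" pool */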
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
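/* unlink a safe buffer from the tracking list and release its memory */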
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
	if (!dev || !dev->archdata.dmabounce)
		return NULL;

	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}

	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
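/*
 * Decide whether a mapping has to be bounced: first sanity-check the
 * request against the device's DMA mask, then ask the device's own
 * needs_bounce() callback.
 */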
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;
	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* bounce if any part of the buffer falls outside the mask */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
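/*
 * Bounce an unsafe buffer: allocate a safe buffer, copy the data across
 * for writes to the device, and hand back the safe buffer's DMA address.
 */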
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	DO_STATS ( device_info->map_op_count++ );
	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_ERROR_CODE;
	}
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}
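/*
 * unmap_single() undoes map_single(): for reads from the device the data
 * is copied back from the safe buffer to the original one, then the safe
 * buffer is released.
 */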
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/* keep the copied-back data coherent with any user mappings */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
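/*
 * dma_map_ops entry point: bounce only when needs_bounce() says so;
 * otherwise fall through to the normal arm_dma_ops cache maintenance.
 */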
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
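/*
 * If the address belongs to one of our safe buffers, copy back and free
 * it via unmap_single(); otherwise defer to arm_dma_ops.
 */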
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
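/*
 * The __dmabounce_sync_for_* helpers return 0 when the address belongs to
 * a bounced buffer (the copy has been done here) and 1 when it does not,
 * in which case the wrappers below fall back to arm_dma_ops.
 */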
	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}
	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}
static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
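/*
 * The dma_map_ops installed on a bounced device. Only the single-buffer
 * hooks below need special handling; in the full structure the remaining
 * entries (alloc, free, scatterlist ops, ...) point at the generic
 * arm_dma_* helpers.
 */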
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.set_dma_mask		= dmabounce_set_mask,
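/*
 * Device registration: create the small (and optionally large) DMA pools,
 * record the bookkeeping in dev->archdata.dmabounce and switch the
 * device's dma_map_ops over to dmabounce_ops.
 */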
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
			       const char *name, unsigned long size)

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
489 "Could not allocated dmabounce_device_info\n");
	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev, "dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool", large_buffer_size);
		if (ret) {
			dev_err(dev, "dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");
	set_dma_ops(dev, NULL);
550 "Never registered with dmabounce but attempting"
557 "Removing from dmabounce with pending buffers!\n");
	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
	dev_info(dev, "dmabounce: device unregistered\n");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");