Linux Kernel 3.7.1
dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;     /* kernel mapping of the area */
        dma_addr_t      device_base;    /* base address as seen by the device */
        phys_addr_t     pfn_base;       /* first page frame number of the area */
        int             size;           /* size of the area in pages */
        int             flags;          /* DMA_MEMORY_* flags it was declared with */
        unsigned long   *bitmap;        /* one bit per page, set while allocated */
};

/*
 * dma_declare_coherent_memory() - set up a per-device coherent memory area of
 * @size bytes living at bus address @bus_addr, which the device addresses as
 * @device_addr.  Returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on
 * failure.
 */
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
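As a usage illustration (not part of this file): a platform driver could dedicate a chunk of device-local memory to its device at probe time. The driver name, bus address and size below are hypothetical.

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        /* Hypothetical: back dma_alloc_coherent() for this device with 1 MiB
         * of on-board memory at bus/device address 0x30000000. */
        if (!dma_declare_coherent_memory(&pdev->dev, 0x30000000, 0x30000000,
                                         1 << 20,
                                         DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
                return -ENOMEM;
        return 0;
}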

/*
 * dma_release_declared_memory() - tear down the area set up by
 * dma_declare_coherent_memory(): unmap it and free the bookkeeping.
 */
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
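Continuing the hypothetical driver above, the matching teardown is simply:

static int foo_remove(struct platform_device *pdev)
{
        /* Undo the declaration made in foo_probe(). */
        dma_release_declared_memory(&pdev->dev);
        return 0;
}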

/*
 * dma_mark_declared_memory_occupied() - reserve part of the declared area,
 * starting at @device_addr, so the allocator will not hand it out.  Returns
 * the kernel virtual address of the reserved region or an ERR_PTR().
 */
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
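A hedged usage sketch, continuing the hypothetical driver and address from the probe example: reserving a fixed control page that the hardware expects at a specific device address.

static void *foo_reserve_ctrl_page(struct platform_device *pdev)
{
        void *ctrl;

        /* Keep the allocator away from the one page the device expects to
         * find its control block in (address purely illustrative). */
        ctrl = dma_mark_declared_memory_occupied(&pdev->dev, 0x30000000,
                                                 PAGE_SIZE);
        if (IS_ERR(ctrl))
                return NULL;
        return ctrl;
}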

/*
 * dma_alloc_from_coherent() - try to allocate memory from the per-device
 * coherent area.
 *
 * Meant to be called from an architecture's dma_alloc_coherent().  Returns
 * 0 if the caller should continue with a generic allocation, or non-zero if
 * the request has been handled here: *ret then holds the virtual address of
 * the allocation (or NULL if the area is exclusive and full) and *dma_handle
 * the corresponding device address.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem;
        int order = get_order(size);
        int pageno;

        if (!dev)
                return 0;
        mem = dev->dma_mem;
        if (!mem)
                return 0;

        *ret = NULL;

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the per-device area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
        memset(*ret, 0, size);

        return 1;

err:
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
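A hedged sketch of how an architecture's allocator might consult this helper first and only then fall back to a generic allocation; the fallback shown is purely illustrative, not any particular architecture's real code.

static void *example_dma_alloc(struct device *dev, size_t size,
                               dma_addr_t *handle, gfp_t gfp)
{
        void *vaddr;

        /* A non-zero return means the per-device pool handled the request;
         * vaddr may still be NULL if the pool is exclusive and exhausted. */
        if (dma_alloc_from_coherent(dev, size, handle, &vaddr))
                return vaddr;

        /* Fall back to ordinary pages (stand-in for the arch's real path). */
        vaddr = (void *)__get_free_pages(gfp, get_order(size));
        if (vaddr)
                *handle = virt_to_phys(vaddr);  /* illustrative only */
        return vaddr;
}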

/*
 * dma_release_from_coherent() - try to free memory that was allocated from
 * the per-device coherent area.
 *
 * Returns 1 if @vaddr belonged to the area and has been released, or 0 if
 * the caller should free the buffer through the generic path.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
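The matching free-side sketch, under the same assumptions as the allocation example above:

static void example_dma_free(struct device *dev, size_t size, void *vaddr)
{
        int order = get_order(size);

        /* Returns 1 (and releases the bitmap region) when the buffer came
         * from the per-device pool; otherwise use the generic path. */
        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        free_pages((unsigned long)vaddr, order);  /* illustrative fallback */
}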

/*
 * dma_mmap_from_coherent() - try to map memory allocated from the per-device
 * coherent area into a userspace vma.
 *
 * Returns 1 if the buffer belongs to the area; *ret then holds the
 * remap_pfn_range() result, or -ENXIO if the requested offset/size do not
 * fit the buffer.  Returns 0 if the caller should map the buffer through
 * the generic path.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                int count = size >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
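Finally, a hedged sketch of the mmap-side caller, again assuming an architecture helper that falls back to its own mapping code when this returns 0:

static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size)
{
        int ret;

        /* Returns 1 when the buffer is from the per-device pool; ret then
         * carries the remap_pfn_range() result. */
        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        /* Arch-specific generic mapping would go here; omitted. */
        return -ENXIO;
}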