Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
dma-mapping.c
Go to the documentation of this file.
1 /*
2  * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
3  *
4  * Copyright (c) 2006 SUSE Linux Products GmbH
5  * Copyright (c) 2006 Tejun Heo <[email protected]>
6  *
7  * This file is released under the GPLv2.
8  */
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/export.h>
12 #include <linux/gfp.h>
14 
15 /*
16  * Managed DMA API
17  */
18 struct dma_devres {
19  size_t size;
20  void *vaddr;
22 };
23 
24 static void dmam_coherent_release(struct device *dev, void *res)
25 {
26  struct dma_devres *this = res;
27 
28  dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
29 }
30 
31 static void dmam_noncoherent_release(struct device *dev, void *res)
32 {
33  struct dma_devres *this = res;
34 
35  dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
36 }
37 
38 static int dmam_match(struct device *dev, void *res, void *match_data)
39 {
40  struct dma_devres *this = res, *match = match_data;
41 
42  if (this->vaddr == match->vaddr) {
43  WARN_ON(this->size != match->size ||
44  this->dma_handle != match->dma_handle);
45  return 1;
46  }
47  return 0;
48 }
49 
63 void * dmam_alloc_coherent(struct device *dev, size_t size,
65 {
66  struct dma_devres *dr;
67  void *vaddr;
68 
69  dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
70  if (!dr)
71  return NULL;
72 
73  vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
74  if (!vaddr) {
75  devres_free(dr);
76  return NULL;
77  }
78 
79  dr->vaddr = vaddr;
80  dr->dma_handle = *dma_handle;
81  dr->size = size;
82 
83  devres_add(dev, dr);
84 
85  return vaddr;
86 }
88 
98 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
100 {
101  struct dma_devres match_data = { size, vaddr, dma_handle };
102 
103  dma_free_coherent(dev, size, vaddr, dma_handle);
104  WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
105  &match_data));
106 }
108 
122 void *dmam_alloc_noncoherent(struct device *dev, size_t size,
124 {
125  struct dma_devres *dr;
126  void *vaddr;
127 
128  dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
129  if (!dr)
130  return NULL;
131 
132  vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
133  if (!vaddr) {
134  devres_free(dr);
135  return NULL;
136  }
137 
138  dr->vaddr = vaddr;
139  dr->dma_handle = *dma_handle;
140  dr->size = size;
141 
142  devres_add(dev, dr);
143 
144  return vaddr;
145 }
147 
157 void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
159 {
160  struct dma_devres match_data = { size, vaddr, dma_handle };
161 
162  dma_free_noncoherent(dev, size, vaddr, dma_handle);
163  WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
164  &match_data));
165 }
167 
168 #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
169 
/*
 * devres release callback: undoes dmam_declare_coherent_memory().  Without
 * this call the devres entry would be a no-op and the declared coherent
 * region would never be released on driver detach.
 */
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}
174 
/**
 * dmam_declare_coherent_memory - managed dma_declare_coherent_memory()
 * @dev: device to declare the coherent region for
 * @bus_addr: bus address of the coherent region
 * @device_addr: device-side address of the coherent region
 * @size: size of the coherent region in bytes
 * @flags: flags forwarded to dma_declare_coherent_memory()
 *
 * Managed dma_declare_coherent_memory(): the declaration is automatically
 * released on driver detach via dmam_coherent_decl_release().
 */
int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	/* Zero-sized devres: only its presence and release callback matter. */
	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
					 flags);
	/*
	 * NOTE(review): dma_declare_coherent_memory() in this era returns
	 * nonzero DMA_MEMORY_* flags on success and 0 on failure, which would
	 * make the "rc == 0 means success" test below inverted — confirm the
	 * return convention of the implementation this kernel builds against.
	 */
	if (rc == 0)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
208 
215 void dmam_release_declared_memory(struct device *dev)
216 {
217  WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
218 }
219 EXPORT_SYMBOL(dmam_release_declared_memory);
220 
221 /*
222  * Create scatter-list for the already allocated DMA buffer.
223  */
224 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
225  void *cpu_addr, dma_addr_t handle, size_t size)
226 {
227  struct page *page = virt_to_page(cpu_addr);
228  int ret;
229 
230  ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
231  if (unlikely(ret))
232  return ret;
233 
234  sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
235  return 0;
236 }
238 
239 #endif
240 
241 /*
242  * Create userspace mapping for the DMA-coherent memory.
243  */
244 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
245  void *cpu_addr, dma_addr_t dma_addr, size_t size)
246 {
247  int ret = -ENXIO;
248 #ifdef CONFIG_MMU
249  unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
250  unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
251  unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
252  unsigned long off = vma->vm_pgoff;
253 
255 
256  if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
257  return ret;
258 
259  if (off < count && user_count <= (count - off)) {
260  ret = remap_pfn_range(vma, vma->vm_start,
261  pfn + off,
262  user_count << PAGE_SHIFT,
263  vma->vm_page_prot);
264  }
265 #endif /* CONFIG_MMU */
266 
267  return ret;
268 }