Linux Kernel 3.7.1
dma-mapping.h
#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                           int direction);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        /* Fix when needed. I really don't know of any limitations */
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;
        return 0;
}
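
/*
 * Illustrative sketch (not part of the original header): a driver whose
 * device can only drive the low 24 address bits would negotiate its mask
 * at probe time roughly like this. The function name is hypothetical.
 */
static inline int example_set_24bit_mask(struct device *dev)
{
        /* refuse to bind if the platform cannot satisfy a 24-bit mask */
        if (dma_set_mask(dev, 0x00ffffffULL))
                return -EIO;
        return 0;
}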

/*
 * dma_map_single can't fail as it is implemented now.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
{
        return 0;
}

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp);

extern void dma_free_coherent(struct device *dev, size_t size,
                              void *cpu_addr, dma_addr_t handle);

extern void *dma_alloc_writecombine(struct device *dev, size_t size,
                                    dma_addr_t *handle, gfp_t gfp);

extern void dma_free_writecombine(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t handle);

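/*
 * Illustrative sketch (not part of the original header): allocating a
 * small coherent buffer, e.g. for a descriptor ring, and freeing it again.
 * The function and variable names are assumptions for the example.
 */
static inline void *example_alloc_ring(struct device *dev, size_t ring_bytes,
                                       dma_addr_t *ring_dma)
{
        /* the returned CPU pointer and *ring_dma refer to the same memory;
         * hand *ring_dma to the device, use the pointer from the CPU */
        return dma_alloc_coherent(dev, ring_bytes, ring_dma, GFP_KERNEL);
}

static inline void example_free_ring(struct device *dev, size_t ring_bytes,
                                     void *ring, dma_addr_t ring_dma)
{
        dma_free_coherent(dev, ring_bytes, ring, ring_dma);
}
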
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
{
        dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{

}
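
/*
 * Illustrative sketch (not part of the original header): a typical
 * streaming mapping of a driver-owned buffer for a device->memory
 * transfer. "buf" and the function name are assumptions for the example.
 */
static inline int example_map_rx_buffer(struct device *dev, void *buf,
                                        size_t len, dma_addr_t *dma)
{
        *dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *dma))
                return -ENOMEM; /* always succeeds here, but drivers must check */

        /* hand *dma to the device; dma_unmap_single() after completion */
        return 0;
}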

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        return dma_map_single(dev, page_address(page) + offset,
                              size, direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        dma_unmap_single(dev, dma_address, size, direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nents; i++) {
                char *virt;

                sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset;
                virt = sg_virt(&sg[i]);
                dma_cache_sync(dev, virt, sg[i].length, direction);
        }

        return nents;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{

}
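
/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist for a memory->device transfer. "sgl", "nents" and the
 * function name are assumptions for the example.
 */
static inline int example_map_tx_sg(struct device *dev,
                                    struct scatterlist *sgl, int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (mapped == 0)
                return -ENOMEM;

        /*
         * Program the device with sg_dma_address()/sg_dma_len() of the
         * first "mapped" entries; once the transfer has completed, unmap
         * with the original nents, not with "mapped".
         */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}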

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction direction)
{
        /*
         * No need to do anything since the CPU isn't supposed to
         * touch this memory after we flushed it at mapping- or
         * sync-for-device time.
         */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        /* just sync everything, that's all the pci API can do */
        dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        /* just sync everything, that's all the pci API can do */
        dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
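
/*
 * Illustrative sketch (not part of the original header): a streaming
 * buffer that is reused for several transfers must be passed back and
 * forth between CPU and device with the sync calls. The function name
 * is an assumption for the example.
 */
static inline void example_reuse_rx_buffer(struct device *dev,
                                           dma_addr_t dma, size_t len)
{
        /* the device has written into the buffer; hand it to the CPU */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

        /* ... CPU inspects the received data here ... */

        /* give the buffer back to the device for the next transfer */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}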

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nents, enum dma_data_direction direction)
{
        /*
         * No need to do anything since the CPU isn't supposed to
         * touch this memory after we flushed it at mapping- or
         * sync-for-device time.
         */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nents, enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nents; i++) {
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
        }
}
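
/*
 * Illustrative sketch (not part of the original header): re-driving an
 * already-mapped scatterlist for another memory->device transfer. "sgl",
 * "nents" and the function name are assumptions for the example.
 */
static inline void example_retransmit_sg(struct device *dev,
                                         struct scatterlist *sgl, int nents)
{
        /* the CPU modified the buffers; flush them before the device reads */
        dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
}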

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#endif /* __ASM_AVR32_DMA_MAPPING_H */