Linux Kernel 3.7.1
include/asm-generic/dma-mapping-common.h
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
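
/*
 * Example (not part of this header): a minimal streaming-DMA sketch
 * built on the helpers above.  The "program the device" step is
 * hypothetical; dma_mapping_error() is the DMA API's standard check
 * for a failed mapping.
 */
static inline int example_single_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, NULL);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* never hand a failed handle to hardware */

	/* ... program the device with 'handle' and wait for completion ... */

	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, NULL);
	return 0;
}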

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
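
/*
 * Example (not part of this header): mapping a scatterlist.  map_sg
 * may coalesce entries, so the descriptor loop uses the returned
 * count, while the final unmap takes the original nents.
 * example_setup_desc() stands in for device-specific descriptor code.
 */
static inline int example_map_sgl(struct device *dev, struct scatterlist *sgl,
				  int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, NULL);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i)
		example_setup_desc(sg_dma_address(s), sg_dma_len(s));

	/* ... after the transfer completes, unmap with the original nents ... */
	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, NULL);
	return 0;
}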

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
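
/*
 * Example (not part of this header): mapping a struct page directly,
 * which avoids needing a kernel virtual address (useful for highmem
 * pages that dma_map_single() cannot take).
 */
static inline int example_page_tx(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... run the transfer ... */

	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}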

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
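
/*
 * Example (not part of this header): a long-lived mapping reused for
 * many receives.  Ownership moves to the CPU before the data is read
 * and back to the device before the buffer is reused; example_rx()
 * stands in for the driver's consumer of the received bytes.
 */
static inline void example_rx_pass(struct device *dev, dma_addr_t handle,
				   void *buf, size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	example_rx(buf, len);
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}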

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
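
/*
 * Example (not part of this header): when only part of a mapping
 * changed, sync just that range instead of the whole buffer.  The
 * 256-byte ring-slot layout here is made up for illustration.
 */
static inline void example_sync_slot(struct device *dev, dma_addr_t ring,
				     unsigned int slot)
{
	dma_sync_single_range_for_device(dev, ring, slot * 256, 256,
					 DMA_TO_DEVICE);
}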

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
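
/*
 * Example (not part of this header): refreshing a persistent
 * scatter-gather mapping between passes.  Note the sync calls take
 * the original nelems, not the coalesced count dma_map_sg() returned;
 * example_refill() is a stand-in for driver code touching the pages.
 */
static inline void example_sg_refresh(struct device *dev,
				      struct scatterlist *sgl, int nelems)
{
	dma_sync_sg_for_cpu(dev, sgl, nelems, DMA_BIDIRECTIONAL);
	example_refill(sgl, nelems);
	dma_sync_sg_for_device(dev, sgl, nelems, DMA_BIDIRECTIONAL);
}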

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
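
/*
 * Example (not part of this header): the wrappers above pass a NULL
 * attrs; to request an attribute explicitly, build a dma_attrs on the
 * stack.  DMA_ATTR_WEAK_ORDERING is only honored on some platforms
 * and is used here purely as an illustration.
 */
static inline dma_addr_t example_map_weak(struct device *dev, void *buf,
					  size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}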

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
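
/*
 * Example (not part of this header): a hypothetical driver's mmap
 * file operation exporting a coherent buffer (previously obtained
 * from dma_alloc_coherent) to user space; struct example_drv and its
 * fields are assumptions.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_drv *priv = file->private_data;

	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, priv->size);
}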

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
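
/*
 * Example (not part of this header): describing an existing coherent
 * buffer as an sg_table, e.g. for a dma-buf exporter handing the
 * buffer to another device.
 */
static inline int example_export(struct device *dev, void *cpu_addr,
				 dma_addr_t handle, size_t size)
{
	struct sg_table sgt;
	int ret;

	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
	if (ret)
		return ret;

	/* ... pass sgt to the importing device ... */

	sg_free_table(&sgt);
	return 0;
}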

#endif