Linux Kernel 3.7.1
arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

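/*
 * Illustrative sketch (not part of the original file): platform code for a
 * bus whose devices see system RAM starting at bus address 0x80000000 could
 * install the offset with the set_dma_offset() helper from
 * <asm/dma-mapping.h>, e.g.:
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *
 * after which every bus address produced below is biased by that amount via
 * get_dma_offset(). The device and value here are invented for the example.
 */
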
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

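/*
 * Worked example (values invented for illustration): on a cache-coherent
 * platform, if the allocation above lands at physical address 0x02000000
 * and the device's offset is 0x80000000, the returned CPU pointer maps that
 * memory while *dma_handle becomes 0x02000000 + 0x80000000 = 0x82000000,
 * the address the device must use.
 */
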
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

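/*
 * Minimal driver-side sketch (illustrative, not from this file): the pair
 * above is normally reached through the generic DMA API rather than called
 * directly:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	...		use buf from the CPU, give handle to the device
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */
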
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

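/*
 * For a directly mapped bus, building a scatter/gather list is purely
 * arithmetic: each entry's bus address is its physical address plus the
 * per-device offset. On non-coherent platforms the cache is flushed or
 * invalidated for each entry as it is mapped; __dma_sync_page() compiles
 * away when the cache is coherent.
 */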
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

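/*
 * Unmapping is a no-op in the direct case: map_sg() created no IOMMU or
 * bounce-buffer state, so there is nothing to tear down.
 */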
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

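/*
 * Worked example for the check below: with a zero offset and DRAM ending at
 * 0x100000000 (4 GiB), the highest byte a device must reach is 0xffffffff,
 * so a 32-bit mask (DMA_BIT_MASK(32)) is just sufficient; any nonzero
 * offset raises the bar accordingly.
 */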
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

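/*
 * The required mask is the smallest all-ones mask covering the highest
 * possible DMA address. Worked example (invented value): if DRAM plus the
 * device offset ends at 0x200000000, fls64() returns 34, so mask starts as
 * 1ULL << 33 = 0x200000000 and becomes 0x200000000 + 0x1ffffffff =
 * 0x3ffffffff, i.e. a 34-bit mask.
 */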
static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

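/*
 * Streaming (single-page) mappings follow the same pattern as the
 * scatter/gather path above: sync the CPU cache on non-coherent platforms,
 * then return the physical address plus the per-device offset.
 */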
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

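/*
 * Illustrative note (not in the original file): this table is the default
 * set of DMA operations for directly mapped buses; platform code usually
 * attaches it to a device via the set_dma_ops() helper from
 * <asm/dma-mapping.h>.
 */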
struct dma_map_ops dma_direct_ops = {
	.alloc				= dma_direct_alloc_coherent,
	.free				= dma_direct_free_coherent,
	.mmap				= dma_direct_mmap_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

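/*
 * Setting a device's DMA mask honours two levels of override before the
 * generic fallback: a machine-specific ppc_md.dma_set_mask() hook first,
 * then a set_dma_mask() method on the device's dma_map_ops, and only then
 * the plain "validate and store" path below.
 */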
int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
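/*
 * Typical caller pattern (illustrative, not from this file; pdev is a
 * hypothetical device): a driver asks for the widest mask it can use and
 * falls back if the platform refuses:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */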

u64 dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

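/*
 * Boot-time setup: preallocate dma-debug tracking entries and, where the
 * corresponding bus types are built in, register the PCI and VIO buses with
 * the DMA API debugging facility (only active under CONFIG_DMA_API_DEBUG).
 */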
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);