Linux Kernel 3.7.1
dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
{
        if (likely(dev))
                return (unsigned long)dev->archdata.dma_data;

        return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}
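
/*
 * Illustrative sketch (not part of this file): platform code could set the
 * per-device offset described above roughly as follows. The helper name and
 * the 0x80000000 offset are hypothetical; this assumes archdata.dma_data is
 * declared as a void * on this architecture.
 *
 *      static void example_set_dma_offset(struct device *dev)
 *      {
 *              dev->archdata.dma_data = (void *)0x80000000UL;
 *      }
 */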

#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

        return ret;
#endif
}
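
/*
 * Illustrative sketch (not part of this file): a driver obtains a coherent
 * buffer through the generic DMA API, which dispatches to the handler above.
 * The device pointer and buffer size are hypothetical.
 *
 *      dma_addr_t handle;
 *      void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */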

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                           sg->length, direction);
        }

        return nents;
}
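
/*
 * Illustrative sketch (not part of this file): a driver maps a scatterlist
 * via the generic DMA API, which dispatches to the handler above. The
 * scatterlist and entry count are hypothetical.
 *
 *      int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      if (count == 0)
 *              return -EIO;
 *      ... program the device using sg_dma_address()/sg_dma_len() ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */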

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Direct mappings require no unmap work */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
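
/*
 * Illustrative sketch (not part of this file): single-buffer streaming
 * mappings such as dma_map_single() are implemented in terms of the page
 * handler above. The buffer and length are hypothetical.
 *
 *      dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, addr))
 *              return -EIO;
 *      ... let the device read from addr ...
 *      dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */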

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
/* It is not necessary to do cache cleanup here.
 *
 * No phys_to_virt() conversion is needed because dma_address is already a
 * physical address, which is what __dma_sync() expects.
 */
        __dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * It's pointless to flush the cache as the memory segment
         * is given to the CPU
         */

        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * It's pointless to invalidate the cache if the device isn't
         * supposed to write to the relevant region
         */

        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
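
/*
 * Illustrative sketch (not part of this file): the sync hooks above back the
 * dma_sync_single_for_{cpu,device}() API, which lets a driver touch a
 * streaming buffer between device transfers. addr and len are hypothetical.
 *
 *      dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
 *      ... CPU inspects the received data ...
 *      dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
 */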

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .map_sg                 = dma_direct_map_sg,
        .unmap_sg               = dma_direct_unmap_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .sync_single_for_cpu    = dma_direct_sync_single_for_cpu,
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu        = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device     = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);