#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/machdep.h>
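
/*
 * Generic direct DMA implementation: bus addresses are physical addresses
 * plus an optional per-device offset (get_dma_offset()), for platforms where
 * memory is not visible to devices at bus address 0.
 */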

/* dma_direct_alloc_coherent(): allocate memory and hand back its bus address */
#ifdef CONFIG_NOT_COHERENT_CACHE
	/* the non-coherent allocator fills in *dma_handle; only apply the offset */
	*dma_handle += get_dma_offset(dev);
#else
	int node = dev_to_node(dev);

	page = alloc_pages_node(node, flag, get_order(size));

	*dma_handle = __pa(ret) + get_dma_offset(dev);
#endif
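
/* dma_direct_free_coherent(): free through the path that matches the allocator */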
#ifdef CONFIG_NOT_COHERENT_CACHE

/* dma_direct_mmap_coherent(): map a coherent buffer into userspace */
#ifdef CONFIG_NOT_COHERENT_CACHE

	/* dma_direct_map_sg(): the device sees the same length the CPU does */
		sg->dma_length = sg->length;
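
/*
 * dma_direct_dma_supported(): on ppc64 a DMA mask is only usable if it covers
 * all of RAM plus the per-device DMA offset.
 */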
static int dma_direct_dma_supported(struct device *dev, u64 mask)
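
/*
 * dma_direct_get_required_mask(): smallest all-ones (2^n - 1) mask that
 * covers the highest DMA address the device may be handed.
 */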
static u64 dma_direct_get_required_mask(struct device *dev)
{
	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;
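
/* dma_direct_map_page(): sync CPU caches if needed, then return the bus address */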
	return page_to_phys(page) + offset + get_dma_offset(dev);

static inline void dma_direct_unmap_page(struct device *dev,
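
/* The sync_* hooks below are only needed (and only installed) when the cache is not DMA-coherent */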
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
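
/* dma_direct_ops: the dma_map_ops instance used for directly mapped busses */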
struct dma_map_ops dma_direct_ops = {
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.get_required_mask = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu = dma_direct_sync_single,
	.sync_single_for_device = dma_direct_sync_single,
	.sync_sg_for_cpu = dma_direct_sync_sg,
	.sync_sg_for_device = dma_direct_sync_sg,
#endif
};
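
/* Number of dma-debug entries to preallocate at init time */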
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
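
/* dma_set_mask(): the ppc_md platform hook, when present, overrides the generic path */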
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
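
/* dma_get_required_mask(): prefer the platform hook, then fall back to the device's dma_map_ops */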
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);