#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
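
/*
 * Generic "direct" DMA implementation: bus addresses are physical addresses
 * plus an optional per-device offset. Platform code stores that offset in
 * dev->archdata.dma_data, and the mapping helpers below add it via
 * get_dma_direct_offset().
 */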

static unsigned long get_dma_direct_offset(struct device *dev)
{
	return (unsigned long)dev->archdata.dma_data;
}

/* The data cache is not DMA-coherent; select the uncached paths below */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	/* Non-coherent cache: hand out an uncached consistent mapping */
	return consistent_alloc(flag, size, dma_handle);
#else
	struct page *page;
	void *ret;
	int node = dev_to_node(dev);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Directly mapped bus: accept any DMA mask the device advertises */
	return 1;
}
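
/*
 * Streaming mappings: the bus address handed to the device is the physical
 * address plus the per-device offset; __dma_sync() does the cache
 * maintenance the non-coherent hardware needs around device accesses.
 */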

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					  dma_addr_t dma_address, size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	__dma_sync(dma_address, size, direction);
}

static inline void
dma_direct_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction direction)
{
	/* Only DMA_FROM_DEVICE buffers need invalidating before the CPU reads */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				  size_t size, enum dma_data_direction direction)
{
	/* Only DMA_TO_DEVICE buffers need flushing before the device reads */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc = dma_direct_alloc_coherent,
	.free = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for the DMA-API debugging facility */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
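
/*
 * Minimal sketch of the early-init hook that consumes the macro above: it
 * seeds the DMA-API debugging code with a preallocated entry pool. The
 * initcall name and level are assumptions; dma_debug_init() is the standard
 * consumer of such a constant in this kernel era.
 */
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_init);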