32 #include <linux/types.h>
37 #define BAD_DMA_ADDRESS ((u64) 0)
56 BUG_ON(!valid_dma_direction(direction));
60 static void qib_dma_unmap_single(
struct ib_device *dev,
u64 addr,
size_t size,
63 BUG_ON(!valid_dma_direction(direction));
67 unsigned long offset,
size_t size,
72 BUG_ON(!valid_dma_direction(direction));
88 static void qib_dma_unmap_page(
struct ib_device *dev,
u64 addr,
size_t size,
91 BUG_ON(!valid_dma_direction(direction));
102 BUG_ON(!valid_dma_direction(direction));
115 static void qib_unmap_sg(
struct ib_device *dev,
119 BUG_ON(!valid_dma_direction(direction));
/* Return the DMA length of a scatterlist segment. */
static unsigned int qib_sg_dma_len(struct ib_device *dev,
				   struct scatterlist *sg)
{
	return sg_dma_len(sg);
}
137 static void qib_sync_single_for_cpu(
struct ib_device *dev,
u64 addr,
142 static void qib_sync_single_for_device(
struct ib_device *dev,
u64 addr,
148 static void *qib_dma_alloc_coherent(
struct ib_device *dev,
size_t size,
158 *dma_handle = (
u64) addr;
162 static void qib_dma_free_coherent(
struct ib_device *dev,
size_t size,
163 void *cpu_addr,
u64 dma_handle)
169 .mapping_error = qib_mapping_error,
170 .map_single = qib_dma_map_single,
171 .unmap_single = qib_dma_unmap_single,
172 .map_page = qib_dma_map_page,
173 .unmap_page = qib_dma_unmap_page,
174 .map_sg = qib_map_sg,
175 .unmap_sg = qib_unmap_sg,
176 .dma_address = qib_sg_dma_address,
177 .dma_len = qib_sg_dma_len,
178 .sync_single_for_cpu = qib_sync_single_for_cpu,
179 .sync_single_for_device = qib_sync_single_for_device,
180 .alloc_coherent = qib_dma_alloc_coherent,
181 .free_coherent = qib_dma_free_coherent