#include <linux/module.h>
#include <asm/sn/intr.h>
#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
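
/*
 * Illustrative sketch (not part of the original file): walking a
 * scatterlist with the helpers above. The function and variable names
 * here are hypothetical; for_each_sg() is the standard iterator.
 */
static inline void example_walk_sg(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* physical address that would be handed to the bus provider */
		unsigned long phys = SG_ENT_PHYS_ADDRESS(sg);

		(void)phys;
	}
}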
/* In sn_dma_supported(): masks below 0x7fffffff are rejected outright. */
	if (mask < 0x7fffffff)

/* In sn_dma_set_mask(): refuse to set a mask the hardware cannot honour. */
	if (!sn_dma_supported(dev, dma_mask))
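
/*
 * Caller-side sketch (not part of the original file): a driver
 * negotiates its DMA mask through the generic API, which reaches
 * sn_dma_supported() via the ops table at the end of this file.
 * example_setup_mask() and "pdev" are hypothetical.
 */
static int example_setup_mask(struct pci_dev *pdev)
{
	/* prefer 64-bit addressing; fall back to 32-bit before giving up */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}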

static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags,
				   struct dma_attrs *attrs)
	struct page *p = alloc_pages_exact_node(node,
						flags, get_order(size));
	memset(cpuaddr, 0x0, size);

	phys_addr = __pa(cpuaddr);
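
/*
 * Caller-side sketch (not part of the original file): the zeroed,
 * bus-mapped buffer built above comes back to drivers through the
 * generic API. example_alloc_ring() and its arguments are hypothetical.
 */
static void *example_alloc_ring(struct pci_dev *pdev, dma_addr_t *ring_dma)
{
	/* returns the kernel virtual address; *ring_dma gets the bus address */
	return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}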

static void sn_dma_free_coherent(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t dma_handle,
				 struct dma_attrs *attrs)
	provider->dma_unmap(pdev, dma_handle, 0);

static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir, struct dma_attrs *attrs)
	phys_addr = __pa(cpu_addr);
	dma_addr = provider->dma_map(pdev, phys_addr, size,
				     SN_DMA_ADDR_PHYS);

/* In sn_dma_unmap_page(): */
	provider->dma_unmap(pdev, dma_addr, dir);

/* In sn_dma_map_sg(), per-entry mapping: */
		dma_addr = provider->dma_map(pdev, phys_addr,
					     sg->length, SN_DMA_ADDR_PHYS);
		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}
		sg->dma_length = sg->length;
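
/*
 * Condensed reconstruction (a sketch, not the verbatim function) of how
 * the sn_dma_map_sg() fragments above fit together: map each entry, and
 * on the first failure unwind the i entries already mapped, returning 0
 * so the caller sees the whole list as unmapped.
 */
static int example_map_sg_shape(struct device *dev, struct pci_dev *pdev,
				struct sn_pcibus_provider *provider,
				struct scatterlist *sgl, int nhwentries,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	struct scatterlist *saved_sg = sgl, *sg;
	int i;

	for_each_sg(sgl, sg, nhwentries, i) {
		unsigned long phys_addr = SG_ENT_PHYS_ADDRESS(sg);

		sg->dma_address = provider->dma_map(pdev, phys_addr,
						    sg->length,
						    SN_DMA_ADDR_PHYS);
		if (!sg->dma_address) {
			/* undo the i entries already mapped */
			sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}
		sg->dma_length = sg->length;
	}
	return nhwentries;
}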

/* DMA on SN is cache-coherent; these sync hooks have nothing to flush. */
static void sn_dma_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)

static void sn_dma_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)

static void sn_dma_sync_sg_for_device(struct device *dev,
				      struct scatterlist *sg, int nelems,
				      enum dma_data_direction dir)

/* In sn_pci_legacy_read(): tail of the SAL call that performs the read */
		 port, size, __pa(val));
	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

/* In sn_pci_legacy_write(): tail of the SAL call that performs the write */
		 port, size, __pa(&val));
	addr = (unsigned long *)paddr;
	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	}
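
/*
 * The sized volatile casts above force a single store of exactly the
 * requested width. The same technique as a stand-alone helper
 * (illustrative, not part of the original file):
 */
static inline void example_sized_store(volatile void *addr, u32 val, u8 size)
{
	switch (size) {
	case 1:
		*(volatile u8 *)addr = (u8)val;
		break;
	case 2:
		*(volatile u16 *)addr = (u16)val;
		break;
	case 4:
		*(volatile u32 *)addr = val;
		break;
	}
}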

static struct dma_map_ops sn_dma_ops = {
	.alloc = sn_dma_alloc_coherent,
	.free = sn_dma_free_coherent,
	.map_page = sn_dma_map_page,
	.unmap_page = sn_dma_unmap_page,
	.map_sg = sn_dma_map_sg,
	.unmap_sg = sn_dma_unmap_sg,
	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
	.sync_single_for_device = sn_dma_sync_single_for_device,
	.sync_sg_for_device = sn_dma_sync_sg_for_device,
	.mapping_error = sn_dma_mapping_error,
	.dma_supported = sn_dma_supported,
};
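
/*
 * Hedged sketch of how the table is consumed (not part of the original
 * file): the generic DMA API dispatches through the installed
 * dma_map_ops, so the driver-level calls below land in sn_dma_map_page()
 * and sn_dma_mapping_error(). example_map_buffer() and its arguments are
 * hypothetical.
 */
static int example_map_buffer(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus_addr;

	bus_addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, bus_addr))
		return -ENOMEM;

	/* ... run the DMA, wait for completion ... */

	dma_unmap_single(&pdev->dev, bus_addr, len, DMA_TO_DEVICE);
	return 0;
}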