25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/kernel.h>
28 #include <linux/errno.h>
29 #include <linux/string.h>
30 #include <linux/types.h>
33 #include <linux/export.h>
35 #include <asm/tlbflush.h>
/*
 * Virtual window used for "consistent" DMA mappings: it starts at
 * IOREMAP_TOP and is CONFIG_CONSISTENT_SIZE bytes long.
 * CONSISTENT_OFFSET(x) converts a virtual address inside that window
 * into a zero-based page index relative to CONSISTENT_BASE.
 * NOTE(review): presumably this window is mapped non-cacheable for
 * non-coherent DMA — confirm against the surrounding allocator code,
 * which is not fully visible here.
 */
45 #define CONSISTENT_BASE (IOREMAP_TOP)
46 #define CONSISTENT_END (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
47 #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
109 if ((addr + size) < addr)
123 new->vm_start =
addr;
124 new->vm_end = addr +
size;
126 spin_unlock_irqrestore(&consistent_lock, flags);
130 spin_unlock_irqrestore(&consistent_lock, flags);
169 dev_warn(dev,
"coherent DMA mask is unset\n");
174 dev_warn(dev,
"coherent DMA mask %#llx is smaller "
175 "than system GFP_DMA mask %#llx\n",
176 mask, (
unsigned long long)ISA_DMA_THRESHOLD);
183 limit = (mask + 1) & ~mask;
184 if ((limit && size >= limit) ||
194 if (mask != 0xffffffff)
214 c = ppc_vm_region_alloc(&consistent_head, size,
218 struct page *
end = page + (1 <<
order);
228 SetPageReserved(page);
265 c = ppc_vm_region_find(&consistent_head, (
unsigned long)vaddr);
291 ClearPageReserved(page);
302 spin_unlock_irqrestore(&consistent_lock, flags);
308 spin_unlock_irqrestore(&consistent_lock, flags);
320 unsigned long start = (
unsigned long)vaddr;
321 unsigned long end = start +
size;
337 clean_dcache_range(start, end);
346 #ifdef CONFIG_HIGHMEM
356 static inline void __dma_sync_page_highmem(
struct page *
page,
359 size_t seg_size =
min((
size_t)(
PAGE_SIZE - offset), size);
360 size_t cur_size = seg_size;
371 __dma_sync((
void *)start, seg_size, direction);
379 cur_size += seg_size;
381 }
while (seg_nr < nr_segs);
392 size_t size,
int direction)
394 #ifdef CONFIG_HIGHMEM
395 __dma_sync_page_highmem(page, offset, size, direction);