#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
#if __LINUX_ARM_ARCH__ < 6
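/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */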
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn, pte_t *ptep)
{
        pte_t entry = *ptep;
        int ret;

        /* If this page is present, it's actually being shared. */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }

        return ret;
}
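/*
 * Resolve the PTE for @address in @vma's mm by walking the
 * pgd/pud/pmd levels, then hand it to do_adjust_pte() under
 * the (nested) PTE lock.
 */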
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        /*
         * This is called while another page table is mapped, so we
         * must use the nested version.  This also means we need to
         * open-code the spin-locking.
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map(pmd, address);
        spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

        ret = do_adjust_pte(vma, address, pfn, pte);

        spin_unlock(ptl);
        pte_unmap(pte);

        return ret;
}
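/*
 * Walk every shared mapping of the same file offset and mark each
 * aliasing PTE in this mm uncacheable; if any aliases were found,
 * downgrade the faulting PTE as well so all views stay coherent.
 */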
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
        unsigned long addr, pte_t *ptep, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
        }
        flush_dcache_mmap_unlock(mapping);
        if (aliases)
                do_adjust_pte(vma, addr, pfn, ptep);
}
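/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory range
 *     are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     different spaces, we need to ensure that the cache is kept
 *     coherent where necessary.
 *
 * Note that the pte lock will be held.
 */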
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, ptep, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}
#endif  /* __LINUX_ARM_ARCH__ < 6 */
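/*
 * Check whether the write buffer has physical address aliasing
 * issues: write 1 through one mapping of a page and 0 through a
 * second mapping of the same page, then read back through the
 * first.  On a coherent write buffer the read returns 0.
 */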
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}
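/*
 * Map one physical page at two kernel virtual addresses and run the
 * aliasing test above.  If the test fails, or the page cannot be
 * allocated or mapped, fall back to uncached PTEs for shared mappings.
 */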
void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");
        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                        L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory\n";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page\n";
        }

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
}