/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        return newprot;
}
#endif
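
/*
 * change_pte_range - walk the PTEs under one pmd entry and rewrite
 * their protection bits under the page-table lock. Present PTEs get
 * pte_modify(); write-migration swap entries are downgraded to read,
 * since a protection check on them would be difficult.
 */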
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pte_t *pte, oldpte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;

                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);

                        /* Avoid taking write faults for pages we know
                         * to be dirty. */
                        if (dirty_accountable && pte_dirty(ptent))
                                ptent = pte_mkwrite(ptent);

                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                } else if (PAGE_MIGRATION && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);

                        if (is_write_migration_entry(entry)) {
                                /* A protection check is difficult, so
                                 * just be safe and disable write. */
                                make_migration_entry_read(&entry);
                                set_pte_at(mm, addr, pte,
                                           swp_entry_to_pte(entry));
                        }
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
}
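
/*
 * change_pmd_range - walk the pmd entries under one pud entry.
 * A transparent huge page is changed in one step by change_huge_pmd()
 * when it spans the whole range, and split otherwise.
 */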
static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma->vm_mm, pmd);
                        else if (change_huge_pmd(vma, pmd, addr, newprot))
                                continue;
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
                                 dirty_accountable);
        } while (pmd++, addr = next, addr != end);
}
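
/*
 * change_pud_range - walk the pud entries under one pgd entry and
 * descend into change_pmd_range() for each non-empty pud.
 */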
static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                change_pmd_range(vma, pud, addr, next, newprot,
                                 dirty_accountable);
        } while (pud++, addr = next, addr != end);
}
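
/*
 * change_protection - top of the page-table walk: flush the cache for
 * the range, rewrite every level from the pgd down, then flush the TLB.
 */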
static void change_protection(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;

        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                change_pud_range(vma, pgd, addr, next, newprot,
                                 dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
}
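
/*
 * mprotect_fixup - apply newflags to [start, end) within one vma:
 * charge newly writable private memory, merge or split the vma as
 * needed, then rewrite the affected page tables.
 */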
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        unsigned long start, unsigned long end, unsigned long newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long oldflags = vma->vm_flags;
        long nrpages = (end - start) >> PAGE_SHIFT;
        unsigned long charged = 0;
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;

        if (newflags == oldflags) {
                *pprev = vma;
                return 0;
        }

        /*
         * Making a private mapping writable increases our commit;
         * the charge cannot be dropped again on mprotect(), so the
         * range is accounted here and VM_ACCOUNT is set.
         */
        if (newflags & VM_WRITE) {
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                  VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                        if (security_vm_enough_memory(charged))
                                return -ENOMEM;
                        newflags |= VM_ACCOUNT;
                }
        }

        /* First try to merge with the previous and/or next vma. */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
        }

        *pprev = vma;
        if (start != vma->vm_start) {
                error = split_vma(mm, vma, start, 1);
                if (error)
                        goto fail;
        }
        if (end != vma->vm_end) {
                error = split_vma(mm, vma, end, 0);
                if (error)
                        goto fail;
        }

success:
        /* vm_flags and vm_page_prot are protected by mmap_sem held for write. */
        vma->vm_flags = newflags;
        vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
                                          vm_get_page_prot(newflags));
        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;
        }

        mmu_notifier_invalidate_range_start(mm, start, end);
        if (is_vm_hugetlb_page(vma))
                hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
        else
                change_protection(vma, start, end, vma->vm_page_prot,
                                  dirty_accountable);
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
        return 0;

fail:
        vm_unacct_memory(charged);
        return error;
}
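
/*
 * The mprotect() system call entry point. After argument validation,
 * walk the vmas covering [start, start+len) under mmap_sem and apply
 * the new protection to each piece through mprotect_fixup().
 */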
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                unsigned long, prot)
{
        unsigned long vm_flags, nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        int error = -EINVAL;
        const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

        /* ... validation of start, len and prot, and the computation
         * of end = start + PAGE_ALIGN(len), elided ... */

        vm_flags = calc_vm_prot_bits(prot);

        down_write(&current->mm->mmap_sem);

        vma = find_vma_prev(current->mm, start, &prev);
        error = -ENOMEM;
        if (!vma)
                goto out;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
                start = vma->vm_start;
                error = -EINVAL;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto out;
        }
        /* ... checks for the non-growing case and PROT_GROWSUP elided ... */

        for (nstart = start ; ; ) {
                unsigned long newflags;

                /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
                newflags = vm_flags |
                        (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

                /* newflags >> 4 shifts the VM_MAY% bits into the place
                 * of the corresponding VM_% bits */
                if ((newflags & ~(newflags >> 4)) &
                                (VM_READ | VM_WRITE | VM_EXEC)) {
                        error = -EACCES;
                        goto out;
                }

                /* ... security_file_mprotect() check elided ... */

                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
                nstart = tmp;

                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        goto out;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        goto out;
                }
        }
out:
        up_write(&current->mm->mmap_sem);
        return error;
}