#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/capability.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
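
/*
 * get_old_pmd(): walk the source mm's page tables down to the pmd covering
 * old_addr, giving up if the pgd or pud entry is missing or bad.
 */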
        if (pgd_none_or_clear_bad(pgd))

        if (pud_none_or_clear_bad(pud))
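
/*
 * move_ptes(): copy one pmd's worth of page-table entries from the old
 * virtual address range to the new one.
 */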
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks)
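        /*
         * When need_rmap_locks is set, take the file mapping's i_mmap lock
         * (if the vma is file backed) and the anon_vma lock (if it has one),
         * so that concurrent rmap walks always see each pte at either its
         * old or its new location while it is being moved.
         */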
        if (need_rmap_locks) {
                mapping = vma->vm_file->f_mapping;
                anon_vma_lock(anon_vma);
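
        /*
         * Both the old and the new pte spinlocks are taken; ordering is not
         * a concern because the exclusive mmap_sem prevents deadlock.
         */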
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
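
        /*
         * Walk the range one page at a time, transferring each pte from its
         * old slot to the matching new address.
         */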
        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);

        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        anon_vma_unlock(anon_vma);
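
/*
 * LATENCY_LIMIT caps how much is handed to a single move_ptes() call, so
 * that the pte locks are not held for too long at a time.
 */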
#define LATENCY_LIMIT   (64 * PAGE_SIZE)
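
/*
 * move_page_tables(): move up to len bytes of page-table entries from
 * old_addr to new_addr, one pmd at a time, and return how many bytes were
 * actually moved.
 */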
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
        pmd_t *old_pmd, *new_pmd;
        bool need_flush = false;
        unsigned long mmun_start;
        unsigned long mmun_end;

        old_end = old_addr + len;

        mmun_start = old_addr;
        mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
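
        /*
         * Loop over the range one pmd at a time; extent is the chunk handled
         * in this iteration, clamped so that it never crosses a pmd boundary
         * on either side nor the end of the range.
         */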
        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                extent = next - old_addr;
                if (extent > old_end - old_addr)
                        extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (pmd_trans_huge(*old_pmd)) {
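                        /*
                         * Elided here: a transparent huge pmd is either moved
                         * as a whole or split back into ordinary ptes before
                         * the per-pte copy below.
                         */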
                if (extent > next - new_addr)
                        extent = next - new_addr;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                          new_vma, new_pmd, new_addr, need_rmap_locks);

        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

        return len + old_addr - old_end;
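
/*
 * move_vma(): relocate a whole mapping: set up the new vma, move the page
 * tables across, then unmap the old range.
 */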
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        bool need_rmap_locks;
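
        /*
         * Any KSM-merged pages in the range are broken up first so they do
         * not reappear at the new address, then copy_vma() sets up the vma
         * for the target range (and reports whether move_ptes() needs the
         * rmap locks).
         */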
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                                                MADV_UNMERGEABLE, &vm_flags);

        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
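
        /*
         * If move_page_tables() could not move everything, the elided lines
         * move the already-moved entries back and arrange to unmap the new
         * area instead of the old one.
         */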
        if (moved_len < old_len) {
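
        /*
         * If the mapping is accounted (VM_ACCOUNT), the flag is temporarily
         * cleared on the old vma so that unmapping it below does not undo
         * the charge now carried by the new mapping; leftover pieces get
         * the flag back afterwards.
         */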
        if (vm_flags & VM_ACCOUNT) {
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)

        if (do_munmap(mm, old_addr, old_len) < 0) {

        vma->vm_next->vm_flags |= VM_ACCOUNT;
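
        /*
         * For a VM_LOCKED mapping the locked_vm accounting follows the move,
         * and any newly added tail is mlocked as well.
         */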
        if (vm_flags & VM_LOCKED) {
                if (new_len > old_len)
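
/*
 * vma_to_resize(): look up the vma at addr and check that it may be resized
 * to new_len; on success *p is set to the number of pages newly charged
 * when the mapping is accounted.
 */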
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
        if (is_vm_hugetlb_page(vma))

        if (old_len > vma->vm_end - addr)

        if (new_len > old_len) {
                if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))

        unsigned long locked, lock_limit;
        locked += new_len - old_len;

        unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
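
/*
 * mremap_to(): handle the MREMAP_FIXED case, where the caller names the
 * exact address the mapping must end up at.
 */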
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
                unsigned long new_addr, unsigned long new_len)

        unsigned long charged = 0;
        unsigned long map_flags;
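
        /*
         * The requested new range must not overlap the old one; overlap in
         * either direction is rejected.
         */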
        if ((new_addr <= addr) && (new_addr+new_len) > addr)

        if ((addr <= new_addr) && (addr+old_len) > new_addr)

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)

        vma = vma_to_resize(addr, old_len, new_len, &charged);

        ret = move_vma(vma, addr, old_len, new_len, new_addr);

        vm_unacct_memory(charged);
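
/*
 * The mremap() system call itself: expand or shrink an existing mapping,
 * possibly moving it at the same time (MREMAP_MAYMOVE), to a caller-chosen
 * address if MREMAP_FIXED is set.
 */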
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        ret = mremap_to(addr, old_len, new_addr, new_len);
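
        /*
         * A shrinking remap is always allowed: it just unmaps the tail of
         * the old range and returns the same address.
         */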
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)

        vma = vma_to_resize(addr, old_len, new_len, &charged);

        if (old_len == vma->vm_end - addr) {
                if (vma_expandable(vma, new_len - old_len)) {
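                /*
                 * Elided here: when the area can simply grow in place, the
                 * vma is extended and the old address is returned.  Otherwise,
                 * with MREMAP_MAYMOVE, a fresh range is found and the mapping
                 * is moved there with move_vma().
                 */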
                unsigned long map_flags = 0;

                ret = move_vma(vma, addr, old_len, new_len, new_addr);

        vm_unacct_memory(charged);