49 #include <linux/slab.h>
54 #include <linux/export.h>
61 #include <asm/tlbflush.h>
66 static struct kmem_cache *anon_vma_chain_cachep;
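/*
 * anon_vma_alloc() and anon_vma_free(): slab-backed allocation of
 * struct anon_vma; a freshly allocated anon_vma starts out as its own root.
 */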
68 static inline struct anon_vma *anon_vma_alloc(void)
79 anon_vma->root = anon_vma;
85 static inline void anon_vma_free(struct anon_vma *anon_vma)
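/*
 * If the root mutex happens to be held, take and release it once so that a
 * racing lock holder (e.g. page_lock_anon_vma()) finishes before the
 * structure is freed.
 */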
106 if (mutex_is_locked(&anon_vma->root->mutex)) {
107 anon_vma_lock(anon_vma);
108 anon_vma_unlock(anon_vma);
126 struct anon_vma *anon_vma)
131 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
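/*
 * Excerpt from anon_vma_prepare(): make sure vma->anon_vma is set up before
 * the first anonymous fault, allocating a new anon_vma when needed.
 */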
163 struct anon_vma *anon_vma = vma->anon_vma;
169 struct anon_vma *allocated;
178 anon_vma = anon_vma_alloc();
180 goto out_enomem_free_avc;
181 allocated = anon_vma;
184 anon_vma_lock(anon_vma);
189 anon_vma_chain_link(vma, avc, anon_vma);
194 anon_vma_unlock(anon_vma);
197 put_anon_vma(allocated);
199 anon_vma_chain_free(avc);
204 anon_vma_chain_free(avc);
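/*
 * lock_anon_vma_root()/unlock_anon_vma_root(): batch the root mutex across a
 * loop over anon_vma chains, re-locking only when the root changes.
 */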
217 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
219 struct anon_vma *new_root = anon_vma->root;
220 if (new_root != root) {
229 static inline void unlock_anon_vma_root(struct anon_vma *root)
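/*
 * Excerpt from anon_vma_clone(): duplicate the source VMA's anon_vma chain
 * onto the destination, linking each avc under the (batched) root lock.
 */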
242 struct anon_vma *root = NULL;
245 struct anon_vma *anon_vma;
249 unlock_anon_vma_root(root);
256 root = lock_anon_vma_root(root, anon_vma);
257 anon_vma_chain_link(dst, avc, anon_vma);
259 unlock_anon_vma_root(root);
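/*
 * Excerpt from anon_vma_fork(): give a forked child VMA its own anon_vma,
 * chained below the parent's root, so COW pages can be tracked separately.
 */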
275 struct anon_vma *anon_vma;
289 anon_vma = anon_vma_alloc();
294 goto out_error_free_anon_vma;
306 get_anon_vma(anon_vma->root);
309 anon_vma_lock(anon_vma);
310 anon_vma_chain_link(vma, avc, anon_vma);
311 anon_vma_unlock(anon_vma);
315 out_error_free_anon_vma:
316 put_anon_vma(anon_vma);
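/*
 * Excerpt (likely from unlink_anon_vmas()): tear down a VMA's anon_vma chain,
 * dropping each anon_vma reference and freeing the chain entries.
 */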
325 struct anon_vma *root = NULL;
332 struct anon_vma *anon_vma = avc->anon_vma;
334 root = lock_anon_vma_root(root, anon_vma);
345 anon_vma_chain_free(avc);
347 unlock_anon_vma_root(root);
355 struct anon_vma *anon_vma = avc->anon_vma;
357 put_anon_vma(anon_vma);
360 anon_vma_chain_free(avc);
364 static void anon_vma_ctor(void *data)
366 struct anon_vma *anon_vma = data;
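/*
 * Excerpt from page_get_anon_vma(): decode the anon_vma pointer stored in
 * page->mapping and take a reference, rechecking that the page is still
 * mapped.
 */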
405 struct anon_vma *anon_vma = NULL;
406 unsigned long anon_mapping;
410 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
412 if (!page_mapped(page))
415 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
428 if (!page_mapped(page)) {
429 put_anon_vma(anon_vma);
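/*
 * Excerpt from page_lock_anon_vma(): like page_get_anon_vma(), but also
 * takes the anon_vma root mutex; page_mapped() is rechecked because the page
 * can be unmapped concurrently.
 */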
447 struct anon_vma *anon_vma = NULL;
448 struct anon_vma *root_anon_vma;
449 unsigned long anon_mapping;
453 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
455 if (!page_mapped(page))
458 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
466 if (!page_mapped(page)) {
479 if (!page_mapped(page)) {
480 put_anon_vma(anon_vma);
487 anon_vma_lock(anon_vma);
495 anon_vma_unlock(anon_vma);
509 anon_vma_unlock(anon_vma);
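/*
 * __vma_address()/vma_address(): translate a page's index into the user
 * virtual address it maps to within a given VMA (hugetlb pages need their
 * own offset arithmetic).
 */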
515 static inline unsigned long
520 if (unlikely(is_vm_hugetlb_page(vma)))
529 unsigned long address = __vma_address(page, vma);
544 if (PageAnon(page)) {
545 struct anon_vma *page__anon_vma = page_anon_vma(page);
550 if (!vma->anon_vma || !page__anon_vma ||
559 address = __vma_address(page, vma);
600 if (pmd_trans_huge(*pmd))
610 ptl = pte_lockptr(mm, pmd);
617 pte_unmap_unlock(pte, ptl);
636 address = __vma_address(page, vma);
639 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
642 pte_unmap_unlock(pte, ptl);
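/*
 * Excerpt from page_referenced_one(): test and clear the young/accessed bit
 * for one mapping of the page; a VM_LOCKED VMA stops the walk and is
 * reported back through *vm_flags.
 */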
652 unsigned long address, unsigned int *mapcount,
658 if (unlikely(PageTransHuge(page))) {
676 *vm_flags |= VM_LOCKED;
692 pte = page_check_address(page, mm, address, &ptl, 0);
697 pte_unmap_unlock(pte, ptl);
699 *vm_flags |= VM_LOCKED;
711 if (likely(!VM_SequentialReadHint(vma)))
714 pte_unmap_unlock(pte, ptl);
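/*
 * page_referenced_anon()/page_referenced_file(): walk every VMA mapping the
 * page via the anon_vma or i_mmap interval tree, optionally filtered by
 * memory cgroup, and sum the per-mapping reference results.
 */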
725 static int page_referenced_anon(struct page *page,
729 unsigned int mapcount;
730 struct anon_vma *anon_vma;
739 mapcount = page_mapcount(page);
741 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
749 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
752 &mapcount, vm_flags);
774 static int page_referenced_file(struct page *page,
776 unsigned long *vm_flags)
778 unsigned int mapcount;
797 BUG_ON(!PageLocked(page));
805 mapcount = page_mapcount(page);
807 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
814 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
817 &mapcount, vm_flags);
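/*
 * Excerpt from page_referenced(): top-level entry that locks the page when
 * necessary and dispatches to the anon or file variant.
 */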
839 unsigned long *vm_flags)
845 if (page_mapped(page) && page_rmapping(page)) {
846 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
847 we_locked = trylock_page(page);
856 else if (PageAnon(page))
857 referenced += page_referenced_anon(page, memcg,
860 referenced += page_referenced_file(page, memcg,
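/*
 * page_mkclean_one()/page_mkclean_file()/page_mkclean(): write-protect and
 * clean the PTEs of a file page in every VMA that maps it, so that dirtying
 * can be caught again via a fault.
 */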
872 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
873 unsigned long address)
880 pte = page_check_address(page, mm, address, &ptl, 1);
895 pte_unmap_unlock(pte, ptl);
898 mmu_notifier_invalidate_page(mm, address);
903 static int page_mkclean_file(struct address_space *mapping, struct page *page)
912 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
915 ret += page_mkclean_one(page, vma, address);
926 BUG_ON(!PageLocked(page));
928 if (page_mapped(page)) {
931 ret = page_mkclean_file(mapping, page);
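/*
 * Excerpt from page_move_anon_rmap(): point page->mapping at the new VMA's
 * anon_vma, with the PAGE_MAPPING_ANON bit set in the pointer.
 */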
952 struct anon_vma *anon_vma = vma->anon_vma;
958 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
969 static void __page_set_anon_rmap(struct page *page,
972 struct anon_vma *anon_vma = vma->anon_vma;
985 anon_vma = anon_vma->root;
987 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
989 page->index = linear_page_index(vma, address);
998 static void __page_check_anon_rmap(struct page *page,
1001 #ifdef CONFIG_DEBUG_VM
1015 BUG_ON(page->index != linear_page_index(vma, address));
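/*
 * Excerpts from do_page_add_anon_rmap() and page_add_new_anon_rmap():
 * account a new anonymous mapping and set up page->mapping/page->index via
 * __page_set_anon_rmap().
 */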
1042 struct vm_area_struct *vma, unsigned long address, int exclusive)
1046 if (!PageTransHuge(page))
1049 __inc_zone_page_state(page,
1058 __page_set_anon_rmap(page, vma, address, exclusive);
1060 __page_check_anon_rmap(page, vma, address);
1077 SetPageSwapBacked(page);
1079 if (!PageTransHuge(page))
1083 __page_set_anon_rmap(page, vma, address, 1);
1084 if (!mlocked_vma_newpage(vma, page))
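/*
 * Excerpts from page_add_file_rmap() and page_remove_rmap(): adjust the
 * mapcount and per-zone/memcg statistics when a mapping is added to or
 * removed from a page.
 */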
1099 unsigned long flags;
1101 mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1106 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1118 bool anon = PageAnon(page);
1120 unsigned long flags;
1128 mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1152 if (mapping && !mapping_cap_account_dirty(mapping) &&
1163 if (!PageTransHuge(page))
1166 __dec_zone_page_state(page,
1171 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1187 mem_cgroup_end_update_page_stat(page, &locked, &flags);
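/*
 * Excerpt from try_to_unmap_one(): unmap one PTE mapping of the page,
 * flushing the TLB and, for anonymous pages, installing a swap (or
 * hwpoison/migration) entry in its place.
 */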
1203 pte = page_check_address(page, mm, address, &ptl, 0);
1235 update_hiwater_rss(mm);
1243 swp_entry_to_pte(make_hwpoison_entry(page)));
1244 } else if (PageAnon(page)) {
1245 swp_entry_t entry = { .val = page_private(page) };
1247 if (PageSwapCache(page)) {
1257 if (list_empty(&mm->mmlist)) {
1259 if (list_empty(&mm->mmlist))
1274 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1281 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1289 pte_unmap_unlock(pte, ptl);
1291 mmu_notifier_invalidate_page(mm, address);
1296 pte_unmap_unlock(pte, ptl);
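/*
 * Nonlinear (VM_NONLINEAR) file mappings cannot be found through the page's
 * index, so try_to_unmap_cluster() scans such VMAs in CLUSTER_SIZE chunks.
 */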
1341 #define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
1342 #define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
1344 static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1356 unsigned long mmun_start;
1357 unsigned long mmun_end;
1383 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1390 locked_vma = (vma->vm_flags & VM_LOCKED);
1395 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1398 update_hiwater_rss(mm);
1404 BUG_ON(!page || PageAnon(page));
1408 if (page == check_page)
1421 if (page->index != linear_page_index(vma, address))
1433 pte_unmap_unlock(pte - 1, ptl);
1434 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
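/*
 * The lines below (likely from is_vma_temporary_stack()) detect a stack VMA
 * that is still being moved into place during exec.
 */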
1442 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1447 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1448 VM_STACK_INCOMPLETE_SETUP)
1470 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1472 struct anon_vma *anon_vma;
1482 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1523 static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
1529 unsigned long cursor;
1530 unsigned long max_nl_cursor = 0;
1531 unsigned long max_nl_size = 0;
1532 unsigned int mapcount;
1535 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1556 if (cursor > max_nl_cursor)
1557 max_nl_cursor = cursor;
1559 if (cursor > max_nl_size)
1560 max_nl_size = cursor;
1563 if (max_nl_size == 0) {
1575 mapcount = page_mapcount(page);
1581 if (max_nl_cursor == 0)
1588 while ( cursor < max_nl_cursor &&
1590 if (try_to_unmap_cluster(cursor, &mapcount,
1595 if ((int)mapcount <= 0)
1602 } while (max_nl_cursor <= max_nl_size);
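/*
 * Excerpts from try_to_unmap() and try_to_munlock(): top-level entry points
 * that dispatch to the KSM, anon or file unmap walkers.
 */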
1634 BUG_ON(!PageLocked(page));
1639 else if (PageAnon(page))
1640 ret = try_to_unmap_anon(page, flags);
1642 ret = try_to_unmap_file(page, flags);
1665 VM_BUG_ON(!PageLocked(page) || PageLRU(page));
1669 else if (PageAnon(page))
1677 struct anon_vma *root = anon_vma->root;
1680 anon_vma_free(root);
1682 anon_vma_free(anon_vma);
1685 #ifdef CONFIG_MIGRATION
1690 static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1693 struct anon_vma *anon_vma;
1704 anon_vma = page_anon_vma(page);
1707 anon_vma_lock(anon_vma);
1708 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1711 ret = rmap_one(page, vma, address, arg);
1715 anon_vma_unlock(anon_vma);
1719 static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1730 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1732 ret = rmap_one(page, vma, address, arg);
1745 int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
1751 return rmap_walk_ksm(page, rmap_one, arg);
1752 else if (PageAnon(page))
1753 return rmap_walk_anon(page, rmap_one, arg);
1755 return rmap_walk_file(page, rmap_one, arg);
1759 #ifdef CONFIG_HUGETLB_PAGE
1765 static void __hugepage_set_anon_rmap(struct page *page,
1766 struct vm_area_struct *vma, unsigned long address, int exclusive)
1768 struct anon_vma *anon_vma = vma->anon_vma;
1775 anon_vma = anon_vma->root;
1777 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1779 page->index = linear_page_index(vma, address);
1782 void hugepage_add_anon_rmap(struct page *page,
1785 struct anon_vma *anon_vma = vma->anon_vma;
1788 BUG_ON(!PageLocked(page));
1793 __hugepage_set_anon_rmap(page, vma, address, 0);
1796 void hugepage_add_new_anon_rmap(struct page *page,
1801 __hugepage_set_anon_rmap(page, vma, address, 1);