#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rbtree.h>

#ifndef CONFIG_DISCONTIGMEM
extern unsigned long totalram_pages;

#define sysctl_legacy_va_layout 0

#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

extern unsigned int kobjsize(const void *objp);
#define VM_NONE		0x00000000

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_DENYWRITE	0x00000800

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000

#define VM_DONTCOPY	0x00020000
#define VM_DONTEXPAND	0x00040000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_HUGETLB	0x00400000
#define VM_NONLINEAR	0x00800000
#define VM_ARCH_1	0x01000000
#define VM_DONTDUMP	0x04000000

#define VM_MIXEDMAP	0x10000000
#define VM_HUGEPAGE	0x20000000
#define VM_NOHUGEPAGE	0x40000000
#define VM_MERGEABLE	0x80000000
#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
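
/*
 * Illustrative sketch, not part of this header: vm_flags is queried with
 * plain bit tests.  The helper below is hypothetical and only shows how
 * the VM_* and VM_MAY* bits above combine in practice; a private writable
 * mapping is a copy-on-write candidate.
 *
 *	static inline int vma_is_cow_candidate(struct vm_area_struct *vma)
 *	{
 *		return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
 *	}
 */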
#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_NONLINEAR	0x02
#define FAULT_FLAG_MKWRITE	0x04
#define FAULT_FLAG_ALLOW_RETRY	0x08
#define FAULT_FLAG_RETRY_NOWAIT	0x10
#define FAULT_FLAG_KILLABLE	0x20
#define FAULT_FLAG_TRIED	0x40

	void __user *virtual_address;

struct vm_operations_struct {
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
static inline int get_freepage_migratetype(struct page *page)

static inline int put_page_testzero(struct page *page)

static inline int get_page_unless_zero(struct page *page)
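
/*
 * Illustrative sketch, an assumption rather than part of this header: a
 * speculative reference is normally taken with get_page_unless_zero() and
 * dropped with put_page(); put_page_testzero() is for callers that free
 * the page themselves once the count reaches zero.
 *
 *	if (get_page_unless_zero(page)) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */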
static inline int is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}
static inline unsigned long compound_lock_irqsave(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
static inline struct page *compound_head(struct page *page)

static inline void reset_page_mapcount(struct page *page)

static inline int page_mapcount(struct page *page)

static inline int page_count(struct page *page)

static inline void get_huge_page_tail(struct page *page)

static inline void get_page(struct page *page)
static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

static inline void init_page_count(struct page *page)
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)

static inline void __SetPageBuddy(struct page *page)

static inline void __ClearPageBuddy(struct page *page)
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	return (unsigned long)page[1].lru.prev;

static inline int compound_trans_order(struct page *page)
{
	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
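
/*
 * Illustrative sketch, an assumption rather than part of this header: the
 * first tail page of a compound page doubles as storage, so the order and
 * destructor recorded at allocation time can be read back at free time.
 * The destructor named here is just a stand-in for whichever one the
 * allocator installed:
 *
 *	set_compound_order(page, order);
 *	set_compound_page_dtor(page, free_compound_page);
 *	...
 *	(*get_compound_page_dtor(page))(page);
 */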
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
					SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
					NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
				  unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
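
/*
 * Illustrative sketch, an assumption rather than part of this header: the
 * zone, node and (with classic sparsemem) section number live in the high
 * bits of page->flags.  Boot-time initialisation packs them in and the
 * accessors above decode them again:
 *
 *	set_page_links(page, ZONE_NORMAL, nid, pfn);
 *	...
 *	zone = page_zone(page);    // &NODE_DATA(nid)->node_zones[ZONE_NORMAL]
 *	nid  = page_to_nid(page);
 */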
static __always_inline void *lowmem_page_address(const struct page *page)

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif
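
/*
 * Illustrative sketch, an assumption rather than part of this header:
 * whichever variant is selected above,
 *
 *	void *va = page_address(page);
 *
 * only yields a usable kernel virtual address for pages with a permanent
 * kernel mapping (lowmem); highmem pages must go through
 * kmap()/kmap_atomic() instead.
 */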
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline struct address_space *page_mapping(struct page *page)
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)

static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline pgoff_t page_index(struct page *page)
	return page_private(page);
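
/*
 * Illustrative sketch, an assumption rather than part of this header: the
 * low bits of page->mapping distinguish anonymous (and KSM) pages from
 * page-cache pages, so reverse-map code typically branches like this:
 *
 *	if (PageAnon(page))
 *		anon_vma = page_rmapping(page);	// tag bits stripped
 *	else
 *		mapping = page_mapping(page);	// struct address_space
 */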
static inline pgoff_t page_file_index(struct page *page)

static inline int page_mapped(struct page *page)
#define VM_FAULT_MINOR	0

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008
#define VM_FAULT_HWPOISON 0x0010
#define VM_FAULT_HWPOISON_LARGE 0x0020

#define VM_FAULT_NOPAGE	0x0100
#define VM_FAULT_LOCKED	0x0200
#define VM_FAULT_RETRY	0x0400

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
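
/*
 * Illustrative sketch, an assumption rather than part of this header: a
 * driver's ->fault() handler (see struct vm_operations_struct above)
 * reports its outcome with these codes.  my_fault() and my_lookup() are
 * hypothetical names:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup(vmf->pgoff);
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 */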
#define SHOW_MEM_FILTER_NODES		(0x0001u)

		unsigned long size, struct zap_details *);
		unsigned long start, unsigned long end);

	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
			unsigned long, unsigned long, struct mm_walk *);
			struct mm_walk *walk);

		unsigned long end, unsigned long floor, unsigned long ceiling);

		loff_t const holebegin, loff_t const holelen, int even_cows);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
		unsigned long address, unsigned int flags);
		unsigned long address, unsigned int fault_flags);

	return VM_FAULT_SIGBUS;

		unsigned int fault_flags)

		unsigned long start, int len, unsigned int foll_flags,
		struct page **pages);
		struct page **pages);
struct page *get_dump_page(unsigned long addr);
static inline int stack_guard_page_start(struct vm_area_struct *vma,
	return (vma->vm_flags & VM_GROWSDOWN) &&
		!vma_growsdown(vma->vm_prev, addr);

static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)

static inline int stack_guard_page_end(struct vm_area_struct *vma,
	return (vma->vm_flags & VM_GROWSUP) &&
		!vma_growsup(vma->vm_next, addr);
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long do_mremap(unsigned long addr,
		unsigned long old_len, unsigned long new_len,
		unsigned long flags, unsigned long new_addr);

		unsigned long end, unsigned long newflags);

		struct page **pages);
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	return (unsigned long)val;

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
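
/*
 * Illustrative sketch, an assumption rather than part of this header:
 * fault and unmap paths account resident pages per type through these
 * helpers, e.g. when an anonymous page is mapped and later torn down:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);
 *	...
 *	dec_mm_counter(mm, MM_ANONPAGES);
 *
 * get_mm_rss(mm) then reports the combined file plus anonymous total.
 */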
#if defined(SPLIT_RSS_COUNTING)

static inline void sync_mm_rss(struct mm_struct *mm)

static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
#ifdef __PAGETABLE_PUD_FOLDED

#ifdef __PAGETABLE_PMD_FOLDED

#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#if USE_SPLIT_PTLOCKS
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif
static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\

#define pte_unmap_unlock(pte, ptl)	do {		\

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
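
/*
 * Illustrative sketch, an assumption rather than part of this header: the
 * usual pattern for touching one PTE under its (possibly split) lock is
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	... examine or update *pte ...
 *	pte_unmap_unlock(pte, ptl);
 *
 * pte_alloc_map_lock() does the same but first allocates the page table
 * when the pmd is still empty, yielding NULL only on allocation failure.
 */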
		unsigned long zone_start_pfn, unsigned long *zholes_size);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
		unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
		unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
		unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
extern void sparse_memory_present_with_active_regions(int nid);
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
#else
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif
#endif

extern void show_mem(unsigned int flags);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

		unsigned long start, unsigned long last);
		unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
static inline void vma_nonlinear_insert(struct vm_area_struct *vma,

		struct rb_root *root, unsigned long start, unsigned long last);
		struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB

#define anon_vma_interval_tree_foreach(avc, root, start, last)		\
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
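
/*
 * Illustrative sketch, an assumption rather than part of this header: rmap
 * code walks every VMA mapping a given file range with the iterator above,
 * e.g. over a file's interval tree of mappings:
 *
 *	struct vm_area_struct *vma;
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff)
 *		... vma maps the page at index pgoff ...
 */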
		unsigned long addr, unsigned long len, pgoff_t pgoff,
		bool *need_rmap_locks);

		unsigned long addr, unsigned long len,
		unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long,
		unsigned long, unsigned long, unsigned long);

		unsigned long len, unsigned long flags,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);
		loff_t lstart, loff_t lend);

extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
		unsigned long size);
		unsigned long size);

		unsigned long address);

#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif

	if (vma && end_addr <= vma->vm_start)

static inline unsigned long vma_pages(struct vm_area_struct *vma)
		unsigned int foll_flags);
#define FOLL_WRITE	0x01
#define FOLL_TOUCH	0x02
#define FOLL_GET	0x04
#define FOLL_DUMP	0x08
#define FOLL_FORCE	0x10
#define FOLL_NOWAIT	0x20

#define FOLL_MLOCK	0x40
#define FOLL_SPLIT	0x80
#define FOLL_HWPOISON	0x100
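
/*
 * Illustrative sketch, an assumption rather than part of this header: the
 * FOLL_* bits are OR-ed together when user pages are looked up, e.g. for a
 * write access that should fault pages in and take a reference:
 *
 *	foll_flags = FOLL_WRITE | FOLL_TOUCH | FOLL_GET;
 */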
		unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
#ifdef CONFIG_DEBUG_PAGEALLOC

#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif

#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif
#ifdef __HAVE_ARCH_GATE_AREA

#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})

		void __user *, size_t *, loff_t *);
		unsigned long nr_pages_scanned,
		unsigned long lru_pages);
#define randomize_va_space 0

extern int randomize_va_space;

		unsigned long pnum_begin,
		unsigned long pnum_end,
		unsigned long map_count,

		unsigned long pages, int node);
void vmemmap_populate_print_last(void);
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,

extern int memory_failure(unsigned long pfn, int trapno, int flags);

extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
		unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
		unsigned int pages_per_huge_page);
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif