#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}
/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
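/*
 * Usage sketch (illustrative, not part of this header): the page
 * allocator hands out pages with a reference count of one, so a
 * freshly prepared page is made refcounted in mm/page_alloc.c roughly
 * like this:
 *
 *	set_page_refcounted(page);	// _count goes 0 -> 1
 */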
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}
/*
 * Tail pages keep _count at zero: pin a tail page by elevating the
 * head page's _count and the tail's _mapcount instead.
 */
static inline void __get_page_tail_foll(struct page *page,
					bool get_page_head)
{
	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	VM_BUG_ON(page_mapcount(page) < 0);
	if (get_page_head)
		atomic_inc(&page->first_page->_count);
	atomic_inc(&page->_mapcount);
}
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page)))
		/*
		 * This is safe only because
		 * __split_huge_page_refcount() can't run under
		 * get_page_foll() because we hold the proper PT lock.
		 */
		__get_page_tail_foll(page, true);
	else {
		/*
		 * Getting a normal page or the head of a compound page
		 * requires an already elevated page->_count.
		 */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}
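/*
 * Usage sketch (illustrative): follow_page() in mm/memory.c takes its
 * reference while holding the page table lock, which is what makes the
 * tail-page path above safe:
 *
 *	if (flags & FOLL_GET)
 *		get_page_foll(page);
 */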
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	bool sync;			/* Synchronous migration */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool finished_update_free;	/* True when the zone cached pfns are
					 * no longer being updated
					 */
	bool finished_update_migrate;

	int order;			/* order a direct compactor needs */
	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
	struct zone *zone;
	bool contended;			/* True if a lock was contended */
};
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn,
			   bool unevictable);
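/*
 * Usage sketch (illustrative, modelled on alloc_contig_range() in
 * mm/page_alloc.c): a CMA-style caller drives the isolators with an
 * on-stack compact_control:
 *
 *	struct compact_control cc = {
 *		.nr_migratepages = 0,
 *		.order = -1,	// compacting a raw range, no target order
 *		.zone = page_zone(pfn_to_page(start_pfn)),
 *		.sync = true,
 *	};
 *	INIT_LIST_HEAD(&cc.migratepages);
 *	...
 *	isolate_freepages_range(&cc, start_pfn, end_pfn);
 */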
#endif /* CONFIG_COMPACTION || CONFIG_CMA */

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
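/*
 * Usage sketch (illustrative): with zone->lock held, a caller checks
 * PageBuddy() before trusting the order stashed in page_private():
 *
 *	if (PageBuddy(page))
 *		order = page_order(page);
 */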
#ifdef CONFIG_MMU
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
/*
 * Called only in fault path, to determine if a new page is being
 * mapped into a LOCKED vma.  If it is, mark page as mlocked.
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				      struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
#endif /* !CONFIG_MMU */
/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}
/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;

		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
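/*
 * Usage sketch (illustrative, modelled on clear_gigantic_page() in
 * mm/memory.c): walk every subpage of a gigantic page without assuming
 * a contiguous mem_map:
 *
 *	struct page *p = page;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i)) {
 *		cond_resched();
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 *	}
 */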
/*
 * Under SPARSEMEM memory can be hotplugged after boot, so paging-init
 * functions must stay around (__meminit); otherwise they can be
 * discarded after init (__init).
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
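/*
 * Usage sketch (illustrative): functions on the paging-init path carry
 * the annotation, e.g. in mm/page_alloc.c:
 *
 *	void __paginginit free_area_init_node(int nid, ...);
 */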
/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
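/*
 * Usage sketch (illustrative, modelled on mm/mm_init.c): callers tag
 * output with a severity and a prefix; nothing is printed unless
 * mminit_loglevel is raised above the message's level:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *			"zonelist for node %d\n", nid);
 */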
extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}
static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */
#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1
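/*
 * Usage sketch (illustrative, modelled on get_page_from_freelist() in
 * mm/page_alloc.c): the caller of zone_reclaim() branches on these
 * codes:
 *
 *	switch (zone_reclaim(zone, gfp_mask, order)) {
 *	case ZONE_RECLAIM_NOSCAN:	// did not scan
 *	case ZONE_RECLAIM_FULL:		// scanned but unreclaimable
 *		continue;
 *	default:
 *		// re-check the watermark after the reclaim attempt
 *		...
 *	}
 */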
extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
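/*
 * Usage sketch (illustrative): the low bits index zone->watermark, as
 * in get_page_from_freelist():
 *
 *	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *	if (!zone_watermark_ok(zone, order, mark,
 *			       classzone_idx, alloc_flags))
 *		goto this_zone_full;
 */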
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */

#endif	/* __MM_INTERNAL_H */