#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __GENERATING_BOUNDS_H

#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <generated/bounds.h>
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/* Allocations above this order are considered costly by page allocator heuristics. */
#define PAGE_ALLOC_COSTLY_ORDER 3

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define cma_wmark_pages(zone)	zone->min_cma_pages
#else
#  define is_migrate_cma(migratetype) false
#  define cma_wmark_pages(zone) 0
#endif
#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)
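/*
 * Illustrative caller-side sketch, not part of the original header: a typical
 * user of for_each_migratetype_order() walks every (order, migratetype) pair
 * of a zone's free lists.  The free_area/free_list layout is the one declared
 * in struct zone later in this file; "count_free_blocks_sketch" is a made-up
 * name, and real callers would hold zone->lock while walking the lists.
 */
static inline unsigned long count_free_blocks_sketch(struct zone *zone)
{
	unsigned int order;
	int type;
	unsigned long nr = 0;

	for_each_migratetype_order(order, type) {
		struct list_head *entry;

		/* Each entry on free_list[type] is one free block of 2^order pages. */
		list_for_each(entry, &zone->free_area[order].free_list[type])
			nr++;
	}
	return nr;
}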
static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

#if defined(CONFIG_SMP)
/* Pad hot groups of zone fields apart onto separate cachelines on SMP. */
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
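/*
 * Illustrative sketch, not part of the original header: the evictable-LRU
 * iterator above relies on LRU_UNEVICTABLE being the last entry of
 * enum lru_list, so looping up to LRU_ACTIVE_FILE visits exactly the four
 * evictable lists (inactive/active x anon/file).  "nr_evictable_lists_sketch"
 * is a made-up name.
 */
static inline int nr_evictable_lists_sketch(void)
{
	enum lru_list lru;
	int nr = 0;

	for_each_evictable_lru(lru)
		nr++;
	return nr;	/* LRU_ACTIVE_FILE + 1 */
}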
static inline int is_unevictable_lru(enum lru_list lru)
{
	return (lru == LRU_UNEVICTABLE);
}

#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean pages */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
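/*
 * Illustrative caller-side sketch, not part of the original header: the
 * watermark accessors above are normally compared against a zone's free-page
 * counter, roughly as below.  zone_page_state() comes from linux/vmstat.h and
 * NR_FREE_PAGES from this header's vmstat enums; "zone_above_low_wmark_sketch"
 * is a made-up name, and the real allocator check (zone_watermark_ok())
 * additionally accounts for lowmem reserves and allocation order.
 */
static inline bool zone_above_low_wmark_sketch(struct zone *z)
{
	return zone_page_state(z, NR_FREE_PAGES) > low_wmark_pages(z);
}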
#ifdef CONFIG_ZONE_DMA
#ifdef CONFIG_ZONE_DMA32
#ifdef CONFIG_HIGHMEM

#ifndef __GENERATING_BOUNDS_H

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
	/* zone reclaim thresholds */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;

	/* pfns where the compaction scanners should restart */
	unsigned long		compact_cached_free_pfn;
	unsigned long		compact_cached_migrate_pfn;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG

	/* Watermark increase applied while CMA allocations are in progress */
	unsigned long		min_cma_pages;

#ifndef CONFIG_SPARSEMEM

#ifdef CONFIG_COMPACTION
	/* Compaction deferral state: after a failure at or above
	 * compact_order_failed, 1 << compact_defer_shift attempts are skipped. */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;

#ifdef CONFIG_MEMORY_ISOLATION
	/* Number of MIGRATE_ISOLATE pageblocks; protected by zone->lock */
	int			nr_pageblock_isolate;
static inline int zone_is_reclaim_congested(const struct zone *zone)
{
	return test_bit(ZONE_CONGESTED, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA
#define MAX_ZONELISTS 2
struct zonelist_cache {
	unsigned long last_full_zap;	/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

	struct zonelist_cache zlcache;	/* optional member of struct zonelist (CONFIG_NUMA only) */

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lru.next */
extern struct page *mem_map;
#endif

#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
	struct page_cgroup *node_page_cgroup;
#ifndef CONFIG_NO_BOOTMEM
#ifdef CONFIG_MEMORY_HOTPLUG
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr)	pgdat_page_nr(NODE_DATA(nid), (pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)

#define node_end_pfn(nid) ({\
	pg_data_t *__pgdat = NODE_DATA(nid);\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;\
})
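/*
 * Illustrative sketch, not part of the original header: combining the two
 * accessors above gives the pfn span of a node, which by construction equals
 * node_spanned_pages(nid).  "node_pfn_span_sketch" is a made-up name.
 */
static inline unsigned long node_pfn_span_sketch(int nid)
{
	return node_end_pfn(nid) - node_start_pfn(nid);
}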
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif
#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE

/* zone_idx() returns 0 for the ZONE_DMA zone, 1 for ZONE_NORMAL, etc. */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/*
 * is_highmem - check whether a zone is a highmem zone, keeping direct
 * references to ZONE_HIGHMEM/ZONE_MOVABLE out of general code.
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}
static inline int is_normal(struct zone *zone)
{
	return (zone == zone->zone_pgdat->node_zones + ZONE_NORMAL);
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int numa_zonelist_order_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#define NUMA_ZONELIST_ORDER_LEN	16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#else /* CONFIG_NEED_MULTIPLE_NODES */
#include <asm/mmzone.h>
#endif
extern struct zone *next_zone(struct zone *zone);
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))

#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zoneref->zone->node;
}
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
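/*
 * Illustrative caller-side sketch, not part of the original header: a typical
 * allocator-style walk over a zonelist, visiting usable zones at or below
 * highidx.  "first_populated_zone_sketch" is a made-up name.
 */
static inline struct zone *first_populated_zone_sketch(struct zonelist *zlist,
							enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zlist, highidx)
		if (populated_zone(zone))
			return zone;
	return NULL;
}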
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif
#ifdef CONFIG_SPARSEMEM

#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
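/*
 * Illustrative sketch, not part of the original header: the section helpers
 * above convert between pfns and section numbers, so rounding a pfn down to
 * the start of its section can be written either way shown below.
 * "section_start_pfn_sketch" is a made-up name.
 */
static inline unsigned long section_start_pfn_sketch(unsigned long pfn)
{
	/* Equivalent to SECTION_ALIGN_DOWN(pfn). */
	return section_nr_to_pfn(pfn_to_section_nr(pfn));
}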
struct mem_section {
	/* Logically a pointer to this section's struct page array, stored
	 * with extra state encoded in its low bits (see sparse.c). */
	unsigned long section_mem_map;
	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
	struct page_cgroup *page_cgroup;
#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
/*
 * The low bits of section_mem_map hold section state flags; the bits covered
 * by SECTION_MAP_MASK carry the encoded mem_map pointer.
 */
#define SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
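/*
 * Illustrative sketch, not part of the original header: with SPARSEMEM the
 * generic pfn_to_page() translation (see asm-generic/memory_model.h) is built
 * from the two helpers above.  The encoded section_mem_map already has the
 * section's first pfn subtracted out, so indexing with the absolute pfn is
 * correct.  "pfn_to_page_sketch" is a made-up name and assumes a valid pfn.
 */
static inline struct page *pfn_to_page_sketch(unsigned long pfn)
{
	return __section_mem_map_addr(__pfn_to_section(pfn)) + pfn;
}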
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
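/*
 * Illustrative sketch, not part of the original header: scanning the section
 * number space with the predicates above, e.g. to count how many sections
 * have memory present.  "count_present_sections_sketch" is a made-up name.
 */
static inline unsigned long count_present_sections_sketch(void)
{
	unsigned long nr, present = 0;

	for (nr = 0; nr < NR_MEM_SECTIONS; nr++)
		if (present_section_nr(nr))
			present++;
	return present;
}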
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif
#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)	do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * On these architectures pfn_valid() may not tell whether a pfn actually has
 * a memmap backing it; memmap_valid_within() checks the given page/zone pair
 * against the pfn's memmap entry.
 */
int memmap_valid_within(unsigned long pfn,
			struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
				struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */