#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/sched.h>
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* discard swap cluster after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD)
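/*
 * Illustrative sketch (not part of the original header): a swapon(2)
 * caller requesting priority 5 would pass
 *
 *	swapon("/dev/sdXN", SWAP_FLAG_PREFER |
 *			    ((5 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK));
 *
 * (the device path is a placeholder), and sys_swapon() recovers the
 * priority as (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT
 * whenever SWAP_FLAG_PREFER is set.
 */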
static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
#define MAX_SWAPFILES_SHIFT	5
/* NUMA node memory migration support */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 2
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#else
#define SWP_MIGRATION_NUM 0
#endif
/* Handling of hardware poisoned pages with memory corruption. */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif
#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
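/*
 * Worked example (sketch): with MAX_SWAPFILES_SHIFT == 5 and both
 * CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE enabled, the 32 possible
 * type values split into MAX_SWAPFILES == 29 real swap areas (0..28),
 * SWP_HWPOISON == 29, SWP_MIGRATION_READ == 30 and
 * SWP_MIGRATION_WRITE == 31.
 */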
/* Max bad pages in the new format.. */
#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
#define MAX_SWAP_BADPAGES \
	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
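/*
 * Worked example (sketch, assuming 4 KiB pages and the union swap_header
 * layout from the full header): info.badpages starts at byte offset 1536
 * and magic.magic at byte offset 4086, so MAX_SWAP_BADPAGES evaluates to
 * (4086 - 1536) / 4 == 637 bad-page slots.
 */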
enum {
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap? */
	SWP_DISCARDABLE = (1 << 2),	/* swapon+blkdev support discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_SCANNING	= (1 << 8),	/* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* kswapd may shrink a zone by up to 1% beyond its high watermark. */
#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
#define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
#define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define SWAP_CONT_MAX	0x7f	/* Max count, in each swap_map continuation */
#define COUNT_CONTINUED	0x80	/* See swap_map continuation for full count */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs, in first swap_map */
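/*
 * Illustrative sketch: each swap_map byte is a reference count plus flags.
 * A value of 0x43, for example, decodes as SWAP_HAS_CACHE | 3: the slot is
 * in the swap cache and holds three swap-count references.  Counts that
 * would exceed SWAP_MAP_MAX set COUNT_CONTINUED and overflow into
 * continuation pages (see swap_count_continued() in mm/swapfile.c).
 */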
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int lowest_alloc;	/* while preparing discard cluster */
	unsigned int highest_alloc;	/* while preparing discard cluster */
	struct swap_extent *curr_swap_extent;
	struct swap_extent first_swap_extent;
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
#endif
};
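/*
 * Usage sketch (assumes the swp_type()/swp_offset() helpers from
 * <linux/swapops.h> and the swap_info[] array private to mm/swapfile.c):
 * a swp_entry_t names one slot of one of these areas, and that slot's
 * usage count lives in swap_map:
 *
 *	struct swap_info_struct *si = swap_info[swp_type(entry)];
 *	unsigned char count = si->swap_map[swp_offset(entry)];
 */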
/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
extern unsigned long dirty_balance_reserve;

#define nr_free_pages() global_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
			      struct lruvec *lruvec);
static inline void lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
#ifdef CONFIG_NUMA
extern int zone_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
#else
#define zone_reclaim_mode 0
#endif
extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);
extern int scan_unevictable_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#ifdef CONFIG_NUMA
extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
#else
static inline int scan_unevictable_register_node(struct node *node)
{
	return 0;
}
static inline void scan_unevictable_unregister_node(struct node *node)
{
}
#endif
#ifdef CONFIG_MEMCG_SWAP
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
/* linux/mm/swap_state.c */
#define total_swapcache_pages	swapper_space.nrpages
extern unsigned int count_swap_pages(int, int);
#else /* CONFIG_SWAP */

#define nr_swap_pages				0L
#define total_swap_pages			0L
#define total_swapcache_pages			0UL

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	page_cache_release(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr), 0);
#define free_swap_and_cache(swp)	is_migration_entry(swp)
#define swapcache_prepare(swp)		is_migration_entry(swp)
/* Without CONFIG_SWAP a page is reusable iff it is mapped exactly once. */
#define reuse_swap_page(page)	(page_mapcount(page) == 1)