10 #include <linux/mman.h>
11 #include <linux/slab.h>
19 #include <linux/random.h>
29 #include <linux/capability.h>
32 #include <linux/poll.h>
33 #include <linux/oom.h>
36 #include <linux/export.h>
38 #include <asm/pgtable.h>
39 #include <asm/tlbflush.h>
/*
 * Forward declarations and file-scope state for the swap subsystem.
 * NOTE(review): this SOURCE is a partial extraction of mm/swapfile.c --
 * many lines (including the tail of the first prototype) are missing,
 * and original line numbers are fused into the text.  Code untouched.
 */
43 static bool swap_count_continued(
struct swap_info_struct *,
pgoff_t,
45 static void free_swap_count_continuations(
struct swap_info_struct *);
/* Number of registered swap areas; guarded by swap_lock elsewhere in file. */
49 static unsigned int nr_swapfiles;
/* Next auto-assigned (negative-going) priority for new swap areas. */
52 static int least_priority;
/* Diagnostic prefixes printed when a bogus swap entry is encountered. */
54 static const char Bad_file[] =
"Bad swap file entry ";
55 static const char Unused_file[] =
"Unused swap file entry ";
56 static const char Bad_offset[] =
"Bad swap offset entry ";
57 static const char Unused_offset[] =
"Unused swap offset entry ";
69 static inline unsigned char swap_count(
unsigned char ent)
71 return ent & ~SWAP_HAS_CACHE;
/*
 * __try_to_reclaim_swap() fragment: tries to drop a swapcache-only page
 * so its slot can be reused.  NOTE(review): most lines of this and the
 * following helpers were lost in extraction; code left untouched.
 */
76 __try_to_reclaim_swap(
struct swap_info_struct *si,
unsigned long offset)
92 if (trylock_page(page)) {
/*
 * discard_swap() fragment: issue a discard over the whole swap device.
 * The first extent skips its header page (start_block + 1); block
 * numbers are converted from pages to 512-byte sectors (PAGE_SHIFT - 9).
 */
104 static int discard_swap(
struct swap_info_struct *si)
106 struct swap_extent *se;
112 se = &si->first_swap_extent;
113 start_block = (se->start_block + 1) << (
PAGE_SHIFT - 9);
124 start_block = se->start_block << (
PAGE_SHIFT - 9);
/*
 * discard_swap_cluster() fragment: discard a run of swap pages, walking
 * extents from the cached curr_swap_extent and clipping each discard to
 * the containing extent.
 */
141 static void discard_swap_cluster(
struct swap_info_struct *si,
144 struct swap_extent *se = si->curr_swap_extent;
145 int found_extent = 0;
150 if (se->start_page <= start_page &&
151 start_page < se->start_page + se->nr_pages) {
152 pgoff_t offset = start_page - se->start_page;
/* clip this discard to the extent, then advance past it */
156 if (nr_blocks > nr_pages)
157 nr_blocks = nr_pages;
158 start_page += nr_blocks;
159 nr_pages -= nr_blocks;
/* remember where we stopped to speed up the next lookup */
162 si->curr_swap_extent = se;
/* wait_on_bit() callback used while SWP_DISCARDING is set -- body not visible */
176 static int wait_for_discard(
void *
word)
/* Allocation cluster size (pages) and lock-hold latency bound for scans. */
182 #define SWAPFILE_CLUSTER 256
183 #define LATENCY_LIMIT 256
/*
 * scan_swap_map() fragment: find and claim a free slot in si->swap_map,
 * preferring whole free clusters of SWAPFILE_CLUSTER pages.
 * NOTE(review): large parts of the body are missing from this extraction;
 * code left byte-identical, annotations only.
 */
185 static unsigned long scan_swap_map(
struct swap_info_struct *si,
189 unsigned long scan_base;
190 unsigned long last_in_cluster = 0;
192 int found_free_cluster = 0;
/*
 * += rather than |= : SWP_SCANNING appears to be used as a counter of
 * concurrent scanners, paired with the -= on exit -- TODO confirm.
 */
205 si->flags += SWP_SCANNING;
206 scan_base = offset = si->cluster_next;
/* discard-capable devices track the allocated window for later discard */
213 if (si->flags & SWP_DISCARDABLE) {
221 if (si->lowest_alloc)
223 si->lowest_alloc = si->max;
224 si->highest_alloc = 0;
/* on rotating media restart the cluster search from the lowest free slot */
236 if (!(si->flags & SWP_SOLIDSTATE))
237 scan_base = offset = si->lowest_bit;
/* first pass: look for a fully free cluster between here and highest_bit */
241 for (; last_in_cluster <= si->highest_bit; offset++) {
242 if (si->swap_map[offset])
244 else if (offset == last_in_cluster) {
/* whole cluster free: rewind to its first page and claim it */
246 offset -= SWAPFILE_CLUSTER - 1;
247 si->cluster_next =
offset;
248 si->cluster_nr = SWAPFILE_CLUSTER - 1;
249 found_free_cluster = 1;
/* periodically drop the lock to bound latency */
252 if (
unlikely(--latency_ration < 0)) {
/* second pass: wrap around and search [lowest_bit, scan_base) */
258 offset = si->lowest_bit;
259 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
262 for (; last_in_cluster < scan_base; offset++) {
263 if (si->swap_map[offset])
265 else if (offset == last_in_cluster) {
267 offset -= SWAPFILE_CLUSTER - 1;
268 si->cluster_next =
offset;
269 si->cluster_nr = SWAPFILE_CLUSTER - 1;
270 found_free_cluster = 1;
273 if (
unlikely(--latency_ration < 0)) {
/* no free cluster found: fall back to single-slot allocation */
281 si->cluster_nr = SWAPFILE_CLUSTER - 1;
282 si->lowest_alloc = 0;
286 if (!(si->flags & SWP_WRITEOK))
288 if (!si->highest_bit)
290 if (offset > si->highest_bit)
291 scan_base = offset = si->lowest_bit;
/* under swap pressure, try to reclaim a cache-only slot for reuse */
294 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
297 swap_was_freed = __try_to_reclaim_swap(si, offset);
305 if (si->swap_map[offset])
/* shrink the free-range bookkeeping as boundary slots get taken */
308 if (offset == si->lowest_bit)
310 if (offset == si->highest_bit)
313 if (si->inuse_pages == si->pages) {
314 si->lowest_bit = si->max;
318 si->cluster_next = offset + 1;
/* drop our scanner reference (pairs with the += above) */
319 si->flags -= SWP_SCANNING;
/* discard bookkeeping: flush the window we allocated from, if any */
321 if (si->lowest_alloc) {
326 if (found_free_cluster) {
335 si->lowest_alloc <= last_in_cluster)
336 last_in_cluster = si->lowest_alloc - 1;
337 si->flags |= SWP_DISCARDING;
340 if (offset < last_in_cluster)
341 discard_swap_cluster(si, offset,
342 last_in_cluster - offset + 1);
345 si->lowest_alloc = 0;
346 si->flags &= ~SWP_DISCARDING;
351 }
/* another scanner is mid-discard: wait for its SWP_DISCARDING to clear */
else if (si->flags & SWP_DISCARDING) {
359 wait_on_bit(&si->flags,
ilog2(SWP_DISCARDING),
/* track the low/high bounds of slots handed out from this window */
369 si->lowest_alloc =
offset;
370 if (offset > si->highest_alloc)
371 si->highest_alloc =
offset;
/* slow linear scan paths (forward, then wrapped) with the lock dropped */
379 if (!si->swap_map[offset]) {
383 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
387 if (
unlikely(--latency_ration < 0)) {
392 offset = si->lowest_bit;
393 while (++offset < scan_base) {
394 if (!si->swap_map[offset]) {
398 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
402 if (
unlikely(--latency_ration < 0)) {
/* failure path: release scanner reference before returning 0 */
410 si->flags -= SWP_SCANNING;
/*
 * get_swap_page() fragment: allocate a swap slot, round-robining across
 * registered areas of equal priority via swap_list.next.
 * NOTE(review): surrounding lines missing; annotations only.
 */
416 struct swap_info_struct *si;
/* wrapped < 2 bounds the walk to at most two passes over the list */
426 for (type =
swap_list.next; type >= 0 && wrapped < 2; type = next) {
430 (!wrapped && si->prio !=
swap_info[next]->prio)) {
/* skip exhausted or read-only areas */
435 if (!si->highest_bit)
437 if (!(si->flags & SWP_WRITEOK))
/* slot is born with SWAP_HAS_CACHE: caller is adding to swap cache */
442 offset = scan_swap_map(si, SWAP_HAS_CACHE);
445 return swp_entry(type, offset);
/*
 * get_swap_page_of_type() fragment: allocate from one specific area,
 * with an initial map count of 1 rather than SWAP_HAS_CACHE.
 */
459 struct swap_info_struct *si;
464 if (si && (si->flags & SWP_WRITEOK)) {
467 offset = scan_swap_map(si, 1);
470 return swp_entry(type, offset);
/*
 * swap_info_get() fragment: validate a swp_entry_t and return its
 * swap_info_struct with swap_lock held; the visible checks reject bad
 * type, unused area, out-of-range offset, and free slots.
 * NOTE(review): error-label lines missing from this extraction.
 */
478 static struct swap_info_struct *swap_info_get(
swp_entry_t entry)
480 struct swap_info_struct *
p;
485 type = swp_type(entry);
486 if (type >= nr_swapfiles)
489 if (!(p->flags & SWP_USED))
491 offset = swp_offset(entry);
492 if (offset >= p->max)
494 if (!p->swap_map[offset])
496 spin_lock(&swap_lock);
/*
 * swap_entry_free() fragment: drop one reference ('usage') on a swap
 * entry and return the remaining map value.  Handles the SWAP_HAS_CACHE
 * flag, SWAP_MAP_SHMEM, and counts extended via continuation pages.
 */
514 static unsigned char swap_entry_free(
struct swap_info_struct *p,
517 unsigned long offset = swp_offset(entry);
519 unsigned char has_cache;
521 count = p->swap_map[
offset];
/* split the packed byte into cache flag and bare count */
522 has_cache = count & SWAP_HAS_CACHE;
523 count &= ~SWAP_HAS_CACHE;
525 if (usage == SWAP_HAS_CACHE) {
528 }
else if (count == SWAP_MAP_SHMEM) {
534 }
else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
535 if (count == COUNT_CONTINUED) {
/* count overflowed into a continuation page: borrow back from it */
536 if (swap_count_continued(p, offset, count))
537 count = SWAP_MAP_MAX | COUNT_CONTINUED;
539 count = SWAP_MAP_MAX;
545 mem_cgroup_uncharge_swap(entry);
547 usage = count | has_cache;
/* slot fully freed: widen the free range and notify interested parties */
554 if (offset > p->highest_bit)
561 frontswap_invalidate_page(p->type, offset);
562 if (p->flags & SWP_BLKDEV) {
563 struct gendisk *disk = p->bdev->bd_disk;
564 if (disk->fops->swap_slot_free_notify)
565 disk->fops->swap_slot_free_notify(p->bdev,
/*
 * swap_free() fragment: drop one map reference on an entry.
 * NOTE(review): the extraction keeps only scattered lines of each of the
 * following small functions; code untouched, annotations only.
 */
579 struct swap_info_struct *
p;
581 p = swap_info_get(entry);
583 swap_entry_free(p, entry, 1);
/* swapcache_free() fragment: drop the SWAP_HAS_CACHE reference instead */
593 struct swap_info_struct *
p;
596 p = swap_info_get(entry);
598 count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
600 mem_cgroup_uncharge_swapcache(page, entry, count != 0);
/* page_swapcount() fragment: read the map count for a swapcache page */
613 struct swap_info_struct *
p;
616 entry.
val = page_private(page);
617 p = swap_info_get(entry);
619 count = swap_count(p->swap_map[swp_offset(entry)]);
/*
 * reuse_swap_page() fragment: a page is exclusively reusable when its
 * map count is <= 1 and any swapcache copy can be dropped.
 */
638 count = page_mapcount(page);
639 if (count <= 1 && PageSwapCache(page)) {
641 if (count == 1 && !PageWriteback(page)) {
/* try_to_free_swap() fragment: bail on non-cache, busy, or suspending states */
657 if (!PageSwapCache(page))
659 if (PageWriteback(page))
679 if (pm_suspended_storage())
/*
 * free_swap_and_cache() fragment: free one reference and, if only the
 * cache reference remains, try to reclaim the swapcache page too.
 */
693 struct swap_info_struct *
p;
694 struct page *page =
NULL;
696 if (non_swap_entry(entry))
699 p = swap_info_get(entry);
701 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
703 if (page && !trylock_page(page)) {
715 if (PageSwapCache(page) && !PageWriteback(page) &&
716 (!page_mapped(page) || vm_swap_full())) {
/*
 * Hibernation support: swap_type_of() fragment -- find the swap area on
 * a given block device (optionally at a given offset) and grab a bdev
 * reference for the caller.  NOTE(review): lines missing; annotations only.
 */
726 #ifdef CONFIG_HIBERNATION
741 bdev =
bdget(device);
743 spin_lock(&swap_lock);
744 for (type = 0; type < nr_swapfiles; type++) {
747 if (!(sis->flags & SWP_WRITEOK))
752 *bdev_p =
bdgrab(sis->bdev);
754 spin_unlock(&swap_lock);
757 if (bdev == sis->bdev) {
758 struct swap_extent *se = &sis->first_swap_extent;
760 if (se->start_block == offset) {
762 *bdev_p =
bdgrab(sis->bdev);
764 spin_unlock(&swap_lock);
770 spin_unlock(&swap_lock);
/* swapdev_block() fragment: translate (type, offset) to a device block */
785 if ((
unsigned int)type >= nr_swapfiles)
789 return map_swap_entry(swp_entry(type, offset), &bdev);
/*
 * count_swap_pages() fragment: count total (or free, when 'free' is set)
 * pages of one swap area, under swap_lock.
 */
798 unsigned int count_swap_pages(
int type,
int free)
802 spin_lock(&swap_lock);
803 if ((
unsigned int)type < nr_swapfiles) {
806 if (sis->flags & SWP_WRITEOK) {
809 n -= sis->inuse_pages;
812 spin_unlock(&swap_lock);
/*
 * unuse_pte() fragment: replace one swap PTE with the in-memory page,
 * under the page-table lock.  NOTE(review): the extraction keeps only a
 * few lines per level of this page-table walk; code untouched.
 */
836 pte = pte_offset_map_lock(vma->
vm_mm, pmd, addr, &ptl);
857 pte_unmap_unlock(pte, ptl);
/* unuse_pte_range() fragment: scan PTEs in [addr, end) for 'entry' */
863 unsigned long addr,
unsigned long end,
866 pte_t swp_pte = swp_entry_to_pte(entry);
887 ret = unuse_pte(vma, pmd, addr, entry, page);
892 }
while (pte++, addr +=
PAGE_SIZE, addr != end);
/* unuse_pmd_range() fragment: descend into each present, non-huge pmd */
899 unsigned long addr,
unsigned long end,
909 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
911 ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
914 }
while (pmd++, addr = next, addr != end);
/* unuse_pud_range() fragment: descend into each present pud */
919 unsigned long addr,
unsigned long end,
929 if (pud_none_or_clear_bad(pud))
931 ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
934 }
while (pud++, addr = next, addr != end);
/* unuse_vma() fragment: walk the pgd range covering one vma */
945 if (page_anon_vma(page)) {
958 next = pgd_addr_end(addr, end);
959 if (pgd_none_or_clear_bad(pgd))
961 ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
964 }
while (pgd++, addr = next, addr != end);
/* unuse_mm() fragment: only anon vmas can hold this swap entry */
985 if (vma->
anon_vma && (ret = unuse_vma(vma, entry, page)))
989 return (ret < 0)? ret: 0;
/*
 * find_next_to_unuse() fragment: starting after 'prev', find the next
 * in-use, non-bad slot in si->swap_map (optionally restricted to
 * frontswap-backed slots).  NOTE(review): wraparound/return lines are
 * missing from this extraction; code untouched.
 */
997 static unsigned int find_next_to_unuse(
struct swap_info_struct *si,
998 unsigned int prev,
bool frontswap)
1000 unsigned int max = si->max;
1001 unsigned int i =
prev;
1002 unsigned char count;
/* in frontswap mode, only slots the frontswap bitmap marks are eligible */
1025 if (frontswap_test(si, i))
1030 count = si->swap_map[
i];
1031 if (count && swap_count(count) != SWAP_MAP_BAD)
/*
 * try_to_unuse() fragment: the heart of swapoff -- walk every in-use
 * slot of area 'type', bring each page back in, and unmap it from every
 * mm that references the entry.  NOTE(review): the extraction keeps only
 * a skeleton of this very long function; code untouched.
 */
1046 unsigned long pages_to_unuse)
1050 unsigned char *swap_map;
1051 unsigned char swcount;
1079 while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
/* swapoff is interruptible */
1080 if (signal_pending(
current)) {
1090 swap_map = &si->swap_map[
i];
1091 entry = swp_entry(type, i);
/* wait for the page to be fully read in / written back before unmapping */
1124 wait_on_page_locked(page);
1125 wait_on_page_writeback(page);
1127 wait_on_page_writeback(page);
1132 swcount = *swap_map;
1133 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
/* try the cached start_mm first, then iterate over all other mms */
1140 if (swap_count(swcount) && start_mm != &
init_mm)
1141 retval =
unuse_mm(start_mm, entry, page);
1143 if (swap_count(*swap_map)) {
1144 int set_start_mm = (*swap_map >= swcount);
1146 struct mm_struct *new_start_mm = start_mm;
1153 while (swap_count(*swap_map) && !retval &&
1164 swcount = *swap_map;
1165 if (!swap_count(swcount))
1170 retval =
unuse_mm(mm, entry, page);
/* remember the mm that made most progress as the next start_mm */
1172 if (set_start_mm && *swap_map < swcount) {
1173 mmput(new_start_mm);
1183 start_mm = new_start_mm;
/* still-referenced dirty swapcache pages must be rewritten first */
1210 if (swap_count(*swap_map) &&
1211 PageDirty(page) && PageSwapCache(page)) {
1218 wait_on_page_writeback(page);
1228 if (PageSwapCache(page) &&
1229 likely(page_private(page) == entry.
val))
/* frontswap callers can cap the number of pages unused per call */
1246 if (frontswap && pages_to_unuse > 0) {
1247 if (!--pages_to_unuse)
/*
 * drain_mmlist() fragment: once no swap areas have pages in use, the
 * global mmlist can be emptied under mmlist_lock.
 * NOTE(review): interior lines missing; annotations only.
 */
1262 static void drain_mmlist(
void)
1267 for (type = 0; type < nr_swapfiles; type++)
1270 spin_lock(&mmlist_lock);
1273 spin_unlock(&mmlist_lock);
/*
 * map_swap_entry() fragment: translate a swap entry to its on-device
 * block, walking the extent list from the cached curr_swap_extent.
 */
1284 struct swap_info_struct *sis;
1285 struct swap_extent *start_se;
1286 struct swap_extent *se;
1292 offset = swp_offset(entry);
1293 start_se = sis->curr_swap_extent;
1299 if (se->start_page <= offset &&
1300 offset < (se->start_page + se->nr_pages)) {
1301 return se->start_block + (offset - se->start_page);
/* cache the matching extent for the next lookup */
1305 sis->curr_swap_extent = se;
/* map_swap_page() fragment: same translation, keyed off page_private() */
1316 entry.
val = page_private(page);
1317 return map_swap_entry(entry, bdev);
/*
 * destroy_swap_extents() fragment: free every extent hanging off
 * first_swap_extent, then deactivate SWP_FILE-backed areas via the
 * address_space swap_deactivate hook.
 * NOTE(review): free/kfree lines missing from this extraction.
 */
1323 static void destroy_swap_extents(
struct swap_info_struct *sis)
1325 while (!list_empty(&sis->first_swap_extent.list)) {
1326 struct swap_extent *se;
1328 se =
list_entry(sis->first_swap_extent.list.next,
1329 struct swap_extent,
list);
1334 if (sis->flags & SWP_FILE) {
1335 struct file *swap_file = sis->swap_file;
1338 sis->flags &= ~SWP_FILE;
1339 mapping->
a_ops->swap_deactivate(swap_file);
/*
 * add_swap_extent() fragment: append a page-range -> block-range mapping.
 * The first extent lives embedded in the swap_info_struct; later ranges
 * are merged into the last extent when contiguous, else a new extent is
 * allocated.
 */
1351 unsigned long nr_pages,
sector_t start_block)
1353 struct swap_extent *se;
1354 struct swap_extent *new_se;
1357 if (start_page == 0) {
1358 se = &sis->first_swap_extent;
1359 sis->curr_swap_extent = se;
1361 se->nr_pages = nr_pages;
/* examine the last extent on the list for a possible merge */
1365 lh = sis->first_swap_extent.list.
prev;
1367 BUG_ON(se->start_page + se->nr_pages != start_page);
1368 if (se->start_block + se->nr_pages == start_block) {
1370 se->nr_pages += nr_pages;
1381 new_se->start_page = start_page;
1382 new_se->nr_pages = nr_pages;
/*
 * setup_swap_extents() fragment: build the extent list for an area.
 * Filesystems providing a_ops->swap_activate handle it themselves and
 * the area gets SWP_FILE.  NOTE(review): the block-device/bmap path is
 * missing from this extraction; code untouched.
 */
1420 static int setup_swap_extents(
struct swap_info_struct *sis,
sector_t *span)
1422 struct file *swap_file = sis->swap_file;
1433 if (mapping->
a_ops->swap_activate) {
1434 ret = mapping->
a_ops->swap_activate(sis, swap_file, span);
1436 sis->flags |= SWP_FILE;
/*
 * enable_swap_info() fragment: publish a fully built swap area under
 * swap_lock -- install the map, assign a priority (auto-decreasing when
 * none given), mark it writable, and init frontswap for it.
 */
1446 static void enable_swap_info(
struct swap_info_struct *p,
int prio,
1447 unsigned char *swap_map,
1448 unsigned long *frontswap_map)
1452 spin_lock(&swap_lock);
1456 p->prio = --least_priority;
1457 p->swap_map = swap_map;
1458 frontswap_map_set(p, frontswap_map);
1459 p->flags |= SWP_WRITEOK;
1475 frontswap_init(p->type);
1476 spin_unlock(&swap_lock);
/*
 * sys_swapoff() fragment: locate the area backing 'specialfile', stop
 * new allocations, call try_to_unuse() to evict everything, then tear
 * the area down.  NOTE(review): only a skeleton survives extraction;
 * code untouched, annotations only.
 */
1481 struct swap_info_struct *p =
NULL;
1482 unsigned char *swap_map;
1483 struct file *swap_file, *victim;
1485 struct inode *
inode;
1496 pathname =
getname(specialfile);
1497 if (IS_ERR(pathname))
1498 return PTR_ERR(pathname);
1501 err = PTR_ERR(victim);
/* find the active area whose file mapping matches the victim */
1510 if (p->flags & SWP_WRITEOK) {
1511 if (p->swap_file->f_mapping == mapping)
1522 vm_unacct_memory(p->pages);
/* unlink the area from the priority-ordered swap list */
1537 for (i = p->next; i >= 0; i =
swap_info[i]->next)
1543 p->flags &= ~SWP_WRITEOK;
/* on try_to_unuse() failure the area is re-enabled at its old priority */
1558 enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
1562 destroy_swap_extents(p);
1563 if (p->flags & SWP_CONTINUED)
1564 free_swap_count_continuations(p);
/* wait until any concurrent scan_swap_map() users have drained */
1572 while (p->flags >= SWP_SCANNING) {
1578 swap_file = p->swap_file;
1579 p->swap_file =
NULL;
1581 swap_map = p->swap_map;
1584 frontswap_invalidate_area(type);
1588 vfree(frontswap_map_get(p));
1590 swap_cgroup_swapoff(type);
1592 inode = mapping->
host;
/*
 * /proc/swaps seq_file interface (CONFIG_PROC_FS).
 * NOTE(review): each handler below survives only as a fragment; code
 * untouched, annotations only.
 */
1614 #ifdef CONFIG_PROC_FS
/* poll handler: lets userspace wait for swap on/off events */
1619 poll_wait(file, &proc_poll_wait, wait);
/* swap_start() fragment: find the *pos'th active area */
1632 struct swap_info_struct *si;
1641 for (type = 0; type < nr_swapfiles; type++) {
1644 if (!(si->flags & SWP_USED) || !si->swap_map)
/* swap_next(): advance to the next active area after 'v' */
1653 static void *swap_next(
struct seq_file *swap,
void *
v, loff_t *pos)
1655 struct swap_info_struct *si =
v;
1661 type = si->type + 1;
1663 for (; type < nr_swapfiles; type++) {
1666 if (!(si->flags & SWP_USED) || !si->swap_map)
1675 static void swap_stop(
struct seq_file *swap,
void *v)
/* swap_show(): emit the header row, then one line per swap area */
1680 static int swap_show(
struct seq_file *swap,
void *v)
1682 struct swap_info_struct *si =
v;
1687 seq_puts(swap,
"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1691 file = si->swap_file;
1694 len < 40 ? 40 - len : 1,
" ",
1696 "partition" :
"file\t",
/* seq_operations table wiring the iterators above together */
1704 .
start = swap_start,
1710 static int swaps_open(
struct inode *inode,
struct file *file)
/* register /proc/swaps at init */
1732 static int __init procswaps_init(
void)
1734 proc_create(
"swaps", 0,
NULL, &proc_swaps_operations);
1740 #ifdef MAX_SWAPFILES_CHECK
1741 static int __init max_swapfiles_check(
void)
/*
 * alloc_swap_info() fragment: allocate (or reuse) a swap_info_struct
 * slot under swap_lock; fails with -EPERM once MAX_SWAPFILES is reached.
 * NOTE(review): allocation lines missing; code untouched.
 */
1749 static struct swap_info_struct *alloc_swap_info(
void)
1751 struct swap_info_struct *
p;
1758 spin_lock(&swap_lock);
/* find the first free slot in swap_info[] */
1759 for (type = 0; type < nr_swapfiles; type++) {
1764 spin_unlock(&swap_lock);
1766 return ERR_PTR(-
EPERM);
1768 if (type >= nr_swapfiles) {
1786 INIT_LIST_HEAD(&p->first_swap_extent.list);
/* reserved but not yet writable until enable_swap_info() */
1787 p->flags = SWP_USED;
1789 spin_unlock(&swap_lock);
/*
 * claim_swapfile() fragment: for block devices set SWP_BLKDEV and claim
 * the device; regular files record the containing filesystem's bdev.
 */
1794 static int claim_swapfile(
struct swap_info_struct *p,
struct inode *inode)
1811 p->flags |= SWP_BLKDEV;
1813 p->bdev = inode->
i_sb->s_bdev;
/*
 * read_swap_header() fragment: validate the on-disk swap signature and
 * version, and compute 'maxpages', the number of slots this kernel's
 * swp_entry_t encoding can actually address.
 * NOTE(review): error-return lines missing; code untouched.
 */
1823 static unsigned long read_swap_header(
struct swap_info_struct *p,
1825 struct inode *inode)
1828 unsigned long maxpages;
1829 unsigned long swapfilepages;
/* the "SWAPSPACE2" magic lives at the end of the header page */
1831 if (
memcmp(
"SWAPSPACE2", swap_header->
magic.magic, 10)) {
/* byte-swapped version == 1 means the header was written on the
 * other-endian machine: swab the header fields in place */
1837 if (
swab32(swap_header->
info.version) == 1) {
1841 for (i = 0; i < swap_header->
info.nr_badpages; i++)
1845 if (swap_header->
info.version != 1) {
1847 "Unable to handle swap header version %d\n",
1848 swap_header->
info.version);
1853 p->cluster_next = 1;
/* round-trip an all-ones offset through a PTE to find the max encodable */
1870 maxpages = swp_offset(pte_to_swp_entry(
1871 swp_entry_to_pte(swp_entry(0, ~0
UL)))) + 1;
1872 if (maxpages > swap_header->
info.last_page) {
1873 maxpages = swap_header->
info.last_page + 1;
1875 if ((
unsigned int)maxpages == 0)
1878 p->highest_bit = maxpages - 1;
/* sanity: the file must be at least as large as the header claims */
1882 swapfilepages = i_size_read(inode) >>
PAGE_SHIFT;
1883 if (swapfilepages && maxpages > swapfilepages) {
1885 "Swap area shorter than signature indicates\n");
1890 if (swap_header->
info.nr_badpages > MAX_SWAP_BADPAGES)
/*
 * setup_swap_map_and_extents() fragment: mark bad pages and slot 0 as
 * SWAP_MAP_BAD, count good pages, and build the extent list.
 */
1896 static int setup_swap_map_and_extents(
struct swap_info_struct *p,
1897 union swap_header *swap_header,
1898 unsigned char *swap_map,
1899 unsigned long maxpages,
1903 unsigned int nr_good_pages;
/* slot 0 (the header) is never usable, hence maxpages - 1 */
1906 nr_good_pages = maxpages - 1;
1908 for (i = 0; i < swap_header->
info.nr_badpages; i++) {
1909 unsigned int page_nr = swap_header->
info.badpages[
i];
1910 if (page_nr == 0 || page_nr > swap_header->
info.last_page)
1912 if (page_nr < maxpages) {
1913 swap_map[page_nr] = SWAP_MAP_BAD;
1918 if (nr_good_pages) {
1919 swap_map[0] = SWAP_MAP_BAD;
1921 p->pages = nr_good_pages;
1922 nr_extents = setup_swap_extents(p, span);
1925 nr_good_pages = p->pages;
1927 if (!nr_good_pages) {
/*
 * sys_swapon() fragment: open and validate a swap file/device, build its
 * map and extents, then enable it.  NOTE(review): only a skeleton of
 * this long syscall survives extraction; code untouched.
 */
1937 struct swap_info_struct *
p;
1939 struct file *swap_file =
NULL;
1944 union swap_header *swap_header;
1947 unsigned long maxpages;
1948 unsigned char *swap_map =
NULL;
1949 unsigned long *frontswap_map =
NULL;
1950 struct page *page =
NULL;
1951 struct inode *inode =
NULL;
1959 p = alloc_swap_info();
1965 error = PTR_ERR(name);
1970 if (IS_ERR(swap_file)) {
1971 error = PTR_ERR(swap_file);
1976 p->swap_file = swap_file;
/* reject a file that is already active as swap */
1979 for (i = 0; i < nr_swapfiles; i++) {
1982 if (q == p || !q->swap_file)
1984 if (mapping == q->swap_file->f_mapping) {
1990 inode = mapping->
host;
1992 error = claim_swapfile(p, inode);
/* the header page is fetched through the mapping's readpage */
1999 if (!mapping->
a_ops->readpage) {
2003 page = read_mapping_page(mapping, 0, swap_file);
2005 error = PTR_ERR(page);
2008 swap_header =
kmap(page);
2010 maxpages = read_swap_header(p, swap_header, inode);
2023 error = swap_cgroup_swapon(p->type, maxpages);
2027 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
/* one frontswap bit per page -- NOTE(review): 'maxpages / sizeof(long)'
 * looks like it relies on vzalloc rounding; confirm against upstream */
2035 frontswap_map =
vzalloc(maxpages /
sizeof(
long));
/* SSDs get a randomized start point to spread wear */
2038 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2039 p->flags |= SWP_SOLIDSTATE;
2040 p->cluster_next = 1 + (
random32() % p->highest_bit);
2043 p->flags |= SWP_DISCARDABLE;
2051 enable_swap_info(p, prio, swap_map, frontswap_map);
2054 "Priority:%d extents:%d across:%lluk %s%s%s\n",
2056 nr_extents, (
unsigned long long)span<<(
PAGE_SHIFT-10),
2057 (p->flags & SWP_SOLIDSTATE) ?
"SS" :
"",
2058 (p->flags & SWP_DISCARDABLE) ?
"D" :
"",
2059 (frontswap_map) ?
"FS" :
"");
/* error unwind path */
2074 destroy_swap_extents(p);
2075 swap_cgroup_swapoff(p->type);
2077 p->swap_file =
NULL;
2089 if (page && !IS_ERR(page)) {
/*
 * si_swapinfo() fragment: pages of areas mid-swapoff (SWP_USED but not
 * SWP_WRITEOK) are counted as "to be unused" rather than free.
 */
2103 unsigned long nr_to_be_unused = 0;
2106 for (type = 0; type < nr_swapfiles; type++) {
2109 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2110 nr_to_be_unused += si->inuse_pages;
/*
 * __swap_duplicate() fragment: add 'usage' (a map reference or the
 * SWAP_HAS_CACHE flag) to an existing swap entry, spilling counts that
 * exceed SWAP_MAP_MAX into continuation pages.
 * NOTE(review): error labels/returns missing from this extraction.
 */
2128 static int __swap_duplicate(
swp_entry_t entry,
unsigned char usage)
2130 struct swap_info_struct *
p;
2132 unsigned char count;
2133 unsigned char has_cache;
2136 if (non_swap_entry(entry))
2139 type = swp_type(entry);
2140 if (type >= nr_swapfiles)
2143 offset = swp_offset(entry);
2145 spin_lock(&swap_lock);
2149 count = p->swap_map[
offset];
2150 has_cache = count & SWAP_HAS_CACHE;
2151 count &= ~SWAP_HAS_CACHE;
/* adding the cache flag requires a live count and no existing cache bit */
2154 if (usage == SWAP_HAS_CACHE) {
2157 if (!has_cache && count)
2158 has_cache = SWAP_HAS_CACHE;
2164 }
else if (count || has_cache) {
2166 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2168 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
/* at the max: push the excess into a continuation page */
2170 else if (swap_count_continued(p, offset, count))
2171 count = COUNT_CONTINUED;
2177 p->swap_map[
offset] = count | has_cache;
2180 spin_unlock(&swap_lock);
/* swap_shmem_alloc() fragment: shmem entries use the SWAP_MAP_SHMEM marker */
2195 __swap_duplicate(entry, SWAP_MAP_SHMEM);
/* swap_duplicate() fragment: retry after allocating a continuation on -ENOMEM */
2209 while (!err && __swap_duplicate(entry, 1) == -
ENOMEM)
/* swapcache_prepare() fragment: claim the cache reference on an entry */
2224 return __swap_duplicate(entry, SWAP_HAS_CACHE);
/* __page_file_index() fragment: swap offset doubles as the file index */
2230 BUG_ON(!PageSwapCache(page));
2248 return swp_offset(swap);
/*
 * add_swap_count_continuation() fragment: attach an extra page of count
 * bytes to a swap_map page whose entry has hit SWAP_MAP_MAX, chained via
 * the head page's lru list and flagged through page_private().
 * NOTE(review): allocation/list-walk lines missing; code untouched.
 */
2269 struct swap_info_struct *si;
2272 struct page *list_page;
2274 unsigned char count;
2282 si = swap_info_get(entry);
2292 offset = swp_offset(entry);
2293 count = si->swap_map[
offset] & ~SWAP_HAS_CACHE;
/* only entries already at the max need a continuation */
2295 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
/* first continuation for this map page: initialise the chain head */
2321 if (!page_private(head)) {
2322 BUG_ON(count & COUNT_CONTINUED);
2323 INIT_LIST_HEAD(&head->
lru);
2324 set_page_private(head, SWP_CONTINUED);
2325 si->flags |= SWP_CONTINUED;
2335 if (!(count & COUNT_CONTINUED))
2346 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
/*
 * swap_count_continued() fragment: propagate an increment (count ==
 * SWAP_MAP_MAX | COUNT_CONTINUED) or a decrement (count ==
 * COUNT_CONTINUED) through the chain of continuation pages; returns
 * whether the entry still carries COUNT_CONTINUED.
 */
2368 static bool swap_count_continued(
struct swap_info_struct *si,
2369 pgoff_t offset,
unsigned char count)
2376 if (page_private(head) != SWP_CONTINUED) {
2377 BUG_ON(count & COUNT_CONTINUED);
2385 if (count == SWAP_MAP_MAX)
/* increment path: carry through saturated continuation bytes */
2388 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) {
2392 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2398 if (*map == SWAP_CONT_MAX) {
2409 while (page != head) {
2411 *map = COUNT_CONTINUED;
/* decrement path: borrow back through zeroed continuation bytes */
2421 BUG_ON(count != COUNT_CONTINUED);
2422 while (*map == COUNT_CONTINUED) {
2434 while (page != head) {
2436 *map = SWAP_CONT_MAX |
count;
2437 count = COUNT_CONTINUED;
2441 return count == COUNT_CONTINUED;
/*
 * free_swap_count_continuations() fragment: on swapoff, walk the map one
 * PAGE_SIZE worth of entries at a time and free any continuation chains.
 */
2449 static void free_swap_count_continuations(
struct swap_info_struct *si)
2453 for (offset = 0; offset < si->max; offset +=
PAGE_SIZE) {
2456 if (page_private(head)) {