Linux Kernel 3.7.1
swapfile.c
1 /*
2  * linux/mm/swapfile.c
3  *
4  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5  * Swap reorganised 29.12.95, Stephen Tweedie
6  */
7 
8 #include <linux/mm.h>
9 #include <linux/hugetlb.h>
10 #include <linux/mman.h>
11 #include <linux/slab.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/namei.h>
17 #include <linux/shmem_fs.h>
18 #include <linux/blkdev.h>
19 #include <linux/random.h>
20 #include <linux/writeback.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/init.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/security.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mutex.h>
29 #include <linux/capability.h>
30 #include <linux/syscalls.h>
31 #include <linux/memcontrol.h>
32 #include <linux/poll.h>
33 #include <linux/oom.h>
34 #include <linux/frontswap.h>
35 #include <linux/swapfile.h>
36 #include <linux/export.h>
37 
38 #include <asm/pgtable.h>
39 #include <asm/tlbflush.h>
40 #include <linux/swapops.h>
41 #include <linux/page_cgroup.h>
42 
43 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
44  unsigned char);
45 static void free_swap_count_continuations(struct swap_info_struct *);
46 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
47 
48 DEFINE_SPINLOCK(swap_lock);
49 static unsigned int nr_swapfiles;
50 long nr_swap_pages;
51 long total_swap_pages;
52 static int least_priority;
53 
54 static const char Bad_file[] = "Bad swap file entry ";
55 static const char Unused_file[] = "Unused swap file entry ";
56 static const char Bad_offset[] = "Bad swap offset entry ";
57 static const char Unused_offset[] = "Unused swap offset entry ";
58 
59 struct swap_list_t swap_list = {-1, -1};
60 
61 struct swap_info_struct *swap_info[MAX_SWAPFILES];
62 
63 static DEFINE_MUTEX(swapon_mutex);
64 
65 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
66 /* Activity counter to indicate that a swapon or swapoff has occurred */
67 static atomic_t proc_poll_event = ATOMIC_INIT(0);
68 
69 static inline unsigned char swap_count(unsigned char ent)
70 {
71  return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
72 }
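
/*
 * A minimal standalone sketch (not part of swapfile.c) of the swap_map
 * byte layout that swap_count() above relies on: SWAP_HAS_CACHE (0x40 in
 * this kernel's include/linux/swap.h) flags a swapcache page, and the low
 * bits hold the reference count.
 */
#include <stdio.h>

#define SWAP_HAS_CACHE 0x40	/* value from include/linux/swap.h */

static unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* same masking as above */
}

int main(void)
{
	unsigned char ent = SWAP_HAS_CACHE | 3;	/* cached, 3 references */

	printf("count=%u cached=%d\n", swap_count(ent),
	       !!(ent & SWAP_HAS_CACHE));	/* prints: count=3 cached=1 */
	return 0;
}
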
73 
74 /* returns 1 if swap entry is freed */
75 static int
76 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
77 {
78  swp_entry_t entry = swp_entry(si->type, offset);
79  struct page *page;
80  int ret = 0;
81 
82  page = find_get_page(&swapper_space, entry.val);
83  if (!page)
84  return 0;
85  /*
86  * This function is called from scan_swap_map(), which in turn is
87  * called by vmscan.c while reclaiming pages, so a page lock is already
88  * held here and we have to use trylock to avoid deadlock. This is a
89  * special case; in normal operation use try_to_free_swap() with an
90  * explicit lock_page().
91  */
92  if (trylock_page(page)) {
93  ret = try_to_free_swap(page);
94  unlock_page(page);
95  }
96  page_cache_release(page);
97  return ret;
98 }
99 
100 /*
101  * swapon tells the device that all the old swap contents can be discarded,
102  * to allow the swap device to optimize its wear-levelling.
103  */
104 static int discard_swap(struct swap_info_struct *si)
105 {
106  struct swap_extent *se;
107  sector_t start_block;
108  sector_t nr_blocks;
109  int err = 0;
110 
111  /* Do not discard the swap header page! */
112  se = &si->first_swap_extent;
113  start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
114  nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
115  if (nr_blocks) {
116  err = blkdev_issue_discard(si->bdev, start_block,
117  nr_blocks, GFP_KERNEL, 0);
118  if (err)
119  return err;
120  cond_resched();
121  }
122 
123  list_for_each_entry(se, &si->first_swap_extent.list, list) {
124  start_block = se->start_block << (PAGE_SHIFT - 9);
125  nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
126 
127  err = blkdev_issue_discard(si->bdev, start_block,
128  nr_blocks, GFP_KERNEL, 0);
129  if (err)
130  break;
131 
132  cond_resched();
133  }
134  return err; /* That will often be -EOPNOTSUPP */
135 }
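
/*
 * A minimal standalone sketch (not part of swapfile.c) of the sector
 * arithmetic used by discard_swap() above: extents are kept in PAGE_SIZE
 * units, while blkdev_issue_discard() takes 512-byte sectors, hence the
 * "<< (PAGE_SHIFT - 9)" conversions. PAGE_SHIFT of 12 (4 KiB pages) is
 * assumed here.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long long start_block = 10;	/* extent start, in pages */
	unsigned long long nr_pages = 3;

	/* Skip the header page, then convert pages to 512-byte sectors. */
	unsigned long long first_sector = (start_block + 1) << (PAGE_SHIFT - 9);
	unsigned long long nr_sectors = (nr_pages - 1) << (PAGE_SHIFT - 9);

	printf("discard %llu sectors from sector %llu\n",
	       nr_sectors, first_sector);	/* 16 sectors from 88 */
	return 0;
}
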
136 
137 /*
138  * swap allocation tells the device that a cluster of swap can now be discarded,
139  * to allow the swap device to optimize its wear-levelling.
140  */
141 static void discard_swap_cluster(struct swap_info_struct *si,
142  pgoff_t start_page, pgoff_t nr_pages)
143 {
144  struct swap_extent *se = si->curr_swap_extent;
145  int found_extent = 0;
146 
147  while (nr_pages) {
148  struct list_head *lh;
149 
150  if (se->start_page <= start_page &&
151  start_page < se->start_page + se->nr_pages) {
152  pgoff_t offset = start_page - se->start_page;
153  sector_t start_block = se->start_block + offset;
154  sector_t nr_blocks = se->nr_pages - offset;
155 
156  if (nr_blocks > nr_pages)
157  nr_blocks = nr_pages;
158  start_page += nr_blocks;
159  nr_pages -= nr_blocks;
160 
161  if (!found_extent++)
162  si->curr_swap_extent = se;
163 
164  start_block <<= PAGE_SHIFT - 9;
165  nr_blocks <<= PAGE_SHIFT - 9;
166  if (blkdev_issue_discard(si->bdev, start_block,
167  nr_blocks, GFP_NOIO, 0))
168  break;
169  }
170 
171  lh = se->list.next;
172  se = list_entry(lh, struct swap_extent, list);
173  }
174 }
175 
176 static int wait_for_discard(void *word)
177 {
178  schedule();
179  return 0;
180 }
181 
182 #define SWAPFILE_CLUSTER 256
183 #define LATENCY_LIMIT 256
184 
185 static unsigned long scan_swap_map(struct swap_info_struct *si,
186  unsigned char usage)
187 {
188  unsigned long offset;
189  unsigned long scan_base;
190  unsigned long last_in_cluster = 0;
191  int latency_ration = LATENCY_LIMIT;
192  int found_free_cluster = 0;
193 
194  /*
195  * We try to cluster swap pages by allocating them sequentially
196  * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
197  * way, however, we resort to first-free allocation, starting
198  * a new cluster. This prevents us from scattering swap pages
199  * all over the entire swap partition, so that we reduce
200  * overall disk seek times between swap pages. -- sct
201  * But we do now try to find an empty cluster. -Andrea
202  * And we let swap pages go all over an SSD partition. Hugh
203  */
204 
205  si->flags += SWP_SCANNING;
206  scan_base = offset = si->cluster_next;
207 
208  if (unlikely(!si->cluster_nr--)) {
209  if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
210  si->cluster_nr = SWAPFILE_CLUSTER - 1;
211  goto checks;
212  }
213  if (si->flags & SWP_DISCARDABLE) {
214  /*
215  * Start range check on racing allocations, in case
216  * they overlap the cluster we eventually decide on
217  * (we scan without swap_lock to allow preemption).
218  * It's hardly conceivable that cluster_nr could be
219  * wrapped during our scan, but don't depend on it.
220  */
221  if (si->lowest_alloc)
222  goto checks;
223  si->lowest_alloc = si->max;
224  si->highest_alloc = 0;
225  }
226  spin_unlock(&swap_lock);
227 
228  /*
229  * If seek is expensive, start searching for new cluster from
230  * start of partition, to minimize the span of allocated swap.
231  * But if seek is cheap, search from our current position, so
232  * that swap is allocated from all over the partition: if the
233  * Flash Translation Layer only remaps within limited zones,
234  * we don't want to wear out the first zone too quickly.
235  */
236  if (!(si->flags & SWP_SOLIDSTATE))
237  scan_base = offset = si->lowest_bit;
238  last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
239 
240  /* Locate the first empty (unaligned) cluster */
241  for (; last_in_cluster <= si->highest_bit; offset++) {
242  if (si->swap_map[offset])
243  last_in_cluster = offset + SWAPFILE_CLUSTER;
244  else if (offset == last_in_cluster) {
245  spin_lock(&swap_lock);
246  offset -= SWAPFILE_CLUSTER - 1;
247  si->cluster_next = offset;
248  si->cluster_nr = SWAPFILE_CLUSTER - 1;
249  found_free_cluster = 1;
250  goto checks;
251  }
252  if (unlikely(--latency_ration < 0)) {
253  cond_resched();
254  latency_ration = LATENCY_LIMIT;
255  }
256  }
257 
258  offset = si->lowest_bit;
259  last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
260 
261  /* Locate the first empty (unaligned) cluster */
262  for (; last_in_cluster < scan_base; offset++) {
263  if (si->swap_map[offset])
264  last_in_cluster = offset + SWAPFILE_CLUSTER;
265  else if (offset == last_in_cluster) {
266  spin_lock(&swap_lock);
267  offset -= SWAPFILE_CLUSTER - 1;
268  si->cluster_next = offset;
269  si->cluster_nr = SWAPFILE_CLUSTER - 1;
270  found_free_cluster = 1;
271  goto checks;
272  }
273  if (unlikely(--latency_ration < 0)) {
274  cond_resched();
275  latency_ration = LATENCY_LIMIT;
276  }
277  }
278 
279  offset = scan_base;
280  spin_lock(&swap_lock);
281  si->cluster_nr = SWAPFILE_CLUSTER - 1;
282  si->lowest_alloc = 0;
283  }
284 
285 checks:
286  if (!(si->flags & SWP_WRITEOK))
287  goto no_page;
288  if (!si->highest_bit)
289  goto no_page;
290  if (offset > si->highest_bit)
291  scan_base = offset = si->lowest_bit;
292 
293  /* reuse swap entry of cache-only swap if not busy. */
294  if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
295  int swap_was_freed;
296  spin_unlock(&swap_lock);
297  swap_was_freed = __try_to_reclaim_swap(si, offset);
298  spin_lock(&swap_lock);
299  /* entry was freed successfully, try to use this again */
300  if (swap_was_freed)
301  goto checks;
302  goto scan; /* check next one */
303  }
304 
305  if (si->swap_map[offset])
306  goto scan;
307 
308  if (offset == si->lowest_bit)
309  si->lowest_bit++;
310  if (offset == si->highest_bit)
311  si->highest_bit--;
312  si->inuse_pages++;
313  if (si->inuse_pages == si->pages) {
314  si->lowest_bit = si->max;
315  si->highest_bit = 0;
316  }
317  si->swap_map[offset] = usage;
318  si->cluster_next = offset + 1;
319  si->flags -= SWP_SCANNING;
320 
321  if (si->lowest_alloc) {
322  /*
323  * Only set when SWP_DISCARDABLE, and there's a scan
324  * for a free cluster in progress or just completed.
325  */
326  if (found_free_cluster) {
327  /*
328  * To optimize wear-levelling, discard the
329  * old data of the cluster, taking care not to
330  * discard any of its pages that have already
331  * been allocated by racing tasks (offset has
332  * already stepped over any at the beginning).
333  */
334  if (offset < si->highest_alloc &&
335  si->lowest_alloc <= last_in_cluster)
336  last_in_cluster = si->lowest_alloc - 1;
337  si->flags |= SWP_DISCARDING;
338  spin_unlock(&swap_lock);
339 
340  if (offset < last_in_cluster)
341  discard_swap_cluster(si, offset,
342  last_in_cluster - offset + 1);
343 
344  spin_lock(&swap_lock);
345  si->lowest_alloc = 0;
346  si->flags &= ~SWP_DISCARDING;
347 
348  smp_mb(); /* wake_up_bit advises this */
349  wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
350 
351  } else if (si->flags & SWP_DISCARDING) {
352  /*
353  * Delay using pages allocated by racing tasks
354  * until the whole discard has been issued. We
355  * could defer that delay until swap_writepage,
356  * but it's easier to keep this self-contained.
357  */
358  spin_unlock(&swap_lock);
359  wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
360  wait_for_discard, TASK_UNINTERRUPTIBLE);
361  spin_lock(&swap_lock);
362  } else {
363  /*
364  * Note pages allocated by racing tasks while
365  * scan for a free cluster is in progress, so
366  * that its final discard can exclude them.
367  */
368  if (offset < si->lowest_alloc)
369  si->lowest_alloc = offset;
370  if (offset > si->highest_alloc)
371  si->highest_alloc = offset;
372  }
373  }
374  return offset;
375 
376 scan:
377  spin_unlock(&swap_lock);
378  while (++offset <= si->highest_bit) {
379  if (!si->swap_map[offset]) {
380  spin_lock(&swap_lock);
381  goto checks;
382  }
383  if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
384  spin_lock(&swap_lock);
385  goto checks;
386  }
387  if (unlikely(--latency_ration < 0)) {
388  cond_resched();
389  latency_ration = LATENCY_LIMIT;
390  }
391  }
392  offset = si->lowest_bit;
393  while (++offset < scan_base) {
394  if (!si->swap_map[offset]) {
395  spin_lock(&swap_lock);
396  goto checks;
397  }
398  if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
399  spin_lock(&swap_lock);
400  goto checks;
401  }
402  if (unlikely(--latency_ration < 0)) {
403  cond_resched();
404  latency_ration = LATENCY_LIMIT;
405  }
406  }
407  spin_lock(&swap_lock);
408 
409 no_page:
410  si->flags -= SWP_SCANNING;
411  return 0;
412 }
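
/*
 * A minimal standalone sketch (not part of swapfile.c) of the
 * "first empty cluster" scan that scan_swap_map() above runs twice:
 * last_in_cluster is pushed ahead every time a used slot is seen, so
 * offset only catches up with it after SWAPFILE_CLUSTER consecutive
 * free slots. A cluster size of 4 is used here instead of 256.
 */
#include <stdio.h>

#define CLUSTER 4	/* SWAPFILE_CLUSTER is 256 above */

static long find_free_cluster(const unsigned char *map, long max)
{
	long offset = 0;
	long last_in_cluster = offset + CLUSTER - 1;

	for (; last_in_cluster < max; offset++) {
		if (map[offset])
			last_in_cluster = offset + CLUSTER;
		else if (offset == last_in_cluster)
			return offset - (CLUSTER - 1);	/* cluster start */
	}
	return -1;	/* no run of CLUSTER free slots */
}

int main(void)
{
	unsigned char map[12] = { 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0 };

	printf("free cluster at %ld\n", find_free_cluster(map, 12)); /* 4 */
	return 0;
}
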
413 
414 swp_entry_t get_swap_page(void)
415 {
416  struct swap_info_struct *si;
417  pgoff_t offset;
418  int type, next;
419  int wrapped = 0;
420 
421  spin_lock(&swap_lock);
422  if (nr_swap_pages <= 0)
423  goto noswap;
424  nr_swap_pages--;
425 
426  for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
427  si = swap_info[type];
428  next = si->next;
429  if (next < 0 ||
430  (!wrapped && si->prio != swap_info[next]->prio)) {
431  next = swap_list.head;
432  wrapped++;
433  }
434 
435  if (!si->highest_bit)
436  continue;
437  if (!(si->flags & SWP_WRITEOK))
438  continue;
439 
440  swap_list.next = next;
441  /* This is called for allocating swap entry for cache */
442  offset = scan_swap_map(si, SWAP_HAS_CACHE);
443  if (offset) {
444  spin_unlock(&swap_lock);
445  return swp_entry(type, offset);
446  }
447  next = swap_list.next;
448  }
449 
450  nr_swap_pages++;
451 noswap:
452  spin_unlock(&swap_lock);
453  return (swp_entry_t) {0};
454 }
455 
456 /* The only caller of this function is now the suspend routine */
457 swp_entry_t get_swap_page_of_type(int type)
458 {
459  struct swap_info_struct *si;
460  pgoff_t offset;
461 
462  spin_lock(&swap_lock);
463  si = swap_info[type];
464  if (si && (si->flags & SWP_WRITEOK)) {
465  nr_swap_pages--;
466  /* This is called for allocating swap entry, not cache */
467  offset = scan_swap_map(si, 1);
468  if (offset) {
469  spin_unlock(&swap_lock);
470  return swp_entry(type, offset);
471  }
472  nr_swap_pages++;
473  }
474  spin_unlock(&swap_lock);
475  return (swp_entry_t) {0};
476 }
477 
478 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
479 {
480  struct swap_info_struct *p;
481  unsigned long offset, type;
482 
483  if (!entry.val)
484  goto out;
485  type = swp_type(entry);
486  if (type >= nr_swapfiles)
487  goto bad_nofile;
488  p = swap_info[type];
489  if (!(p->flags & SWP_USED))
490  goto bad_device;
491  offset = swp_offset(entry);
492  if (offset >= p->max)
493  goto bad_offset;
494  if (!p->swap_map[offset])
495  goto bad_free;
496  spin_lock(&swap_lock);
497  return p;
498 
499 bad_free:
500  printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
501  goto out;
502 bad_offset:
503  printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
504  goto out;
505 bad_device:
506  printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
507  goto out;
508 bad_nofile:
509  printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
510 out:
511  return NULL;
512 }
513 
514 static unsigned char swap_entry_free(struct swap_info_struct *p,
515  swp_entry_t entry, unsigned char usage)
516 {
517  unsigned long offset = swp_offset(entry);
518  unsigned char count;
519  unsigned char has_cache;
520 
521  count = p->swap_map[offset];
522  has_cache = count & SWAP_HAS_CACHE;
523  count &= ~SWAP_HAS_CACHE;
524 
525  if (usage == SWAP_HAS_CACHE) {
526  VM_BUG_ON(!has_cache);
527  has_cache = 0;
528  } else if (count == SWAP_MAP_SHMEM) {
529  /*
530  * Or we could insist on shmem.c using a special
531  * swap_shmem_free() and free_shmem_swap_and_cache()...
532  */
533  count = 0;
534  } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
535  if (count == COUNT_CONTINUED) {
536  if (swap_count_continued(p, offset, count))
537  count = SWAP_MAP_MAX | COUNT_CONTINUED;
538  else
539  count = SWAP_MAP_MAX;
540  } else
541  count--;
542  }
543 
544  if (!count)
545  mem_cgroup_uncharge_swap(entry);
546 
547  usage = count | has_cache;
548  p->swap_map[offset] = usage;
549 
550  /* free if no reference */
551  if (!usage) {
552  if (offset < p->lowest_bit)
553  p->lowest_bit = offset;
554  if (offset > p->highest_bit)
555  p->highest_bit = offset;
556  if (swap_list.next >= 0 &&
557  p->prio > swap_info[swap_list.next]->prio)
558  swap_list.next = p->type;
559  nr_swap_pages++;
560  p->inuse_pages--;
561  frontswap_invalidate_page(p->type, offset);
562  if (p->flags & SWP_BLKDEV) {
563  struct gendisk *disk = p->bdev->bd_disk;
564  if (disk->fops->swap_slot_free_notify)
565  disk->fops->swap_slot_free_notify(p->bdev,
566  offset);
567  }
568  }
569 
570  return usage;
571 }
572 
573 /*
574  * Caller has made sure that the swapdevice corresponding to entry
575  * is still around or has not been recycled.
576  */
577 void swap_free(swp_entry_t entry)
578 {
579  struct swap_info_struct *p;
580 
581  p = swap_info_get(entry);
582  if (p) {
583  swap_entry_free(p, entry, 1);
584  spin_unlock(&swap_lock);
585  }
586 }
587 
588 /*
589  * Called after dropping swapcache to decrease refcnt to swap entries.
590  */
591 void swapcache_free(swp_entry_t entry, struct page *page)
592 {
593  struct swap_info_struct *p;
594  unsigned char count;
595 
596  p = swap_info_get(entry);
597  if (p) {
598  count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
599  if (page)
600  mem_cgroup_uncharge_swapcache(page, entry, count != 0);
601  spin_unlock(&swap_lock);
602  }
603 }
604 
605 /*
606  * How many references to page are currently swapped out?
607  * This does not give an exact answer when swap count is continued,
608  * but does include the high COUNT_CONTINUED flag to allow for that.
609  */
610 int page_swapcount(struct page *page)
611 {
612  int count = 0;
613  struct swap_info_struct *p;
614  swp_entry_t entry;
615 
616  entry.val = page_private(page);
617  p = swap_info_get(entry);
618  if (p) {
619  count = swap_count(p->swap_map[swp_offset(entry)]);
620  spin_unlock(&swap_lock);
621  }
622  return count;
623 }
624 
625 /*
626  * We can write to an anon page without COW if there are no other references
627  * to it. And as a side-effect, free up its swap: because the old content
628  * on disk will never be read, and seeking back there to write new content
629  * later would only waste time away from clustering.
630  */
631 int reuse_swap_page(struct page *page)
632 {
633  int count;
634 
635  VM_BUG_ON(!PageLocked(page));
636  if (unlikely(PageKsm(page)))
637  return 0;
638  count = page_mapcount(page);
639  if (count <= 1 && PageSwapCache(page)) {
640  count += page_swapcount(page);
641  if (count == 1 && !PageWriteback(page)) {
642  delete_from_swap_cache(page);
643  SetPageDirty(page);
644  }
645  }
646  return count <= 1;
647 }
648 
649 /*
650  * If swap is getting full, or if there are no more mappings of this page,
651  * then try_to_free_swap is called to free its swap space.
652  */
653 int try_to_free_swap(struct page *page)
654 {
655  VM_BUG_ON(!PageLocked(page));
656 
657  if (!PageSwapCache(page))
658  return 0;
659  if (PageWriteback(page))
660  return 0;
661  if (page_swapcount(page))
662  return 0;
663 
664  /*
665  * Once hibernation has begun to create its image of memory,
666  * there's a danger that one of the calls to try_to_free_swap()
667  * - most probably a call from __try_to_reclaim_swap() while
668  * hibernation is allocating its own swap pages for the image,
669  * but conceivably even a call from memory reclaim - will free
670  * the swap from a page which has already been recorded in the
671  * image as a clean swapcache page, and then reuse its swap for
672  * another page of the image. On waking from hibernation, the
673  * original page might be freed under memory pressure, then
674  * later read back in from swap, now with the wrong data.
675  *
676  * Hibernation suspends storage while it is writing the image
677  * to disk so check that here.
678  */
679  if (pm_suspended_storage())
680  return 0;
681 
682  delete_from_swap_cache(page);
683  SetPageDirty(page);
684  return 1;
685 }
686 
687 /*
688  * Free the swap entry like above, but also try to
689  * free the page cache entry if it is the last user.
690  */
691 int free_swap_and_cache(swp_entry_t entry)
692 {
693  struct swap_info_struct *p;
694  struct page *page = NULL;
695 
696  if (non_swap_entry(entry))
697  return 1;
698 
699  p = swap_info_get(entry);
700  if (p) {
701  if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
702  page = find_get_page(&swapper_space, entry.val);
703  if (page && !trylock_page(page)) {
704  page_cache_release(page);
705  page = NULL;
706  }
707  }
708  spin_unlock(&swap_lock);
709  }
710  if (page) {
711  /*
712  * Not mapped elsewhere, or swap space full? Free it!
713  * Also recheck PageSwapCache now page is locked (above).
714  */
715  if (PageSwapCache(page) && !PageWriteback(page) &&
716  (!page_mapped(page) || vm_swap_full())) {
717  delete_from_swap_cache(page);
718  SetPageDirty(page);
719  }
720  unlock_page(page);
721  page_cache_release(page);
722  }
723  return p != NULL;
724 }
725 
726 #ifdef CONFIG_HIBERNATION
727 /*
728  * Find the swap type that corresponds to given device (if any).
729  *
730  * @offset - number of the PAGE_SIZE-sized block of the device, starting
731  * from 0, in which the swap header is expected to be located.
732  *
733  * This is needed for the suspend to disk (aka swsusp).
734  */
735 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
736 {
737  struct block_device *bdev = NULL;
738  int type;
739 
740  if (device)
741  bdev = bdget(device);
742 
743  spin_lock(&swap_lock);
744  for (type = 0; type < nr_swapfiles; type++) {
745  struct swap_info_struct *sis = swap_info[type];
746 
747  if (!(sis->flags & SWP_WRITEOK))
748  continue;
749 
750  if (!bdev) {
751  if (bdev_p)
752  *bdev_p = bdgrab(sis->bdev);
753 
754  spin_unlock(&swap_lock);
755  return type;
756  }
757  if (bdev == sis->bdev) {
758  struct swap_extent *se = &sis->first_swap_extent;
759 
760  if (se->start_block == offset) {
761  if (bdev_p)
762  *bdev_p = bdgrab(sis->bdev);
763 
764  spin_unlock(&swap_lock);
765  bdput(bdev);
766  return type;
767  }
768  }
769  }
770  spin_unlock(&swap_lock);
771  if (bdev)
772  bdput(bdev);
773 
774  return -ENODEV;
775 }
776 
777 /*
778  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
779  * corresponding to given index in swap_info (swap type).
780  */
781 sector_t swapdev_block(int type, pgoff_t offset)
782 {
783  struct block_device *bdev;
784 
785  if ((unsigned int)type >= nr_swapfiles)
786  return 0;
787  if (!(swap_info[type]->flags & SWP_WRITEOK))
788  return 0;
789  return map_swap_entry(swp_entry(type, offset), &bdev);
790 }
791 
792 /*
793  * Return either the total number of swap pages of given type, or the number
794  * of free pages of that type (depending on @free)
795  *
796  * This is needed for software suspend
797  */
798 unsigned int count_swap_pages(int type, int free)
799 {
800  unsigned int n = 0;
801 
802  spin_lock(&swap_lock);
803  if ((unsigned int)type < nr_swapfiles) {
804  struct swap_info_struct *sis = swap_info[type];
805 
806  if (sis->flags & SWP_WRITEOK) {
807  n = sis->pages;
808  if (free)
809  n -= sis->inuse_pages;
810  }
811  }
812  spin_unlock(&swap_lock);
813  return n;
814 }
815 #endif /* CONFIG_HIBERNATION */
816 
817 /*
818  * No need to decide whether this PTE shares the swap entry with others,
819  * just let do_wp_page work it out if a write is requested later - to
820  * force COW, vm_page_prot omits write permission from any private vma.
821  */
822 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
823  unsigned long addr, swp_entry_t entry, struct page *page)
824 {
825  struct mem_cgroup *memcg;
826  spinlock_t *ptl;
827  pte_t *pte;
828  int ret = 1;
829 
830  if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
831  GFP_KERNEL, &memcg)) {
832  ret = -ENOMEM;
833  goto out_nolock;
834  }
835 
836  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
837  if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
838  mem_cgroup_cancel_charge_swapin(memcg);
839  ret = 0;
840  goto out;
841  }
842 
843  dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
844  inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
845  get_page(page);
846  set_pte_at(vma->vm_mm, addr, pte,
847  pte_mkold(mk_pte(page, vma->vm_page_prot)));
848  page_add_anon_rmap(page, vma, addr);
849  mem_cgroup_commit_charge_swapin(page, memcg);
850  swap_free(entry);
851  /*
852  * Move the page to the active list so it is not
853  * immediately swapped out again after swapon.
854  */
855  activate_page(page);
856 out:
857  pte_unmap_unlock(pte, ptl);
858 out_nolock:
859  return ret;
860 }
861 
862 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
863  unsigned long addr, unsigned long end,
864  swp_entry_t entry, struct page *page)
865 {
866  pte_t swp_pte = swp_entry_to_pte(entry);
867  pte_t *pte;
868  int ret = 0;
869 
870  /*
871  * We don't actually need pte lock while scanning for swp_pte: since
872  * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
873  * page table while we're scanning; though it could get zapped, and on
874  * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
875  * of unmatched parts which look like swp_pte, so unuse_pte must
876  * recheck under pte lock. Scanning without pte lock lets it be
877  * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
878  */
879  pte = pte_offset_map(pmd, addr);
880  do {
881  /*
882  * swapoff spends a _lot_ of time in this loop!
883  * Test inline before going to call unuse_pte.
884  */
885  if (unlikely(pte_same(*pte, swp_pte))) {
886  pte_unmap(pte);
887  ret = unuse_pte(vma, pmd, addr, entry, page);
888  if (ret)
889  goto out;
890  pte = pte_offset_map(pmd, addr);
891  }
892  } while (pte++, addr += PAGE_SIZE, addr != end);
893  pte_unmap(pte - 1);
894 out:
895  return ret;
896 }
897 
898 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
899  unsigned long addr, unsigned long end,
900  swp_entry_t entry, struct page *page)
901 {
902  pmd_t *pmd;
903  unsigned long next;
904  int ret;
905 
906  pmd = pmd_offset(pud, addr);
907  do {
908  next = pmd_addr_end(addr, end);
909  if (pmd_none_or_trans_huge_or_clear_bad(pmd))
910  continue;
911  ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
912  if (ret)
913  return ret;
914  } while (pmd++, addr = next, addr != end);
915  return 0;
916 }
917 
918 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
919  unsigned long addr, unsigned long end,
920  swp_entry_t entry, struct page *page)
921 {
922  pud_t *pud;
923  unsigned long next;
924  int ret;
925 
926  pud = pud_offset(pgd, addr);
927  do {
928  next = pud_addr_end(addr, end);
929  if (pud_none_or_clear_bad(pud))
930  continue;
931  ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
932  if (ret)
933  return ret;
934  } while (pud++, addr = next, addr != end);
935  return 0;
936 }
937 
938 static int unuse_vma(struct vm_area_struct *vma,
939  swp_entry_t entry, struct page *page)
940 {
941  pgd_t *pgd;
942  unsigned long addr, end, next;
943  int ret;
944 
945  if (page_anon_vma(page)) {
946  addr = page_address_in_vma(page, vma);
947  if (addr == -EFAULT)
948  return 0;
949  else
950  end = addr + PAGE_SIZE;
951  } else {
952  addr = vma->vm_start;
953  end = vma->vm_end;
954  }
955 
956  pgd = pgd_offset(vma->vm_mm, addr);
957  do {
958  next = pgd_addr_end(addr, end);
959  if (pgd_none_or_clear_bad(pgd))
960  continue;
961  ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
962  if (ret)
963  return ret;
964  } while (pgd++, addr = next, addr != end);
965  return 0;
966 }
967 
968 static int unuse_mm(struct mm_struct *mm,
969  swp_entry_t entry, struct page *page)
970 {
971  struct vm_area_struct *vma;
972  int ret = 0;
973 
974  if (!down_read_trylock(&mm->mmap_sem)) {
975  /*
976  * Activate page so shrink_inactive_list is unlikely to unmap
977  * its ptes while lock is dropped, so swapoff can make progress.
978  */
979  activate_page(page);
980  unlock_page(page);
981  down_read(&mm->mmap_sem);
982  lock_page(page);
983  }
984  for (vma = mm->mmap; vma; vma = vma->vm_next) {
985  if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
986  break;
987  }
988  up_read(&mm->mmap_sem);
989  return (ret < 0)? ret: 0;
990 }
991 
992 /*
993  * Scan swap_map (or frontswap_map if frontswap parameter is true)
994  * from current position to next entry still in use.
995  * Recycle to start on reaching the end, returning 0 when empty.
996  */
997 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
998  unsigned int prev, bool frontswap)
999 {
1000  unsigned int max = si->max;
1001  unsigned int i = prev;
1002  unsigned char count;
1003 
1004  /*
1005  * No need for swap_lock here: we're just looking
1006  * for whether an entry is in use, not modifying it; false
1007  * hits are okay, and sys_swapoff() has already prevented new
1008  * allocations from this area (while holding swap_lock).
1009  */
1010  for (;;) {
1011  if (++i >= max) {
1012  if (!prev) {
1013  i = 0;
1014  break;
1015  }
1016  /*
1017  * No entries in use at top of swap_map,
1018  * loop back to start and recheck there.
1019  */
1020  max = prev + 1;
1021  prev = 0;
1022  i = 1;
1023  }
1024  if (frontswap) {
1025  if (frontswap_test(si, i))
1026  break;
1027  else
1028  continue;
1029  }
1030  count = si->swap_map[i];
1031  if (count && swap_count(count) != SWAP_MAP_BAD)
1032  break;
1033  }
1034  return i;
1035 }
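
/*
 * A minimal standalone sketch (not part of swapfile.c) of the
 * wrap-around in find_next_to_unuse() above: on reaching the top of
 * the map, rescan [1, prev] once, and return 0 only when nothing at
 * all is left in use. The SWAP_MAP_BAD and frontswap checks are
 * omitted here.
 */
#include <stdio.h>

static unsigned int next_in_use(const unsigned char *map,
				unsigned int max, unsigned int prev)
{
	unsigned int i = prev;

	for (;;) {
		if (++i >= max) {
			if (!prev)
				return 0;	/* whole map is empty */
			max = prev + 1;		/* rescan [1, prev] once */
			prev = 0;
			i = 1;
		}
		if (map[i])
			return i;
	}
}

int main(void)
{
	unsigned char map[8] = { 0, 0, 1, 0, 0, 1, 0, 0 };

	printf("%u\n", next_in_use(map, 8, 5));	/* wraps and prints 2 */
	return 0;
}
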
1036 
1037 /*
1038  * We completely avoid races by reading each swap page in advance,
1039  * and then search for the process using it. All the necessary
1040  * page table adjustments can then be made atomically.
1041  *
1042  * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1043  * pages_to_unuse==0 means all pages; ignored if frontswap is false
1044  */
1045 int try_to_unuse(unsigned int type, bool frontswap,
1046  unsigned long pages_to_unuse)
1047 {
1048  struct swap_info_struct *si = swap_info[type];
1049  struct mm_struct *start_mm;
1050  unsigned char *swap_map;
1051  unsigned char swcount;
1052  struct page *page;
1053  swp_entry_t entry;
1054  unsigned int i = 0;
1055  int retval = 0;
1056 
1057  /*
1058  * When searching mms for an entry, a good strategy is to
1059  * start at the first mm we freed the previous entry from
1060  * (though actually we don't notice whether we or coincidence
1061  * freed the entry). Initialize this start_mm with a hold.
1062  *
1063  * A simpler strategy would be to start at the last mm we
1064  * freed the previous entry from; but that would take less
1065  * advantage of mmlist ordering, which clusters forked mms
1066  * together, child after parent. If we race with dup_mmap(), we
1067  * prefer to resolve parent before child, lest we miss entries
1068  * duplicated after we scanned child: using last mm would invert
1069  * that.
1070  */
1071  start_mm = &init_mm;
1072  atomic_inc(&init_mm.mm_users);
1073 
1074  /*
1075  * Keep on scanning until all entries have gone. Usually,
1076  * one pass through swap_map is enough, but not necessarily:
1077  * there are races when an instance of an entry might be missed.
1078  */
1079  while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1080  if (signal_pending(current)) {
1081  retval = -EINTR;
1082  break;
1083  }
1084 
1085  /*
1086  * Get a page for the entry, using the existing swap
1087  * cache page if there is one. Otherwise, get a clean
1088  * page and read the swap into it.
1089  */
1090  swap_map = &si->swap_map[i];
1091  entry = swp_entry(type, i);
1092  page = read_swap_cache_async(entry,
1093  GFP_HIGHUSER_MOVABLE, NULL, 0);
1094  if (!page) {
1095  /*
1096  * Either swap_duplicate() failed because entry
1097  * has been freed independently, and will not be
1098  * reused since sys_swapoff() already disabled
1099  * allocation from here, or alloc_page() failed.
1100  */
1101  if (!*swap_map)
1102  continue;
1103  retval = -ENOMEM;
1104  break;
1105  }
1106 
1107  /*
1108  * Don't hold on to start_mm if it looks like exiting.
1109  */
1110  if (atomic_read(&start_mm->mm_users) == 1) {
1111  mmput(start_mm);
1112  start_mm = &init_mm;
1113  atomic_inc(&init_mm.mm_users);
1114  }
1115 
1116  /*
1117  * Wait for and lock page. When do_swap_page races with
1118  * try_to_unuse, do_swap_page can handle the fault much
1119  * faster than try_to_unuse can locate the entry. This
1120  * apparently redundant "wait_on_page_locked" lets try_to_unuse
1121  * defer to do_swap_page in such a case - in some tests,
1122  * do_swap_page and try_to_unuse repeatedly compete.
1123  */
1124  wait_on_page_locked(page);
1125  wait_on_page_writeback(page);
1126  lock_page(page);
1127  wait_on_page_writeback(page);
1128 
1129  /*
1130  * Remove all references to entry.
1131  */
1132  swcount = *swap_map;
1133  if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1134  retval = shmem_unuse(entry, page);
1135  /* page has already been unlocked and released */
1136  if (retval < 0)
1137  break;
1138  continue;
1139  }
1140  if (swap_count(swcount) && start_mm != &init_mm)
1141  retval = unuse_mm(start_mm, entry, page);
1142 
1143  if (swap_count(*swap_map)) {
1144  int set_start_mm = (*swap_map >= swcount);
1145  struct list_head *p = &start_mm->mmlist;
1146  struct mm_struct *new_start_mm = start_mm;
1147  struct mm_struct *prev_mm = start_mm;
1148  struct mm_struct *mm;
1149 
1150  atomic_inc(&new_start_mm->mm_users);
1151  atomic_inc(&prev_mm->mm_users);
1152  spin_lock(&mmlist_lock);
1153  while (swap_count(*swap_map) && !retval &&
1154  (p = p->next) != &start_mm->mmlist) {
1155  mm = list_entry(p, struct mm_struct, mmlist);
1156  if (!atomic_inc_not_zero(&mm->mm_users))
1157  continue;
1158  spin_unlock(&mmlist_lock);
1159  mmput(prev_mm);
1160  prev_mm = mm;
1161 
1162  cond_resched();
1163 
1164  swcount = *swap_map;
1165  if (!swap_count(swcount)) /* any usage ? */
1166  ;
1167  else if (mm == &init_mm)
1168  set_start_mm = 1;
1169  else
1170  retval = unuse_mm(mm, entry, page);
1171 
1172  if (set_start_mm && *swap_map < swcount) {
1173  mmput(new_start_mm);
1174  atomic_inc(&mm->mm_users);
1175  new_start_mm = mm;
1176  set_start_mm = 0;
1177  }
1178  spin_lock(&mmlist_lock);
1179  }
1180  spin_unlock(&mmlist_lock);
1181  mmput(prev_mm);
1182  mmput(start_mm);
1183  start_mm = new_start_mm;
1184  }
1185  if (retval) {
1186  unlock_page(page);
1187  page_cache_release(page);
1188  break;
1189  }
1190 
1191  /*
1192  * If a reference remains (rare), we would like to leave
1193  * the page in the swap cache; but try_to_unmap could
1194  * then re-duplicate the entry once we drop page lock,
1195  * so we might loop indefinitely; also, that page could
1196  * not be swapped out to other storage meanwhile. So:
1197  * delete from cache even if there's another reference,
1198  * after ensuring that the data has been saved to disk -
1199  * since if the reference remains (rarer), it will be
1200  * read from disk into another page. Splitting into two
1201  * pages would be incorrect if swap supported "shared
1202  * private" pages, but they are handled by tmpfs files.
1203  *
1204  * Given how unuse_vma() targets one particular offset
1205  * in an anon_vma, once the anon_vma has been determined,
1206  * this splitting happens to be just what is needed to
1207  * handle where KSM pages have been swapped out: re-reading
1208  * is unnecessarily slow, but we can fix that later on.
1209  */
1210  if (swap_count(*swap_map) &&
1211  PageDirty(page) && PageSwapCache(page)) {
1212  struct writeback_control wbc = {
1213  .sync_mode = WB_SYNC_NONE,
1214  };
1215 
1216  swap_writepage(page, &wbc);
1217  lock_page(page);
1218  wait_on_page_writeback(page);
1219  }
1220 
1221  /*
1222  * It is conceivable that a racing task removed this page from
1223  * swap cache just before we acquired the page lock at the top,
1224  * or while we dropped it in unuse_mm(). The page might even
1225  * be back in swap cache on another swap area: that we must not
1226  * delete, since it may not have been written out to swap yet.
1227  */
1228  if (PageSwapCache(page) &&
1229  likely(page_private(page) == entry.val))
1230  delete_from_swap_cache(page);
1231 
1232  /*
1233  * So we could skip searching mms once swap count went
1234  * to 1, we did not mark any present ptes as dirty: must
1235  * mark page dirty so shrink_page_list will preserve it.
1236  */
1237  SetPageDirty(page);
1238  unlock_page(page);
1239  page_cache_release(page);
1240 
1241  /*
1242  * Make sure that we aren't completely killing
1243  * interactive performance.
1244  */
1245  cond_resched();
1246  if (frontswap && pages_to_unuse > 0) {
1247  if (!--pages_to_unuse)
1248  break;
1249  }
1250  }
1251 
1252  mmput(start_mm);
1253  return retval;
1254 }
1255 
1256 /*
1257  * After a successful try_to_unuse, if no swap is now in use, we know
1258  * we can empty the mmlist. swap_lock must be held on entry and exit.
1259  * Note that mmlist_lock nests inside swap_lock, and an mm must be
1260  * added to the mmlist just after page_duplicate - before would be racy.
1261  */
1262 static void drain_mmlist(void)
1263 {
1264  struct list_head *p, *next;
1265  unsigned int type;
1266 
1267  for (type = 0; type < nr_swapfiles; type++)
1268  if (swap_info[type]->inuse_pages)
1269  return;
1270  spin_lock(&mmlist_lock);
1271  list_for_each_safe(p, next, &init_mm.mmlist)
1272  list_del_init(p);
1273  spin_unlock(&mmlist_lock);
1274 }
1275 
1276 /*
1277  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1278  * corresponds to page offset for the specified swap entry.
1279  * Note that the type of this function is sector_t, but it returns page offset
1280  * into the bdev, not sector offset.
1281  */
1282 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1283 {
1284  struct swap_info_struct *sis;
1285  struct swap_extent *start_se;
1286  struct swap_extent *se;
1287  pgoff_t offset;
1288 
1289  sis = swap_info[swp_type(entry)];
1290  *bdev = sis->bdev;
1291 
1292  offset = swp_offset(entry);
1293  start_se = sis->curr_swap_extent;
1294  se = start_se;
1295 
1296  for ( ; ; ) {
1297  struct list_head *lh;
1298 
1299  if (se->start_page <= offset &&
1300  offset < (se->start_page + se->nr_pages)) {
1301  return se->start_block + (offset - se->start_page);
1302  }
1303  lh = se->list.next;
1304  se = list_entry(lh, struct swap_extent, list);
1305  sis->curr_swap_extent = se;
1306  BUG_ON(se == start_se); /* It *must* be present */
1307  }
1308 }
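
/*
 * A minimal standalone sketch (not part of swapfile.c) of the extent
 * lookup in map_swap_entry() above: walk a page-ordered list of
 * (start_page, nr_pages, start_block) extents and translate a swap
 * offset into a disk block. The kernel additionally caches the last
 * hit in curr_swap_extent, which this sketch omits.
 */
#include <stdio.h>

struct extent { unsigned long start_page, nr_pages, start_block; };

static unsigned long to_block(const struct extent *ext, int n,
			      unsigned long offset)
{
	int i;

	for (i = 0; i < n; i++) {
		if (ext[i].start_page <= offset &&
		    offset < ext[i].start_page + ext[i].nr_pages)
			return ext[i].start_block +
			       (offset - ext[i].start_page);
	}
	return ~0UL;	/* the kernel BUG()s instead: it must be present */
}

int main(void)
{
	struct extent ext[2] = { { 0, 4, 100 }, { 4, 8, 300 } };

	printf("block %lu\n", to_block(ext, 2, 6));	/* block 302 */
	return 0;
}
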
1309 
1310 /*
1311  * Returns the page offset into bdev for the specified page's swap entry.
1312  */
1313 sector_t map_swap_page(struct page *page, struct block_device **bdev)
1314 {
1315  swp_entry_t entry;
1316  entry.val = page_private(page);
1317  return map_swap_entry(entry, bdev);
1318 }
1319 
1320 /*
1321  * Free all of a swapdev's extent information
1322  */
1323 static void destroy_swap_extents(struct swap_info_struct *sis)
1324 {
1325  while (!list_empty(&sis->first_swap_extent.list)) {
1326  struct swap_extent *se;
1327 
1328  se = list_entry(sis->first_swap_extent.list.next,
1329  struct swap_extent, list);
1330  list_del(&se->list);
1331  kfree(se);
1332  }
1333 
1334  if (sis->flags & SWP_FILE) {
1335  struct file *swap_file = sis->swap_file;
1336  struct address_space *mapping = swap_file->f_mapping;
1337 
1338  sis->flags &= ~SWP_FILE;
1339  mapping->a_ops->swap_deactivate(swap_file);
1340  }
1341 }
1342 
1343 /*
1344  * Add a block range (and the corresponding page range) into this swapdev's
1345  * extent list. The extent list is kept sorted in page order.
1346  *
1347  * This function rather assumes that it is called in ascending page order.
1348  */
1349 int
1350 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1351  unsigned long nr_pages, sector_t start_block)
1352 {
1353  struct swap_extent *se;
1354  struct swap_extent *new_se;
1355  struct list_head *lh;
1356 
1357  if (start_page == 0) {
1358  se = &sis->first_swap_extent;
1359  sis->curr_swap_extent = se;
1360  se->start_page = 0;
1361  se->nr_pages = nr_pages;
1362  se->start_block = start_block;
1363  return 1;
1364  } else {
1365  lh = sis->first_swap_extent.list.prev; /* Highest extent */
1366  se = list_entry(lh, struct swap_extent, list);
1367  BUG_ON(se->start_page + se->nr_pages != start_page);
1368  if (se->start_block + se->nr_pages == start_block) {
1369  /* Merge it */
1370  se->nr_pages += nr_pages;
1371  return 0;
1372  }
1373  }
1374 
1375  /*
1376  * No merge. Insert a new extent, preserving ordering.
1377  */
1378  new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1379  if (new_se == NULL)
1380  return -ENOMEM;
1381  new_se->start_page = start_page;
1382  new_se->nr_pages = nr_pages;
1383  new_se->start_block = start_block;
1384 
1385  list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1386  return 1;
1387 }
1388 
1389 /*
1390  * A `swap extent' is a simple thing which maps a contiguous range of pages
1391  * onto a contiguous range of disk blocks. An ordered list of swap extents
1392  * is built at swapon time and is then used at swap_writepage/swap_readpage
1393  * time for locating where on disk a page belongs.
1394  *
1395  * If the swapfile is an S_ISBLK block device, a single extent is installed.
1396  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1397  * swap files identically.
1398  *
1399  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1400  * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
1401  * swapfiles are handled *identically* after swapon time.
1402  *
1403  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1404  * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
1405  * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1406  * requirements, they are simply tossed out - we will never use those blocks
1407  * for swapping.
1408  *
1409  * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
1410  * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1411  * which will scribble on the fs.
1412  *
1413  * The amount of disk space which a single swap extent represents varies.
1414  * Typically it is in the 1-4 megabyte range. So we can have hundreds of
1415  * extents in the list. To avoid much list walking, we cache the previous
1416  * search location in `curr_swap_extent', and start new searches from there.
1417  * This is extremely effective. The average number of iterations in
1418  * map_swap_page() has been measured at about 0.3 per page. - akpm.
1419  */
1420 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1421 {
1422  struct file *swap_file = sis->swap_file;
1423  struct address_space *mapping = swap_file->f_mapping;
1424  struct inode *inode = mapping->host;
1425  int ret;
1426 
1427  if (S_ISBLK(inode->i_mode)) {
1428  ret = add_swap_extent(sis, 0, sis->max, 0);
1429  *span = sis->pages;
1430  return ret;
1431  }
1432 
1433  if (mapping->a_ops->swap_activate) {
1434  ret = mapping->a_ops->swap_activate(sis, swap_file, span);
1435  if (!ret) {
1436  sis->flags |= SWP_FILE;
1437  ret = add_swap_extent(sis, 0, sis->max, 0);
1438  *span = sis->pages;
1439  }
1440  return ret;
1441  }
1442 
1443  return generic_swapfile_activate(sis, swap_file, span);
1444 }
1445 
1446 static void enable_swap_info(struct swap_info_struct *p, int prio,
1447  unsigned char *swap_map,
1448  unsigned long *frontswap_map)
1449 {
1450  int i, prev;
1451 
1452  spin_lock(&swap_lock);
1453  if (prio >= 0)
1454  p->prio = prio;
1455  else
1456  p->prio = --least_priority;
1457  p->swap_map = swap_map;
1458  frontswap_map_set(p, frontswap_map);
1459  p->flags |= SWP_WRITEOK;
1460  nr_swap_pages += p->pages;
1461  total_swap_pages += p->pages;
1462 
1463  /* insert swap space into swap_list: */
1464  prev = -1;
1465  for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1466  if (p->prio >= swap_info[i]->prio)
1467  break;
1468  prev = i;
1469  }
1470  p->next = i;
1471  if (prev < 0)
1472  swap_list.head = swap_list.next = p->type;
1473  else
1474  swap_info[prev]->next = p->type;
1475  frontswap_init(p->type);
1476  spin_unlock(&swap_lock);
1477 }
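
/*
 * A minimal standalone sketch (not part of swapfile.c) of the
 * priority-ordered insert in enable_swap_info() above: swap areas form
 * a singly linked list threaded through ->next indices, kept sorted by
 * descending priority, with -1 terminating the list.
 */
#include <stdio.h>

static int head = -1;
static int prio[8], next[8];

static void insert(int type)
{
	int i, prev = -1;

	for (i = head; i >= 0; i = next[i]) {
		if (prio[type] >= prio[i])
			break;
		prev = i;
	}
	next[type] = i;
	if (prev < 0)
		head = type;
	else
		next[prev] = type;
}

int main(void)
{
	int i;

	prio[0] = 5;  insert(0);
	prio[1] = 10; insert(1);
	prio[2] = 7;  insert(2);
	for (i = head; i >= 0; i = next[i])
		printf("type %d prio %d\n", i, prio[i]);	/* 1, 2, 0 */
	return 0;
}
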
1478 
1479 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1480 {
1481  struct swap_info_struct *p = NULL;
1482  unsigned char *swap_map;
1483  struct file *swap_file, *victim;
1484  struct address_space *mapping;
1485  struct inode *inode;
1486  struct filename *pathname;
1487  int oom_score_adj;
1488  int i, type, prev;
1489  int err;
1490 
1491  if (!capable(CAP_SYS_ADMIN))
1492  return -EPERM;
1493 
1494  BUG_ON(!current->mm);
1495 
1496  pathname = getname(specialfile);
1497  if (IS_ERR(pathname))
1498  return PTR_ERR(pathname);
1499 
1500  victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1501  err = PTR_ERR(victim);
1502  if (IS_ERR(victim))
1503  goto out;
1504 
1505  mapping = victim->f_mapping;
1506  prev = -1;
1507  spin_lock(&swap_lock);
1508  for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1509  p = swap_info[type];
1510  if (p->flags & SWP_WRITEOK) {
1511  if (p->swap_file->f_mapping == mapping)
1512  break;
1513  }
1514  prev = type;
1515  }
1516  if (type < 0) {
1517  err = -EINVAL;
1518  spin_unlock(&swap_lock);
1519  goto out_dput;
1520  }
1521  if (!security_vm_enough_memory_mm(current->mm, p->pages))
1522  vm_unacct_memory(p->pages);
1523  else {
1524  err = -ENOMEM;
1525  spin_unlock(&swap_lock);
1526  goto out_dput;
1527  }
1528  if (prev < 0)
1529  swap_list.head = p->next;
1530  else
1531  swap_info[prev]->next = p->next;
1532  if (type == swap_list.next) {
1533  /* just pick something that's safe... */
1534  swap_list.next = swap_list.head;
1535  }
1536  if (p->prio < 0) {
1537  for (i = p->next; i >= 0; i = swap_info[i]->next)
1538  swap_info[i]->prio = p->prio--;
1539  least_priority++;
1540  }
1541  nr_swap_pages -= p->pages;
1542  total_swap_pages -= p->pages;
1543  p->flags &= ~SWP_WRITEOK;
1544  spin_unlock(&swap_lock);
1545 
1546  oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1547  err = try_to_unuse(type, false, 0); /* force all pages to be unused */
1548  compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
1549 
1550  if (err) {
1551  /*
1552  * reading p->prio and p->swap_map outside the lock is
1553  * safe here because only sys_swapon and sys_swapoff
1554  * change them, and there can be no other sys_swapon or
1555  * sys_swapoff for this swap_info_struct at this point.
1556  */
1557  /* re-insert swap space back into swap_list */
1558  enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
1559  goto out_dput;
1560  }
1561 
1562  destroy_swap_extents(p);
1563  if (p->flags & SWP_CONTINUED)
1564  free_swap_count_continuations(p);
1565 
1566  mutex_lock(&swapon_mutex);
1567  spin_lock(&swap_lock);
1568  drain_mmlist();
1569 
1570  /* wait for anyone still in scan_swap_map */
1571  p->highest_bit = 0; /* cuts scans short */
1572  while (p->flags >= SWP_SCANNING) {
1573  spin_unlock(&swap_lock);
1574  schedule_timeout_uninterruptible(1);
1575  spin_lock(&swap_lock);
1576  }
1577 
1578  swap_file = p->swap_file;
1579  p->swap_file = NULL;
1580  p->max = 0;
1581  swap_map = p->swap_map;
1582  p->swap_map = NULL;
1583  p->flags = 0;
1584  frontswap_invalidate_area(type);
1585  spin_unlock(&swap_lock);
1586  mutex_unlock(&swapon_mutex);
1587  vfree(swap_map);
1588  vfree(frontswap_map_get(p));
1589  /* Destroy swap account information */
1590  swap_cgroup_swapoff(type);
1591 
1592  inode = mapping->host;
1593  if (S_ISBLK(inode->i_mode)) {
1594  struct block_device *bdev = I_BDEV(inode);
1595  set_blocksize(bdev, p->old_block_size);
1596  blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1597  } else {
1598  mutex_lock(&inode->i_mutex);
1599  inode->i_flags &= ~S_SWAPFILE;
1600  mutex_unlock(&inode->i_mutex);
1601  }
1602  filp_close(swap_file, NULL);
1603  err = 0;
1604  atomic_inc(&proc_poll_event);
1605  wake_up_interruptible(&proc_poll_wait);
1606 
1607 out_dput:
1608  filp_close(victim, NULL);
1609 out:
1610  putname(pathname);
1611  return err;
1612 }
1613 
1614 #ifdef CONFIG_PROC_FS
1615 static unsigned swaps_poll(struct file *file, poll_table *wait)
1616 {
1617  struct seq_file *seq = file->private_data;
1618 
1619  poll_wait(file, &proc_poll_wait, wait);
1620 
1621  if (seq->poll_event != atomic_read(&proc_poll_event)) {
1622  seq->poll_event = atomic_read(&proc_poll_event);
1623  return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1624  }
1625 
1626  return POLLIN | POLLRDNORM;
1627 }
1628 
1629 /* iterator */
1630 static void *swap_start(struct seq_file *swap, loff_t *pos)
1631 {
1632  struct swap_info_struct *si;
1633  int type;
1634  loff_t l = *pos;
1635 
1636  mutex_lock(&swapon_mutex);
1637 
1638  if (!l)
1639  return SEQ_START_TOKEN;
1640 
1641  for (type = 0; type < nr_swapfiles; type++) {
1642  smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1643  si = swap_info[type];
1644  if (!(si->flags & SWP_USED) || !si->swap_map)
1645  continue;
1646  if (!--l)
1647  return si;
1648  }
1649 
1650  return NULL;
1651 }
1652 
1653 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1654 {
1655  struct swap_info_struct *si = v;
1656  int type;
1657 
1658  if (v == SEQ_START_TOKEN)
1659  type = 0;
1660  else
1661  type = si->type + 1;
1662 
1663  for (; type < nr_swapfiles; type++) {
1664  smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1665  si = swap_info[type];
1666  if (!(si->flags & SWP_USED) || !si->swap_map)
1667  continue;
1668  ++*pos;
1669  return si;
1670  }
1671 
1672  return NULL;
1673 }
1674 
1675 static void swap_stop(struct seq_file *swap, void *v)
1676 {
1677  mutex_unlock(&swapon_mutex);
1678 }
1679 
1680 static int swap_show(struct seq_file *swap, void *v)
1681 {
1682  struct swap_info_struct *si = v;
1683  struct file *file;
1684  int len;
1685 
1686  if (si == SEQ_START_TOKEN) {
1687  seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1688  return 0;
1689  }
1690 
1691  file = si->swap_file;
1692  len = seq_path(swap, &file->f_path, " \t\n\\");
1693  seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1694  len < 40 ? 40 - len : 1, " ",
1695  S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1696  "partition" : "file\t",
1697  si->pages << (PAGE_SHIFT - 10),
1698  si->inuse_pages << (PAGE_SHIFT - 10),
1699  si->prio);
1700  return 0;
1701 }
1702 
1703 static const struct seq_operations swaps_op = {
1704  .start = swap_start,
1705  .next = swap_next,
1706  .stop = swap_stop,
1707  .show = swap_show
1708 };
1709 
1710 static int swaps_open(struct inode *inode, struct file *file)
1711 {
1712  struct seq_file *seq;
1713  int ret;
1714 
1715  ret = seq_open(file, &swaps_op);
1716  if (ret)
1717  return ret;
1718 
1719  seq = file->private_data;
1720  seq->poll_event = atomic_read(&proc_poll_event);
1721  return 0;
1722 }
1723 
1724 static const struct file_operations proc_swaps_operations = {
1725  .open = swaps_open,
1726  .read = seq_read,
1727  .llseek = seq_lseek,
1728  .release = seq_release,
1729  .poll = swaps_poll,
1730 };
1731 
1732 static int __init procswaps_init(void)
1733 {
1734  proc_create("swaps", 0, NULL, &proc_swaps_operations);
1735  return 0;
1736 }
1737 __initcall(procswaps_init);
1738 #endif /* CONFIG_PROC_FS */
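
/*
 * A minimal userspace sketch (not part of swapfile.c) of how the
 * swaps_poll() support above is consumed: poll /proc/swaps for
 * POLLERR/POLLPRI and re-read it whenever a swapon or swapoff bumps
 * proc_poll_event.
 */
#include <poll.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	char buf[4096];

	pfd.fd = open("/proc/swaps", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLERR | POLLPRI;
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		lseek(pfd.fd, 0, SEEK_SET);
		read(pfd.fd, buf, sizeof(buf));	/* fresh swap table */
		printf("swap configuration changed\n");
	}
	close(pfd.fd);
	return 0;
}
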
1739 
1740 #ifdef MAX_SWAPFILES_CHECK
1741 static int __init max_swapfiles_check(void)
1742 {
1743  MAX_SWAPFILES_CHECK();
1744  return 0;
1745 }
1746 late_initcall(max_swapfiles_check);
1747 #endif
1748 
1749 static struct swap_info_struct *alloc_swap_info(void)
1750 {
1751  struct swap_info_struct *p;
1752  unsigned int type;
1753 
1754  p = kzalloc(sizeof(*p), GFP_KERNEL);
1755  if (!p)
1756  return ERR_PTR(-ENOMEM);
1757 
1758  spin_lock(&swap_lock);
1759  for (type = 0; type < nr_swapfiles; type++) {
1760  if (!(swap_info[type]->flags & SWP_USED))
1761  break;
1762  }
1763  if (type >= MAX_SWAPFILES) {
1764  spin_unlock(&swap_lock);
1765  kfree(p);
1766  return ERR_PTR(-EPERM);
1767  }
1768  if (type >= nr_swapfiles) {
1769  p->type = type;
1770  swap_info[type] = p;
1771  /*
1772  * Write swap_info[type] before nr_swapfiles, in case a
1773  * racing procfs swap_start() or swap_next() is reading them.
1774  * (We never shrink nr_swapfiles, we never free this entry.)
1775  */
1776  smp_wmb();
1777  nr_swapfiles++;
1778  } else {
1779  kfree(p);
1780  p = swap_info[type];
1781  /*
1782  * Do not memset this entry: a racing procfs swap_next()
1783  * would be relying on p->type to remain valid.
1784  */
1785  }
1786  INIT_LIST_HEAD(&p->first_swap_extent.list);
1787  p->flags = SWP_USED;
1788  p->next = -1;
1789  spin_unlock(&swap_lock);
1790 
1791  return p;
1792 }
1793 
1794 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1795 {
1796  int error;
1797 
1798  if (S_ISBLK(inode->i_mode)) {
1799  p->bdev = bdgrab(I_BDEV(inode));
1800  error = blkdev_get(p->bdev,
1801  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1802  sys_swapon);
1803  if (error < 0) {
1804  p->bdev = NULL;
1805  return -EINVAL;
1806  }
1807  p->old_block_size = block_size(p->bdev);
1808  error = set_blocksize(p->bdev, PAGE_SIZE);
1809  if (error < 0)
1810  return error;
1811  p->flags |= SWP_BLKDEV;
1812  } else if (S_ISREG(inode->i_mode)) {
1813  p->bdev = inode->i_sb->s_bdev;
1814  mutex_lock(&inode->i_mutex);
1815  if (IS_SWAPFILE(inode))
1816  return -EBUSY;
1817  } else
1818  return -EINVAL;
1819 
1820  return 0;
1821 }
1822 
1823 static unsigned long read_swap_header(struct swap_info_struct *p,
1824  union swap_header *swap_header,
1825  struct inode *inode)
1826 {
1827  int i;
1828  unsigned long maxpages;
1829  unsigned long swapfilepages;
1830 
1831  if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1832  printk(KERN_ERR "Unable to find swap-space signature\n");
1833  return 0;
1834  }
1835 
1836  /* swap partition endianness hack... */
1837  if (swab32(swap_header->info.version) == 1) {
1838  swab32s(&swap_header->info.version);
1839  swab32s(&swap_header->info.last_page);
1840  swab32s(&swap_header->info.nr_badpages);
1841  for (i = 0; i < swap_header->info.nr_badpages; i++)
1842  swab32s(&swap_header->info.badpages[i]);
1843  }
1844  /* Check the swap header's sub-version */
1845  if (swap_header->info.version != 1) {
1847  "Unable to handle swap header version %d\n",
1848  swap_header->info.version);
1849  return 0;
1850  }
1851 
1852  p->lowest_bit = 1;
1853  p->cluster_next = 1;
1854  p->cluster_nr = 0;
1855 
1856  /*
1857  * Find out how many pages are allowed for a single swap
1858  * device. There are two limiting factors: 1) the number
1859  * of bits for the swap offset in the swp_entry_t type, and
1860  * 2) the number of bits in the swap pte as defined by the
1861  * different architectures. In order to find the
1862  * largest possible bit mask, a swap entry with swap type 0
1863  * and swap offset ~0UL is created, encoded to a swap pte,
1864  * decoded to a swp_entry_t again, and finally the swap
1865  * offset is extracted. This will mask all the bits from
1866  * the initial ~0UL mask that can't be encoded in either
1867  * the swp_entry_t or the architecture definition of a
1868  * swap pte.
1869  */
1870  maxpages = swp_offset(pte_to_swp_entry(
1871  swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
1872  if (maxpages > swap_header->info.last_page) {
1873  maxpages = swap_header->info.last_page + 1;
1874  /* p->max is an unsigned int: don't overflow it */
1875  if ((unsigned int)maxpages == 0)
1876  maxpages = UINT_MAX;
1877  }
1878  p->highest_bit = maxpages - 1;
1879 
1880  if (!maxpages)
1881  return 0;
1882  swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1883  if (swapfilepages && maxpages > swapfilepages) {
1885  "Swap area shorter than signature indicates\n");
1886  return 0;
1887  }
1888  if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1889  return 0;
1890  if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1891  return 0;
1892 
1893  return maxpages;
1894 }
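
/*
 * A minimal standalone sketch (not part of swapfile.c) of the
 * endianness fix-up in read_swap_header() above: a version field
 * written as 1 on an opposite-endian machine reads back as 0x01000000,
 * so if byte-swapping it yields 1, every header field is swapped.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint32_t version = 0x01000000;	/* header written other-endian */

	if (swab32(version) == 1)
		version = swab32(version);
	printf("version %u\n", version);	/* version 1 */
	return 0;
}
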
1895 
1896 static int setup_swap_map_and_extents(struct swap_info_struct *p,
1897  union swap_header *swap_header,
1898  unsigned char *swap_map,
1899  unsigned long maxpages,
1900  sector_t *span)
1901 {
1902  int i;
1903  unsigned int nr_good_pages;
1904  int nr_extents;
1905 
1906  nr_good_pages = maxpages - 1; /* omit header page */
1907 
1908  for (i = 0; i < swap_header->info.nr_badpages; i++) {
1909  unsigned int page_nr = swap_header->info.badpages[i];
1910  if (page_nr == 0 || page_nr > swap_header->info.last_page)
1911  return -EINVAL;
1912  if (page_nr < maxpages) {
1913  swap_map[page_nr] = SWAP_MAP_BAD;
1914  nr_good_pages--;
1915  }
1916  }
1917 
1918  if (nr_good_pages) {
1919  swap_map[0] = SWAP_MAP_BAD;
1920  p->max = maxpages;
1921  p->pages = nr_good_pages;
1922  nr_extents = setup_swap_extents(p, span);
1923  if (nr_extents < 0)
1924  return nr_extents;
1925  nr_good_pages = p->pages;
1926  }
1927  if (!nr_good_pages) {
1928  printk(KERN_WARNING "Empty swap-file\n");
1929  return -EINVAL;
1930  }
1931 
1932  return nr_extents;
1933 }
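
/*
 * A minimal standalone sketch (not part of swapfile.c) of the bad-page
 * accounting in setup_swap_map_and_extents() above: slot 0 (the header)
 * and every page on the header's bad-page list are marked SWAP_MAP_BAD,
 * and only the remainder counts as usable swap.
 */
#include <stdio.h>

#define SWAP_MAP_BAD 0x3f	/* value from include/linux/swap.h */

int main(void)
{
	unsigned char map[16] = { 0 };
	unsigned int bad[2] = { 3, 9 };	/* from the header's badpages[] */
	unsigned int maxpages = 16, good = maxpages - 1;	/* omit header */
	unsigned int i;

	map[0] = SWAP_MAP_BAD;	/* never allocate the header page */
	for (i = 0; i < 2; i++) {
		map[bad[i]] = SWAP_MAP_BAD;
		good--;
	}
	printf("%u usable pages\n", good);	/* 13 usable pages */
	return 0;
}
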
1934 
1935 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1936 {
1937  struct swap_info_struct *p;
1938  struct filename *name;
1939  struct file *swap_file = NULL;
1940  struct address_space *mapping;
1941  int i;
1942  int prio;
1943  int error;
1944  union swap_header *swap_header;
1945  int nr_extents;
1946  sector_t span;
1947  unsigned long maxpages;
1948  unsigned char *swap_map = NULL;
1949  unsigned long *frontswap_map = NULL;
1950  struct page *page = NULL;
1951  struct inode *inode = NULL;
1952 
1953  if (swap_flags & ~SWAP_FLAGS_VALID)
1954  return -EINVAL;
1955 
1956  if (!capable(CAP_SYS_ADMIN))
1957  return -EPERM;
1958 
1959  p = alloc_swap_info();
1960  if (IS_ERR(p))
1961  return PTR_ERR(p);
1962 
1963  name = getname(specialfile);
1964  if (IS_ERR(name)) {
1965  error = PTR_ERR(name);
1966  name = NULL;
1967  goto bad_swap;
1968  }
1969  swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
1970  if (IS_ERR(swap_file)) {
1971  error = PTR_ERR(swap_file);
1972  swap_file = NULL;
1973  goto bad_swap;
1974  }
1975 
1976  p->swap_file = swap_file;
1977  mapping = swap_file->f_mapping;
1978 
1979  for (i = 0; i < nr_swapfiles; i++) {
1980  struct swap_info_struct *q = swap_info[i];
1981 
1982  if (q == p || !q->swap_file)
1983  continue;
1984  if (mapping == q->swap_file->f_mapping) {
1985  error = -EBUSY;
1986  goto bad_swap;
1987  }
1988  }
1989 
1990  inode = mapping->host;
1991  /* if S_ISREG(inode->i_mode), claim_swapfile() will do mutex_lock(&inode->i_mutex) */
1992  error = claim_swapfile(p, inode);
1993  if (unlikely(error))
1994  goto bad_swap;
1995 
1996  /*
1997  * Read the swap header.
1998  */
1999  if (!mapping->a_ops->readpage) {
2000  error = -EINVAL;
2001  goto bad_swap;
2002  }
2003  page = read_mapping_page(mapping, 0, swap_file);
2004  if (IS_ERR(page)) {
2005  error = PTR_ERR(page);
2006  goto bad_swap;
2007  }
2008  swap_header = kmap(page);
2009 
2010  maxpages = read_swap_header(p, swap_header, inode);
2011  if (unlikely(!maxpages)) {
2012  error = -EINVAL;
2013  goto bad_swap;
2014  }
2015 
2016  /* OK, set up the swap map and apply the bad block list */
2017  swap_map = vzalloc(maxpages);
2018  if (!swap_map) {
2019  error = -ENOMEM;
2020  goto bad_swap;
2021  }
2022 
2023  error = swap_cgroup_swapon(p->type, maxpages);
2024  if (error)
2025  goto bad_swap;
2026 
2027  nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2028  maxpages, &span);
2029  if (unlikely(nr_extents < 0)) {
2030  error = nr_extents;
2031  goto bad_swap;
2032  }
2033  /* frontswap enabled? set up bit-per-page map for frontswap */
2034  if (frontswap_enabled)
2035  frontswap_map = vzalloc(maxpages / sizeof(long));
2036 
2037  if (p->bdev) {
2038  if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2039  p->flags |= SWP_SOLIDSTATE;
2040  p->cluster_next = 1 + (random32() % p->highest_bit);
2041  }
2042  if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
2043  p->flags |= SWP_DISCARDABLE;
2044  }
2045 
2046  mutex_lock(&swapon_mutex);
2047  prio = -1;
2048  if (swap_flags & SWAP_FLAG_PREFER)
2049  prio =
2050  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2051  enable_swap_info(p, prio, swap_map, frontswap_map);
2052 
2053  printk(KERN_INFO "Adding %uk swap on %s. "
2054  "Priority:%d extents:%d across:%lluk %s%s%s\n",
2055  p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
2056  nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2057  (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2058  (p->flags & SWP_DISCARDABLE) ? "D" : "",
2059  (frontswap_map) ? "FS" : "");
2060 
2061  mutex_unlock(&swapon_mutex);
2062  atomic_inc(&proc_poll_event);
2063  wake_up_interruptible(&proc_poll_wait);
2064 
2065  if (S_ISREG(inode->i_mode))
2066  inode->i_flags |= S_SWAPFILE;
2067  error = 0;
2068  goto out;
2069 bad_swap:
2070  if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2071  set_blocksize(p->bdev, p->old_block_size);
2072  blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2073  }
2074  destroy_swap_extents(p);
2075  swap_cgroup_swapoff(p->type);
2076  spin_lock(&swap_lock);
2077  p->swap_file = NULL;
2078  p->flags = 0;
2079  spin_unlock(&swap_lock);
2080  vfree(swap_map);
2081  if (swap_file) {
2082  if (inode && S_ISREG(inode->i_mode)) {
2083  mutex_unlock(&inode->i_mutex);
2084  inode = NULL;
2085  }
2086  filp_close(swap_file, NULL);
2087  }
2088 out:
2089  if (page && !IS_ERR(page)) {
2090  kunmap(page);
2091  page_cache_release(page);
2092  }
2093  if (name)
2094  putname(name);
2095  if (inode && S_ISREG(inode->i_mode))
2096  mutex_unlock(&inode->i_mutex);
2097  return error;
2098 }
2099 
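From userspace this syscall is reached through the glibc wrapper declared in <sys/swap.h>, which also provides the SWAP_FLAG_* constants checked above. A minimal sketch enabling a swap file at priority 5 (the /swapfile path is an assumption; CAP_SYS_ADMIN is required, as enforced above):

#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	int flags = SWAP_FLAG_PREFER |
		    ((5 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);

	if (swapon("/swapfile", flags) < 0) {	/* assumed path */
		perror("swapon");	/* e.g. EPERM without CAP_SYS_ADMIN */
		return 1;
	}
	return 0;
}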
2100 void si_swapinfo(struct sysinfo *val)
2101 {
2102  unsigned int type;
2103  unsigned long nr_to_be_unused = 0;
2104 
2105  spin_lock(&swap_lock);
2106  for (type = 0; type < nr_swapfiles; type++) {
2107  struct swap_info_struct *si = swap_info[type];
2108 
2109  if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2110  nr_to_be_unused += si->inuse_pages;
2111  }
2112  val->freeswap = nr_swap_pages + nr_to_be_unused;
2113  val->totalswap = total_swap_pages + nr_to_be_unused;
2114  spin_unlock(&swap_lock);
2115 }
2116 
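The totals that si_swapinfo() computes surface in userspace through sysinfo(2), whose freeswap and totalswap fields are expressed in units of mem_unit. A minimal reader:

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) < 0)
		return 1;
	printf("swap: %lu / %lu bytes free\n",
	       si.freeswap * si.mem_unit, si.totalswap * si.mem_unit);
	return 0;
}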
2117 /*
2118  * Verify that a swap entry is valid and increment its swap map count.
2119  *
2120  * Return values:
2121  * - success -> 0
2122  * - swp_entry is invalid -> EINVAL
2123  * - swp_entry is migration entry -> EINVAL
2124  * - swap-cache reference is requested but there is already one. -> EEXIST
2125  * - swap-cache reference is requested but the entry is not used. -> ENOENT
2126  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2127  */
2128 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2129 {
2130  struct swap_info_struct *p;
2131  unsigned long offset, type;
2132  unsigned char count;
2133  unsigned char has_cache;
2134  int err = -EINVAL;
2135 
2136  if (non_swap_entry(entry))
2137  goto out;
2138 
2139  type = swp_type(entry);
2140  if (type >= nr_swapfiles)
2141  goto bad_file;
2142  p = swap_info[type];
2143  offset = swp_offset(entry);
2144 
2145  spin_lock(&swap_lock);
2146  if (unlikely(offset >= p->max))
2147  goto unlock_out;
2148 
2149  count = p->swap_map[offset];
2150  has_cache = count & SWAP_HAS_CACHE;
2151  count &= ~SWAP_HAS_CACHE;
2152  err = 0;
2153 
2154  if (usage == SWAP_HAS_CACHE) {
2155 
2156  /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2157  if (!has_cache && count)
2158  has_cache = SWAP_HAS_CACHE;
2159  else if (has_cache) /* someone else added cache */
2160  err = -EEXIST;
2161  else /* no users remaining */
2162  err = -ENOENT;
2163 
2164  } else if (count || has_cache) {
2165 
2166  if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2167  count += usage;
2168  else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2169  err = -EINVAL;
2170  else if (swap_count_continued(p, offset, count))
2171  count = COUNT_CONTINUED;
2172  else
2173  err = -ENOMEM;
2174  } else
2175  err = -ENOENT; /* unused swap entry */
2176 
2177  p->swap_map[offset] = count | has_cache;
2178 
2179 unlock_out:
2180  spin_unlock(&swap_lock);
2181 out:
2182  return err;
2183 
2184 bad_file:
2185  printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2186  goto out;
2187 }
2188 
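The count/flag packing that __swap_duplicate() manipulates can be decoded standalone. A sketch assuming the flag values of this kernel's <linux/swap.h> (SWAP_HAS_CACHE 0x40, COUNT_CONTINUED 0x80, count in the low six bits):

#include <stdio.h>

#define HAS_CACHE	0x40	/* mirrors SWAP_HAS_CACHE */
#define CONTINUED	0x80	/* mirrors COUNT_CONTINUED */

static void decode(unsigned char ent)
{
	printf("count=%2u cache=%d continued=%d\n",
	       ent & ~(HAS_CACHE | CONTINUED),
	       !!(ent & HAS_CACHE), !!(ent & CONTINUED));
}

int main(void)
{
	decode(0x01);	/* one pte reference, not in swap cache */
	decode(0x41);	/* one pte reference plus a swap-cache page */
	decode(0xbe);	/* count saturated at 0x3e, continuation follows */
	return 0;
}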
2189 /*
2190  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2191  * (in which case its reference count is never incremented).
2192  */
2193 void swap_shmem_alloc(swp_entry_t entry)
2194 {
2195  __swap_duplicate(entry, SWAP_MAP_SHMEM);
2196 }
2197 
2198 /*
2199  * Increase reference count of swap entry by 1.
2200  * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2201  * but could not be atomically allocated. Returns 0, just as if it succeeded,
2202  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2203  * might occur if a page table entry has got corrupted.
2204  */
2205 int swap_duplicate(swp_entry_t entry)
2206 {
2207  int err = 0;
2208 
2209  while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2210  err = add_swap_count_continuation(entry, GFP_ATOMIC);
2211  return err;
2212 }
2213 
2214 /*
2215  * @entry: swap entry for which we allocate swap cache.
2216  *
2217  * Called when allocating swap cache for an existing swap entry.
2218  * This can return error codes; it returns 0 on success.
2219  * -EEXIST means there is already a swap cache for this entry.
2220  * Note: return code is different from swap_duplicate().
2221  */
2222 int swapcache_prepare(swp_entry_t entry)
2223 {
2224  return __swap_duplicate(entry, SWAP_HAS_CACHE);
2225 }
2226 
2227 struct swap_info_struct *page_swap_info(struct page *page)
2228 {
2229  swp_entry_t swap = { .val = page_private(page) };
2230  BUG_ON(!PageSwapCache(page));
2231  return swap_info[swp_type(swap)];
2232 }
2233 
2234 /*
2235  * out-of-line __page_file_ methods to avoid include hell.
2236  */
2237 struct address_space *__page_file_mapping(struct page *page)
2238 {
2239  VM_BUG_ON(!PageSwapCache(page));
2240  return page_swap_info(page)->swap_file->f_mapping;
2241 }
2242 EXPORT_SYMBOL_GPL(__page_file_mapping);
2243 
2244 pgoff_t __page_file_index(struct page *page)
2245 {
2246  swp_entry_t swap = { .val = page_private(page) };
2247  VM_BUG_ON(!PageSwapCache(page));
2248  return swp_offset(swap);
2249 }
2250 EXPORT_SYMBOL_GPL(__page_file_index);
2251 
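For a swap-cache page, page_private() holds the raw swp_entry_t value that the two helpers above unpack. A userspace model of that packing, assuming the usual layout of type in the topmost bits (the 5 matches MAX_SWAPFILES_SHIFT, but the exact shift is arch- and version-dependent, so treat it as illustrative):

#include <stdio.h>

#define TYPE_SHIFT	(sizeof(unsigned long) * 8 - 5)	/* illustrative */
#define OFFSET_MASK	((1UL << TYPE_SHIFT) - 1)

static unsigned long mk_entry(unsigned long type, unsigned long offset)
{
	return (type << TYPE_SHIFT) | (offset & OFFSET_MASK);
}

int main(void)
{
	unsigned long val = mk_entry(2, 12345);	/* as stored in page_private */

	printf("type=%lu offset=%lu\n",
	       val >> TYPE_SHIFT, val & OFFSET_MASK);
	return 0;
}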
2252 /*
2253  * add_swap_count_continuation - called when a swap count is duplicated
2254  * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2255  * page of the original vmalloc'ed swap_map, to hold the continuation count
2256  * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
2257  * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2258  *
2259  * These continuation pages are seldom referenced: the common paths all work
2260  * on the original swap_map, only referring to a continuation page when the
2261  * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2262  *
2263  * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2264  * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2265  * can be called after dropping locks.
2266  */
2267 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2268 {
2269  struct swap_info_struct *si;
2270  struct page *head;
2271  struct page *page;
2272  struct page *list_page;
2273  pgoff_t offset;
2274  unsigned char count;
2275 
2276  /*
2277  * When debugging, it's easier to use __GFP_ZERO here; but it's better
2278  * for latency not to zero a page while GFP_ATOMIC and holding locks.
2279  */
2280  page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2281 
2282  si = swap_info_get(entry);
2283  if (!si) {
2284  /*
2285  * An acceptable race has occurred since the failing
2286  * __swap_duplicate(): the swap entry has been freed,
2287  * perhaps even the whole swap_map cleared for swapoff.
2288  */
2289  goto outer;
2290  }
2291 
2292  offset = swp_offset(entry);
2293  count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2294 
2295  if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2296  /*
2297  * The higher the swap count, the more likely it is that tasks
2298  * will race to add swap count continuation: we need to avoid
2299  * over-provisioning.
2300  */
2301  goto out;
2302  }
2303 
2304  if (!page) {
2305  spin_unlock(&swap_lock);
2306  return -ENOMEM;
2307  }
2308 
2309  /*
2310  * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2311  * no architecture is using highmem pages for kernel pagetables: so it
2312  * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2313  */
2314  head = vmalloc_to_page(si->swap_map + offset);
2315  offset &= ~PAGE_MASK;
2316 
2317  /*
2318  * Page allocation does not initialize the page's lru field,
2319  * but it does always reset its private field.
2320  */
2321  if (!page_private(head)) {
2322  BUG_ON(count & COUNT_CONTINUED);
2323  INIT_LIST_HEAD(&head->lru);
2324  set_page_private(head, SWP_CONTINUED);
2325  si->flags |= SWP_CONTINUED;
2326  }
2327 
2328  list_for_each_entry(list_page, &head->lru, lru) {
2329  unsigned char *map;
2330 
2331  /*
2332  * If the previous map said no continuation, but we've found
2333  * a continuation page, free our allocation and use this one.
2334  */
2335  if (!(count & COUNT_CONTINUED))
2336  goto out;
2337 
2338  map = kmap_atomic(list_page) + offset;
2339  count = *map;
2340  kunmap_atomic(map);
2341 
2342  /*
2343  * If this continuation count now has some space in it,
2344  * free our allocation and use this one.
2345  */
2346  if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2347  goto out;
2348  }
2349 
2350  list_add_tail(&page->lru, &head->lru);
2351  page = NULL; /* now it's attached, don't free it */
2352 out:
2353  spin_unlock(&swap_lock);
2354 outer:
2355  if (page)
2356  __free_page(page);
2357  return 0;
2358 }
2359 
2360 /*
2361  * swap_count_continued - when the original swap_map count is incremented
2362  * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2363  * into, carry if so, or else fail until a new continuation page is allocated;
2364  * when the original swap_map count is decremented from 0 with continuation,
2365  * borrow from the continuation and report whether it still holds more.
2366  * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2367  */
2368 static bool swap_count_continued(struct swap_info_struct *si,
2369  pgoff_t offset, unsigned char count)
2370 {
2371  struct page *head;
2372  struct page *page;
2373  unsigned char *map;
2374 
2375  head = vmalloc_to_page(si->swap_map + offset);
2376  if (page_private(head) != SWP_CONTINUED) {
2377  BUG_ON(count & COUNT_CONTINUED);
2378  return false; /* need to add count continuation */
2379  }
2380 
2381  offset &= ~PAGE_MASK;
2382  page = list_entry(head->lru.next, struct page, lru);
2383  map = kmap_atomic(page) + offset;
2384 
2385  if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
2386  goto init_map; /* jump over SWAP_CONT_MAX checks */
2387 
2388  if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2389  /*
2390  * Think of how you add 1 to 999
2391  */
2392  while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2393  kunmap_atomic(map);
2394  page = list_entry(page->lru.next, struct page, lru);
2395  BUG_ON(page == head);
2396  map = kmap_atomic(page) + offset;
2397  }
2398  if (*map == SWAP_CONT_MAX) {
2399  kunmap_atomic(map);
2400  page = list_entry(page->lru.next, struct page, lru);
2401  if (page == head)
2402  return false; /* add count continuation */
2403  map = kmap_atomic(page) + offset;
2404 init_map: *map = 0; /* we didn't zero the page */
2405  }
2406  *map += 1;
2407  kunmap_atomic(map);
2408  page = list_entry(page->lru.prev, struct page, lru);
2409  while (page != head) {
2410  map = kmap_atomic(page) + offset;
2411  *map = COUNT_CONTINUED;
2412  kunmap_atomic(map);
2413  page = list_entry(page->lru.prev, struct page, lru);
2414  }
2415  return true; /* incremented */
2416 
2417  } else { /* decrementing */
2418  /*
2419  * Think of how you subtract 1 from 1000
2420  */
2421  BUG_ON(count != COUNT_CONTINUED);
2422  while (*map == COUNT_CONTINUED) {
2423  kunmap_atomic(map);
2424  page = list_entry(page->lru.next, struct page, lru);
2425  BUG_ON(page == head);
2426  map = kmap_atomic(page) + offset;
2427  }
2428  BUG_ON(*map == 0);
2429  *map -= 1;
2430  if (*map == 0)
2431  count = 0;
2432  kunmap_atomic(map);
2433  page = list_entry(page->lru.prev, struct page, lru);
2434  while (page != head) {
2435  map = kmap_atomic(page) + offset;
2436  *map = SWAP_CONT_MAX | count;
2437  count = COUNT_CONTINUED;
2438  kunmap_atomic(map);
2439  page = list_entry(page->lru.prev, struct page, lru);
2440  }
2441  return count == COUNT_CONTINUED;
2442  }
2443 }
2444 
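The carry and borrow above amount to multi-digit arithmetic: the base swap_map digit saturates at SWAP_MAP_MAX (0x3e), each continuation digit at SWAP_CONT_MAX (0x7f), and COUNT_CONTINUED (0x80) flags that more digits follow. A userspace model of the increment path, with an array standing in for the chain of continuation pages:

#include <stdio.h>

#define MAP_MAX		0x3e	/* mirrors SWAP_MAP_MAX */
#define CONT_MAX	0x7f	/* mirrors SWAP_CONT_MAX */
#define CONTINUED	0x80	/* mirrors COUNT_CONTINUED */

static void inc(unsigned char *digit, int ndigits)
{
	int i, max = MAP_MAX;	/* digit 0 saturates earlier than the rest */

	for (i = 0; i < ndigits; i++, max = CONT_MAX) {
		if ((digit[i] & ~CONTINUED) < max) {
			digit[i]++;		/* room here: done */
			return;
		}
		digit[i] = CONTINUED;		/* full: reset, carry right */
	}
}

int main(void)
{
	unsigned char d[3] = { MAP_MAX, CONT_MAX, 0 };	/* like "99" */

	inc(d, 3);	/* carries twice, like 99 + 1 = 100 */
	printf("%02x %02x %02x\n", d[0], d[1], d[2]);	/* 80 80 01 */
	return 0;
}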
2445 /*
2446  * free_swap_count_continuations - called by swapoff to free all the continuation pages
2447  * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2448  */
2449 static void free_swap_count_continuations(struct swap_info_struct *si)
2450 {
2451  pgoff_t offset;
2452 
2453  for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2454  struct page *head;
2455  head = vmalloc_to_page(si->swap_map + offset);
2456  if (page_private(head)) {
2457  struct list_head *this, *next;
2458  list_for_each_safe(this, next, &head->lru) {
2459  struct page *page;
2460  page = list_entry(this, struct page, lru);
2461  list_del(this);
2462  __free_page(page);
2463  }
2464  }
2465  }
2466 }