22 #include <asm/pgtable.h>
44 .backing_dev_info = &swap_backing_dev_info,
47 #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
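/*
 * Note (context assumed: mm/swap_state.c of a ~3.x kernel): line 44 appears to
 * come from the static initializer of the single global swapper_space
 * address_space that indexes every swap-cache page, and INC_CACHE_INFO()
 * bumps one field of the swap_cache_info statistics printed below.
 */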
58 printk("%lu pages in swap cache\n", total_swapcache_pages);
59 printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
60 swap_cache_info.add_total, swap_cache_info.del_total,
61 swap_cache_info.find_success, swap_cache_info.find_total);
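/*
 * Note: lines 58-61 are from show_swap_cache_info(), dumping the current
 * swap-cache page count and the cumulative add/delete/find counters
 * maintained through INC_CACHE_INFO().
 */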
79 SetPageSwapCache(page);
80 set_page_private(page, entry.val);
85 total_swapcache_pages++;
89 spin_unlock_irq(&swapper_space.tree_lock);
98 set_page_private(page, 0UL);
99 ClearPageSwapCache(page);
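/*
 * Note: lines 79-99 are from __add_to_swap_cache(): the page is flagged
 * PG_swapcache and its swap entry stashed in page->private, it is inserted
 * into the swapper_space radix tree under tree_lock, and
 * total_swapcache_pages is bumped; lines 98-99 are the unwind path taken when
 * the radix-tree insert fails.
 */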
113 error = __add_to_swap_cache(page, entry);
114 radix_tree_preload_end();
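/*
 * Note: lines 113-114 sit in add_to_swap_cache(), which brackets the call
 * above with radix_tree_preload()/radix_tree_preload_end() so the radix-tree
 * node is allocated before tree_lock is taken.
 */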
130 set_page_private(page, 0);
131 ClearPageSwapCache(page);
132 total_swapcache_pages--;
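/*
 * Note: lines 130-132 are the tail of __delete_from_swap_cache(), the inverse
 * of the insertion above: with the caller holding tree_lock, page->private is
 * cleared, PG_swapcache is dropped, and total_swapcache_pages is decremented.
 */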
199 entry.val = page_private(page);
203 spin_unlock_irq(&swapper_space.tree_lock);
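/*
 * Note: lines 199 and 203 are from delete_from_swap_cache(): the swap entry is
 * read back from page->private before the page is removed from the radix tree
 * under tree_lock, so the swap slot and the page reference can be released
 * once the lock is dropped.
 */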
217 static inline void free_swap_cache(struct page *page)
219 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
231 free_swap_cache(page);
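/*
 * Note: lines 217-231 cover free_swap_cache() and, at 231, its single-page
 * wrapper free_page_and_swap_cache(): the swap-cache copy is reclaimed only if
 * the page is in the swap cache, is no longer mapped, and the page lock can be
 * taken without blocking (trylock_page()).
 */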
241 struct page **pagep = pages;
248 for (i = 0; i < todo; i++)
249 free_swap_cache(pagep[i]);
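/*
 * Note: lines 241-249 are from free_pages_and_swap_cache(), the batched
 * variant of the above: the page array is walked in chunks of todo pages,
 * calling free_swap_cache() on each page before the chunk's references are
 * dropped.
 */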
284 struct page *found_page, *new_page = NULL;
318 radix_tree_preload_end();
322 radix_tree_preload_end();
327 __set_page_locked(new_page);
328 SetPageSwapBacked(new_page);
329 err = __add_to_swap_cache(new_page, entry);
331 radix_tree_preload_end();
335 lru_cache_add_anon(new_page);
339 radix_tree_preload_end();
340 ClearPageSwapBacked(new_page);
341 __clear_page_locked(new_page);
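/*
 * Note: lines 284-341 are from read_swap_cache_async(). The
 * radix_tree_preload_end() calls at 318 and 322 end the preload on the early
 * error branches (apparently after swapcache_prepare() reports a race or a
 * stale entry); on success (327-335) the new page is locked, marked
 * swap-backed, added to the swap cache, and put on the anonymous LRU before
 * the read is issued, while 339-341 undo that page state when the insertion
 * fails.
 */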
377 unsigned long offset = swp_offset(entry);
383 start_offset = offset & ~mask;
384 end_offset = offset | mask;
389 for (offset = start_offset; offset <= end_offset ; offset++) {
392 gfp_mask, vma, addr);
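/*
 * Note: lines 377-392 are from swapin_readahead(). The window arithmetic reads
 * an aligned cluster around the faulting offset; mask is presumably
 * (1UL << page_cluster) - 1 as in mainline. For example, with page_cluster = 3
 * (mask = 7) a fault at swap offset 42 gives start_offset = 42 & ~7 = 40 and
 * end_offset = 42 | 7 = 47, so offsets 40-47 are read ahead; line 392 is the
 * tail of the read_swap_cache_async() call issued for each offset in that
 * window.
 */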