#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/export.h>
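/*
 * __page_cache_release(): take a page that is still on an LRU list off
 * that list (under the zone's lru_lock) before it is finally freed.
 * Most pages are freed in batches through pagevecs instead.
 */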
static void __page_cache_release(struct page *page)
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
static void __put_single_page(struct page *page)
	__page_cache_release(page);
	free_hot_cold_page(page, 0);
static void __put_compound_page(struct page *page)
	compound_page_dtor *dtor;

	__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
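/*
 * put_compound_page() and put_page(): drop a reference on a (possibly
 * compound) page. For THP tail pages the head page's refcount is
 * manipulated under compound_lock so the huge page cannot be split
 * underneath us.
 */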
static void put_compound_page(struct page *page)
		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			if (PageSlab(page_head)) {
				if (put_page_testzero(page_head))
			flags = compound_lock_irqsave(page_head);
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head))
				__put_single_page(page_head);
			if (put_page_testzero(page))
				__put_single_page(page);
			if (put_page_testzero(page_head))
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);

void put_page(struct page *page)
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__put_single_page(page);
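/*
 * __get_page_tail(): slow path of get_page() for THP tail pages. Take a
 * speculative reference on the head page, then pin the tail under
 * compound_lock so it cannot be split away concurrently.
 */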
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		if (PageSlab(page_head)) {
			if (likely(PageTail(page))) {
				__get_page_tail_foll(page, false);

		flags = compound_lock_irqsave(page_head);
		if (likely(PageTail(page))) {
			__get_page_tail_foll(page, false);
		compound_unlock_irqrestore(page_head, flags);
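/* put_pages_list(): release every page chained on a list_head of pages. */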
	while (!list_empty(pages)) {
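/*
 * get_kernel_pages()/get_kernel_page(): pin the pages backing an array
 * of kernel kvecs (or a single kernel address) into a struct page array.
 */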
	for (seg = 0; seg < nr_segs; seg++) {
		pages[seg] = kmap_to_page(kiov[seg].iov_base);

	const struct kvec kiov = {
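/*
 * pagevec_lru_move_fn(): apply move_fn to every page in the pagevec,
 * taking each zone's lru_lock only once per run of pages in that zone.
 */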
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	pagevec_reinit(pvec);
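/*
 * pagevec_move_tail_fn()/pagevec_move_tail(): rotate inactive, evictable
 * pages to the tail of their inactive list; pagevec_move_tail() must be
 * called with IRQs disabled.
 */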
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		list_move_tail(&page->lru, &lruvec->lists[lru]);

static void pagevec_move_tail(struct pagevec *pvec)
	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
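/*
 * rotate_reclaimable_page(): once writeback completes, move the page to
 * the tail of the inactive list so it is reclaimed promptly.
 */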
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
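/* update_page_reclaim_stat(): bump the lruvec's recent_scanned/recent_rotated counters. */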
static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
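/*
 * __activate_page(): move a page from the inactive to the active LRU
 * list and update the reclaim statistics.
 */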
static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		update_page_reclaim_stat(lruvec, file, 1);
static void activate_page_drain(int cpu)
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);

/* activate_page(), CONFIG_SMP variant: batch activations on the per-cpu pagevec */
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);

/* !CONFIG_SMP: drain stub and an activate_page() that takes the zone lru_lock directly */
static inline void activate_page_drain(int cpu)
	struct zone *zone = page_zone(page);
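/*
 * mark_page_accessed(): inactive,unreferenced -> inactive,referenced and
 * inactive,referenced -> active,unreferenced.
 */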
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
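/*
 * __lru_cache_add(): queue a page on the current CPU's LRU-add pagevec,
 * flushing the pagevec to the LRU lists first when it has no space left.
 */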
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec, lru);
	pagevec_add(pvec, page);
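/*
 * lru_cache_add_lru(): clear stale Active/Unevictable flags before the
 * page is queued for the LRU via __lru_cache_add().
 */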
	if (PageActive(page)) {
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
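/* add_page_to_unevictable_list(): place a page straight on the zone's unevictable list. */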
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	SetPageUnevictable(page);
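/*
 * lru_deactivate_fn(): move a page that is being invalidated onto the
 * inactive list; clean pages go straight to the tail, dirty/writeback
 * pages get PG_reclaim so writeback completion rotates them there.
 */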
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
	if (PageUnevictable(page))
		return;

	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		SetPageReclaim(page);
	} else {
		list_move_tail(&page->lru, &lruvec->lists[lru]);
	}

	update_page_reclaim_stat(lruvec, file, 0);
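/*
 * lru_add_drain_cpu(): flush this CPU's pending LRU-add, rotate and
 * deactivate pagevecs onto the LRU lists.
 */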
		if (pagevec_count(pvec))
			__pagevec_lru_add(pvec, lru);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		pagevec_move_tail(pvec);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
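/*
 * deactivate_page(): queue a page for deactivation on the per-cpu
 * lru_deactivate_pvecs, draining the pagevec when it fills up.
 */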
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
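/*
 * release_pages(): drop a reference on each page in the array, batching
 * zone lru_lock acquisition, and free the pages whose count reaches zero.
 * __pagevec_release() feeds a pagevec through it and reinitialises it.
 */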
	struct zone *zone = NULL;
	struct lruvec *lruvec;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			put_compound_page(page);

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
					spin_unlock_irqrestore(&zone->lru_lock,
							       flags);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));

		list_add(&page->lru, &pages_to_free);

	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_reinit(pvec);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
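/*
 * lru_add_page_tail(): used by __split_huge_page_refcount() to place a
 * tail page on the LRU next to its head page while a huge page is split.
 */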
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec)
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	SetPageLRU(page_tail);

	if (page_evictable(page_tail)) {
		if (PageActive(page)) {
			SetPageActive(page_tail);
	} else {
		SetPageUnevictable(page_tail);
	}

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else {
		add_page_to_lru_list(page_tail, lruvec, lru);
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, active);
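/*
 * __pagevec_lru_add_fn()/__pagevec_lru_add(): add the batched pages to
 * the LRU list indicated by the lru argument.
 */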
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
	int file = is_file_lru(lru);
	int active = is_active_lru(lru);

	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);

	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
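/*
 * pagevec_lookup()/pagevec_lookup_tag(): fill the pagevec with up to
 * nr_pages pages from the mapping (optionally only those carrying a given
 * radix-tree tag) and return how many were found.
 */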
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);

	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
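/*
 * swap_setup(): size page_cluster from the amount of RAM, expressed in
 * megabytes via totalram_pages >> (20 - PAGE_SHIFT).
 */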
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);