#include <linux/kernel.h>
#include <linux/export.h>
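
/*
 * Pages are queued on ->lru in reverse file order, so the helper below
 * always consumes from the tail of the list.
 */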
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
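
/*
 * From read_cache_pages_invalidate_page(): a page still carrying
 * PG_private must be locked and handed back to the filesystem for
 * cleanup before it can be released.
 */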
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                /* ... */
        }
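
/*
 * From read_cache_pages_invalidate_pages(): release an entire list of
 * pages, invalidating each one first if need be.
 */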
        while (!list_empty(pages)) {
                victim = list_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
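
/*
 * Core loop of read_cache_pages(): insert each page into the page
 * cache and start a read against it; on error, drop whatever is left.
 */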
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                /* if the page cannot be added to the page cache: */
                read_cache_pages_invalidate_page(mapping, page);
                /* if the caller's filler function fails: */
                read_cache_pages_invalidate_pages(mapping, pages);
        }
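
/*
 * read_pages() submits the actual I/O, preferring the filesystem's
 * batched ->readpages() method over per-page ->readpage() calls.
 */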
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        /* batch path: hand the whole page list to the filesystem at once */
        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                goto out;
        }

        /* fallback: add each page to the cache and read it on its own */
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                /* ... */
                mapping->a_ops->readpage(filp, page);
        }
        /* ... */
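
/*
 * __do_page_cache_readahead() allocates every page of the window
 * before submitting any I/O, so that page allocation (and the VM
 * writeback it may trigger) never interleaves with the reads.
 */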
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        unsigned long end_index;        /* the last page we want to read */
        LIST_HEAD(page_pool);
        loff_t isize = i_size_read(inode);
        /* ... */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;
                /* ... skip pages that are already cached ... */
                page = page_cache_alloc_readahead(mapping);
                if (!page)
                        break;
                list_add(&page->lru, &page_pool);
                /* place the marker that later triggers async readahead */
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
        }
        read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
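
/*
 * force_page_cache_readahead() splits large requests into 2MB chunks
 * so that readahead never pins too much memory at once.
 */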
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        /* ... */
        while (nr_to_read) {
                unsigned long this_chunk = (2*1024*1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                /* ... */
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
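
/* In ra_submit(): turn the current readahead window into real I/O. */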
        actual = __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);
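
/*
 * Initial window sizing: round the request up to a power of two, then
 * quadruple small windows and double medium ones, capped at max.
 */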
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}
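
/*
 * Each subsequent window ramps up from the previous one: small windows
 * are quadrupled, larger ones doubled, and the result is clamped to max.
 */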
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}
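
/*
 * count_history_pages() counts how many pages immediately below
 * @offset are already cached: a conservative estimate of the length
 * of the sequential read stream that produced them.
 */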
static pgoff_t count_history_pages(struct address_space *mapping,
                                   struct file_ra_state *ra,
                                   pgoff_t offset, unsigned long max)
{
        pgoff_t head;

        /* ... find the closest page cache hole below offset - 1 ... */
        return offset - 1 - head;
}
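
/*
 * Context readahead: if the page cache shows traces of a sequential
 * stream leading up to @offset, size a new window from that history.
 */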
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra, pgoff_t offset,
                                 unsigned long req_size, unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, ra, offset, max);
        /* no history pages: it could be a random read */
        if (!size)
                return 0;
        /* ... */
        ra->size = get_init_ra_size(size + req_size, max);
        /* ... */
}
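
/*
 * ondemand_readahead() is the heart of the heuristics: it classifies
 * each access (start of file, expected sequential offset, readahead
 * marker hit, oversize request, cache miss, or plain random read) and
 * sizes the next window accordingly.
 */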
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        unsigned long max = max_sane_readahead(ra->ra_pages);

        /* start of file */
        if (!offset)
                goto initial_readahead;

        /* expected sequential offset: ramp up and push the window forward */
        if (offset == ra->start + ra->size - ra->async_size ||
            offset == ra->start + ra->size) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                goto readit;
        }

        /* marker hit without valid state, e.g. interleaved reads */
        if (hit_readahead_marker) {
                pgoff_t start;

                /* ... probe the page cache for the old async window ... */
                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max);
                goto readit;
        }

        /* oversize read */
        if (req_size > max)
                goto initial_readahead;

        /* sequential cache miss */
        if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
                goto initial_readahead;

        /* try context readahead from cached history pages */
        if (try_context_readahead(mapping, ra, offset, req_size, max))
                goto readit;

        /* standalone small random read: do not pollute the readahead state */
        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        /* ... */
readit:
        return ra_submit(ra, mapping, filp);
}
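
/*
 * page_cache_sync_readahead() is called on a page cache miss and
 * submits readahead synchronously with the caller's read.
 */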
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* ... */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
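
/*
 * page_cache_async_readahead() runs when a read hits a PG_readahead
 * marker page, extending the window before the reader catches up.
 */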
void page_cache_async_readahead(struct address_space *mapping,
                                struct file_ra_state *ra, struct file *filp,
                                struct page *page, pgoff_t offset,
                                unsigned long req_size)
{
        /*
         * PG_readahead shares its bit with PG_reclaim, so a page that
         * is under writeback must be ignored here.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);
        /* ... */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
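
/*
 * The readahead(2) system call: force readahead on a file descriptor,
 * provided its address_space knows how to read pages at all.
 */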
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;

        /* the syscall converts the byte range to whole pages first: */
        unsigned long len = end - start + 1;
        ret = do_readahead(mapping, f.file, start, len);
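
/*
 * On architectures that need syscall wrappers, re-expand the argument
 * types before calling the real implementation.
 */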
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
{
        return SYSC_readahead((int) fd, offset, (size_t) count);
}
SYSCALL_ALIAS(sys_readahead, SyS_readahead);
#endif