13 #include <linux/slab.h>
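/* Lines 34-62 appear to be from cachefiles_read_waiter(), the wait-queue callback
 * installed on the backing page: it checks whether the backing read completed or the
 * page was truncated, then queues the retrieval op for the copier. */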
34 _enter("{%lu},%u,%d,{%p,%u}",
42 _debug("--- monitor %p %lx ---", page, page->flags);
44 if (!PageUptodate(page) && !PageError(page)) {
46 _debug("page probably truncated");
62 fscache_enqueue_retrieval(monitor->op);
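/* Lines 76-132 appear to be from cachefiles_read_reissue() (called at line 185): it
 * re-checks the backing page against the backing mapping and, if the page is still
 * usable, re-issues the readpage, returning -EINPROGRESS or -ENODATA. */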
76 struct address_space *bmapping = object->backer->d_inode->i_mapping;
77 struct page *backpage = monitor->back_page, *backpage2;
80 kenter("{ino=%lx},{%lx,%lx}",
81 object->backer->d_inode->i_ino,
85 if (backpage->mapping != bmapping) {
86 kleave(" = -ENODATA [mapping]");
92 kleave(" = -ENODATA [gone]");
96 if (backpage != backpage2) {
98 kleave(" = -ENODATA [different]");
106 INIT_LIST_HEAD(&monitor->op_link);
109 if (trylock_page(backpage)) {
111 if (PageError(backpage))
114 if (PageUptodate(backpage))
118 ret = bmapping->a_ops->readpage(NULL, backpage);
126 if (trylock_page(backpage)) {
127 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
132 kleave(" = -EINPROGRESS");
167 while (!list_empty(&op->to_do)) {
183 } else if (!PageError(monitor->back_page)) {
185 error = cachefiles_read_reissue(object, monitor);
192 "Readpage failed on backing file %lx",
193 (unsigned long) monitor->back_page->flags);
199 fscache_end_io(op, monitor->netfs_page, error);
201 fscache_put_retrieval(op);
207 if (max < 0 || need_resched()) {
208 if (!list_empty(&op->to_do))
209 fscache_enqueue_retrieval(op);
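/* Lines 227-370 appear to be from cachefiles_read_backing_file_one() (called at line
 * 447): read a single netfs page from the backing file, allocating and monitoring a
 * backing page as needed. */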
227 struct page *netpage,
232 struct page *newpage, *backpage;
237 pagevec_reinit(pagevec);
239 _debug("read back %p{%lu,%d}",
240 netpage, netpage->index, page_count(netpage));
242 monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
247 monitor->op = fscache_get_retrieval(op);
249 init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
252 bmapping = object->backer->d_inode->i_mapping;
258 goto backing_page_already_present;
261 newpage = page_cache_alloc_cold(bmapping);
266 ret = add_to_page_cache(newpage, bmapping,
269 goto installed_new_backing_page;
276 installed_new_backing_page:
277 _debug("- new %p", newpage);
283 pagevec_add(pagevec, backpage);
284 __pagevec_lru_add_file(pagevec);
287 ret = bmapping->a_ops->readpage(NULL, backpage);
292 monitor_backing_page:
306 if (trylock_page(backpage)) {
307 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
314 backing_page_already_present:
322 if (PageError(backpage))
325 if (PageUptodate(backpage))
326 goto backing_page_already_uptodate;
328 if (!trylock_page(backpage))
329 goto monitor_backing_page;
330 _debug("read %p {%lx}", backpage, backpage->flags);
331 goto read_backing_page;
335 backing_page_already_uptodate:
338 pagevec_add(pagevec, netpage);
341 copy_highpage(netpage, backpage);
342 fscache_end_io(op, netpage, 0);
352 fscache_put_retrieval(monitor->op);
359 _debug("read error %d", ret);
370 fscache_put_retrieval(monitor->op);
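/* Lines 397-451 appear to be from cachefiles_read_or_alloc_page(): map the netfs page
 * to a backing-file block and either read it via cachefiles_read_backing_file_one()
 * or mark the page for allocation only. */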
397 struct pagevec pagevec;
413 inode = object->backer->d_inode;
426 op->op.processor = cachefiles_read_copier;
428 pagevec_init(&pagevec, 0);
436 block0 = page->index;
441 (unsigned long long) block0,
442 (unsigned long long) block);
447 ret = cachefiles_read_backing_file_one(object, op, page,
451 pagevec_add(&pagevec, page);
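/* Lines 469-675 appear to be from cachefiles_read_backing_file() (called at line 778):
 * the multi-page variant that reads a list of netfs pages from the backing file,
 * adding backing pages to the page cache and LRU as it goes. */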
469 struct pagevec *mark_pvec)
472 struct address_space *bmapping = object->backer->d_inode->i_mapping;
473 struct pagevec lru_pvec;
474 struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
479 pagevec_init(&lru_pvec, 0);
484 _debug("read back %p{%lu,%d}",
485 netpage, netpage->index, page_count(netpage));
488 monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
492 monitor->op = fscache_get_retrieval(op);
493 init_waitqueue_func_entry(&monitor->monitor,
494 cachefiles_read_waiter);
500 goto backing_page_already_present;
503 newpage = page_cache_alloc_cold(bmapping);
508 ret = add_to_page_cache(newpage, bmapping,
511 goto installed_new_backing_page;
518 installed_new_backing_page:
519 _debug("- new %p", newpage);
525 if (!pagevec_add(&lru_pvec, backpage))
526 __pagevec_lru_add_file(&lru_pvec);
529 ret = bmapping->a_ops->readpage(NULL, backpage);
535 monitor_backing_page:
538 ret = add_to_page_cache(netpage, op->mapping, netpage->index,
549 if (!pagevec_add(&lru_pvec, netpage))
550 __pagevec_lru_add_file(&lru_pvec);
565 if (trylock_page(backpage)) {
566 _debug("2unlock %p {%lx}", backpage, backpage->flags);
579 backing_page_already_present:
580 _debug("- present %p", backpage);
582 if (PageError(backpage))
585 if (PageUptodate(backpage))
586 goto backing_page_already_uptodate;
588 _debug("- not ready %p{%lx}", backpage, backpage->flags);
590 if (!trylock_page(backpage))
591 goto monitor_backing_page;
593 if (PageError(backpage)) {
594 _debug("error %lx", backpage->flags);
599 if (PageUptodate(backpage))
600 goto backing_page_already_uptodate_unlock;
604 goto reread_backing_page;
608 backing_page_already_uptodate_unlock:
609 _debug("uptodate %lx", backpage->flags);
611 backing_page_already_uptodate:
614 ret = add_to_page_cache(netpage, op->mapping, netpage->index,
624 copy_highpage(netpage, backpage);
629 if (!pagevec_add(mark_pvec, netpage))
633 if (!pagevec_add(&lru_pvec, netpage))
634 __pagevec_lru_add_file(&lru_pvec);
636 fscache_end_io(op, netpage, 0);
648 pagevec_lru_add_file(&lru_pvec);
657 fscache_put_retrieval(op);
675 _debug("read error %d", ret);
696 struct pagevec pagevec;
698 struct page *page, *_n;
699 unsigned shift, nrbackpages;
700 int ret, ret2, space;
707 _enter("{OBJ%x,%d},,%d,,",
718 inode = object->backer->d_inode;
729 pagevec_init(&pagevec, 0);
733 op->op.processor = cachefiles_read_copier;
735 INIT_LIST_HEAD(&backpages);
748 block0 = page->index;
754 (unsigned long long) block0,
755 (unsigned long long) block);
760 list_move(&page->lru, &backpages);
763 } else if (space && pagevec_add(&pagevec, page) == 0) {
769 if (pagevec_count(&pagevec) > 0)
772 if (list_empty(pages))
777 if (nrbackpages > 0) {
778 ret2 = cachefiles_read_backing_file(object, op, &backpages,
784 if (pagevec_count(&pagevec) > 0)
788 ret, *nr_pages, list_empty(pages) ? " empty" : "");
809 struct pagevec pagevec;
821 pagevec_init(&pagevec, 0);
822 pagevec_add(&pagevec, page);
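/* Lines 850-870 appear to be from cachefiles_allocate_pages(), the multi-page
 * counterpart that batches the netfs pages into a pagevec before marking them cached. */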
850 struct pagevec pagevec;
859 _enter("%p,,,%d,", object, *nr_pages);
863 pagevec_init(&pagevec, 0);
866 if (pagevec_add(&pagevec, page) == 0)
870 if (pagevec_count(&pagevec) > 0)
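/* Lines 906-960 appear to be from cachefiles_write_page(): write a netfs page to the
 * backing file with file->f_op->write(), cutting the write short at the object's
 * store limit. */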
906 _enter("%p,%p{%lx},,,", object, page, page->index);
921 path.dentry = object->backer;
927 if (file->f_op->write) {
932 eof = object->fscache.store_limit_l;
937 _debug("cut short %llx to %llx",
947 ret = file->f_op->write(
948 file, (const void __user *) data, len, &pos);
960 object, "Write page to backing file failed");
983 spin_unlock(&object->fscache.cookie->lock);