9 #include <linux/types.h>
10 #include <linux/slab.h>
19 #include <linux/nfs_fs.h>
23 #include <linux/export.h>
25 #include <asm/uaccess.h>
/* NOTE(review): this file appears to be a fragmentary extraction of the
 * Linux NFS client write path (fs/nfs/write.c); the stray leading numbers
 * on each line are original source line numbers left behind by the
 * extraction, and many interior lines are missing throughout. */
/* Debug facility tag selecting which dprintk() output this file emits. */
34 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
/* Minimum mempool reserves for write/commit RPC data — presumably matched
 * to the mempool creation in nfs_init_writepagecache() near the end of
 * this file; verify against the full source. */
36 #define MIN_POOL_WRITE (32)
37 #define MIN_POOL_COMMIT (4)
/* Forward declaration: nfs_redirty_request() is defined later in this
 * file — it re-marks a request dirty (for retry) and ends page
 * writeback (see the fragment around original line 1070). */
42 static void nfs_redirty_request(
struct nfs_page *
req);
59 INIT_LIST_HEAD(&p->
pages);
79 INIT_LIST_HEAD(&hdr->
pages);
90 unsigned int pagecount)
106 if (data != prealloc)
127 if (wdata->
pages.pagevec != wdata->
pages.page_array)
129 if (wdata != &write_header->
rpc_data)
150 if (PagePrivate(page))
151 req = (
struct nfs_page *)page_private(page);
152 else if (
unlikely(PageSwapCache(page))) {
/* NOTE(review): fragment only — interior lines are missing from this
 * extraction.  Looks up the nfs_page write request attached to @page
 * under the owning inode's i_lock, delegating to
 * nfs_page_find_request_locked(); the declaration of 'req' and the
 * return statement are among the missing lines. */
170 static struct nfs_page *nfs_page_find_request(
struct page *page)
172 struct inode *
inode = page_file_mapping(page)->host;
175 spin_lock(&inode->
i_lock);
176 req = nfs_page_find_request_locked(NFS_I(inode), page);
177 spin_unlock(&inode->
i_lock);
/* NOTE(review): fragment only — interior lines missing.  Under i_lock,
 * extends the inode's cached i_size when a write of @count bytes at
 * @offset within @page lands past the current end of file.  The local
 * declarations (i_size, end, end_index), the early-out for writes that
 * do not grow the file, and the closing braces are among the missing
 * lines — do not infer control flow beyond what is visible. */
182 static void nfs_grow_file(
struct page *page,
unsigned int offset,
unsigned int count)
184 struct inode *inode = page_file_mapping(page)->host;
188 spin_lock(&inode->
i_lock);
189 i_size = i_size_read(inode);
191 if (i_size > 0 && page_file_index(page) < end_index)
193 end = page_file_offset(page) + ((loff_t)offset+count);
196 i_size_write(inode, end);
199 spin_unlock(&inode->
i_lock);
203 static void nfs_set_pageerror(
struct page *page)
212 static void nfs_mark_uptodate(
struct page *page,
unsigned int base,
unsigned int count)
214 if (PageUptodate(page))
218 if (count != nfs_page_length(page))
220 SetPageUptodate(page);
/* Writeback congestion thresholds.  ON_THRESH converts nfs_congestion_kb
 * (a size in kilobytes) into a page count: right-shifting by
 * (PAGE_SHIFT - 10) divides KB by the page size expressed in KB.
 * OFF_THRESH is 3/4 of ON_THRESH (ON - ON/4), giving hysteresis so the
 * congestion state does not flap on and off at the boundary. */
238 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
239 #define NFS_CONGESTION_OFF_THRESH \
240 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
242 static int nfs_set_page_writeback(
struct page *page)
247 struct inode *inode = page_file_mapping(page)->host;
250 if (atomic_long_inc_return(&nfss->
writeback) >
259 static void nfs_end_page_writeback(
struct page *page)
261 struct inode *inode = page_file_mapping(page)->host;
269 static struct nfs_page *nfs_find_and_lock_request(
struct page *page,
bool nonblock)
271 struct inode *inode = page_file_mapping(page)->host;
275 spin_lock(&inode->
i_lock);
277 req = nfs_page_find_request_locked(NFS_I(inode), page);
280 if (nfs_lock_request(req))
287 spin_unlock(&inode->
i_lock);
295 spin_lock(&inode->
i_lock);
297 spin_unlock(&inode->
i_lock);
306 struct page *page,
bool nonblock)
311 req = nfs_find_and_lock_request(page, nonblock);
318 ret = nfs_set_page_writeback(page);
323 nfs_redirty_request(req);
332 struct inode *inode = page_file_mapping(page)->host;
350 static int nfs_writepage_locked(
struct page *page,
struct writeback_control *wbc)
355 NFS_PROTO(page_file_mapping(page)->
host)->write_pageio_init(&pgio,
358 &nfs_async_write_completion_ops);
359 err = nfs_do_writepage(page, wbc, &pgio);
372 ret = nfs_writepage_locked(page, wbc);
377 static int nfs_writepages_callback(
struct page *page,
struct writeback_control *wbc,
void *data)
381 ret = nfs_do_writepage(page, wbc, data);
388 struct inode *inode = mapping->
host;
389 unsigned long *bitlock = &NFS_I(inode)->flags;
401 NFS_PROTO(inode)->write_pageio_init(&pgio, inode, wb_priority(wbc), &nfs_async_write_completion_ops);
422 static void nfs_inode_add_request(
struct inode *inode,
struct nfs_page *req)
427 nfs_lock_request(req);
429 spin_lock(&inode->
i_lock);
439 set_page_private(req->
wb_page, (
unsigned long)req);
443 spin_unlock(&inode->
i_lock);
449 static void nfs_inode_remove_request(
struct nfs_page *req)
451 struct inode *inode = req->
wb_context->dentry->d_inode;
456 spin_lock(&inode->
i_lock);
458 set_page_private(req->
wb_page, 0);
459 ClearPagePrivate(req->
wb_page);
463 spin_unlock(&inode->
i_lock);
468 nfs_mark_request_dirty(
struct nfs_page *req)
473 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
492 spin_lock(cinfo->
lock);
493 nfs_list_add_request(req, dst);
494 cinfo->
mds->ncommit++;
495 spin_unlock(cinfo->
lock);
498 inc_bdi_stat(page_file_mapping(req->
wb_page)->backing_dev_info,
523 nfs_list_remove_request(req);
524 cinfo->
mds->ncommit--;
532 cinfo->
mds = &NFS_I(inode)->commit_info;
533 cinfo->
ds = pnfs_get_ds_info(inode);
545 nfs_init_cinfo_from_inode(cinfo, inode);
556 if (pnfs_mark_request_commit(req, lseg, cinfo))
562 nfs_clear_page_commit(
struct page *page)
569 nfs_clear_request_commit(
struct nfs_page *req)
572 struct inode *inode = req->
wb_context->dentry->d_inode;
575 nfs_init_cinfo_from_inode(&cinfo, inode);
576 if (!pnfs_clear_request_commit(req, &cinfo)) {
577 spin_lock(cinfo.
lock);
579 spin_unlock(cinfo.
lock);
581 nfs_clear_page_commit(req->
wb_page);
612 nfs_clear_request_commit(
struct nfs_page *req)
627 unsigned long bytes = 0;
631 nfs_init_cinfo_from_inode(&cinfo, hdr->
inode);
632 while (!list_empty(&hdr->
pages)) {
636 nfs_list_remove_request(req);
639 nfs_set_pageerror(req->
wb_page);
644 nfs_mark_request_dirty(req);
653 nfs_inode_remove_request(req);
656 nfs_end_page_writeback(req->
wb_page);
663 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
667 return cinfo->
mds->ncommit;
679 if (!nfs_lock_request(req))
685 nfs_list_add_request(req, dst);
687 if ((ret == max) && !cinfo->
dreq)
708 spin_lock(cinfo->
lock);
709 if (cinfo->
mds->ncommit > 0) {
714 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
716 spin_unlock(cinfo->
lock);
740 static struct nfs_page *nfs_try_to_update_request(
struct inode *inode,
750 if (!PagePrivate(page))
753 end = offset +
bytes;
754 spin_lock(&inode->
i_lock);
757 req = nfs_page_find_request_locked(NFS_I(inode), page);
772 if (nfs_lock_request(req))
776 spin_unlock(&inode->
i_lock);
781 spin_lock(&inode->
i_lock);
794 spin_unlock(&inode->
i_lock);
796 nfs_clear_request_commit(req);
799 spin_unlock(&inode->
i_lock);
803 return ERR_PTR(error);
814 struct page *page,
unsigned int offset,
unsigned int bytes)
816 struct inode *inode = page_file_mapping(page)->host;
819 req = nfs_try_to_update_request(inode, page, offset, bytes);
825 nfs_inode_add_request(inode, req);
830 static int nfs_writepage_setup(
struct nfs_open_context *ctx,
struct page *page,
831 unsigned int offset,
unsigned int count)
835 req = nfs_setup_write_request(ctx, page, offset, count);
839 nfs_grow_file(page, offset, count);
841 nfs_mark_request_dirty(req);
861 req = nfs_page_find_request(page);
874 }
while (status == 0);
883 static bool nfs_write_pageuptodate(
struct page *page,
struct inode *inode)
885 if (nfs_have_delegated_attributes(inode))
890 return PageUptodate(page) != 0;
900 unsigned int offset,
unsigned int count)
903 struct inode *inode = page_file_mapping(page)->host;
908 dprintk(
"NFS: nfs_updatepage(%s/%s %d@%lld)\n",
909 file->
f_path.dentry->d_parent->d_name.name,
910 file->
f_path.dentry->d_name.name, count,
911 (
long long)(page_file_offset(page) + offset));
918 if (nfs_write_pageuptodate(page, inode) &&
921 count =
max(count + offset, nfs_page_length(page));
925 status = nfs_writepage_setup(ctx, page, offset, count);
927 nfs_set_pageerror(page);
931 dprintk(
"NFS: nfs_updatepage returns %d (isize %lld)\n",
932 status, (
long long)i_size_read(inode));
936 static int flush_task_priority(
int how)
952 struct inode *inode = data->
header->inode;
953 int priority = flush_task_priority(how);
957 .rpc_resp = &data->
res,
958 .rpc_cred = data->
header->cred,
964 .callback_ops = call_ops,
965 .callback_data =
data,
973 NFS_PROTO(inode)->write_setup(data, &msg);
975 dprintk(
"NFS: %5u initiated write call "
976 "(req %s/%lld, %u bytes @ offset %llu)\n",
979 (
long long)NFS_FILEID(inode),
981 (
unsigned long long)data->
args.offset);
989 ret = rpc_wait_for_completion_task(task);
1003 unsigned int count,
unsigned int offset,
1012 data->
args.offset = req_offset(req) +
offset;
1025 if (nfs_reqs_to_commit(cinfo))
1041 struct inode *inode = data->
header->inode;
1053 while (!list_empty(head)) {
1057 list_del_init(&data->
list);
1059 ret2 = nfs_do_write(data, call_ops, how);
/* NOTE(review): fragment only — braces and intervening lines missing.
 * Re-marks a failed or deferred request dirty so its write is retried
 * later, then ends writeback state on the request's page. */
1070 static void nfs_redirty_request(
struct nfs_page *req)
1072 nfs_mark_request_dirty(req);
1074 nfs_end_page_writeback(req->
wb_page);
/* NOTE(review): fragment only — braces and the declaration of 'req' are
 * missing.  Error-cleanup callback for async writes: drains @head,
 * removing each queued request and redirtying it so the write is
 * retried rather than lost. */
1078 static void nfs_async_write_error(
struct list_head *head)
1082 while (!list_empty(head)) {
1083 req = nfs_list_entry(head->
next);
1084 nfs_list_remove_request(req);
1085 nfs_redirty_request(req);
1090 .error_cleanup = nfs_async_write_error,
1091 .completion = nfs_write_completion,
1098 while (!list_empty(&hdr->
rpc_list)) {
1115 struct page *page = req->
wb_page;
1125 (desc->
pg_moreio || nfs_reqs_to_commit(&cinfo) ||
1135 data = nfs_writedata_alloc(hdr, 1);
1137 nfs_flush_error(desc, hdr);
1141 nfs_write_rpcsetup(data, len, offset, desc->
pg_ioflags, &cinfo);
1147 nfs_list_remove_request(req);
1148 nfs_list_add_request(req, &hdr->
pages);
1165 struct page **
pages;
1170 data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->
pg_base,
1173 nfs_flush_error(desc, hdr);
1178 pages = data->
pages.pagevec;
1179 while (!list_empty(head)) {
1180 req = nfs_list_entry(head->
next);
1181 nfs_list_remove_request(req);
1182 nfs_list_add_request(req, &hdr->
pages);
1187 (desc->
pg_moreio || nfs_reqs_to_commit(&cinfo)))
1192 list_add(&data->list, &hdr->
rpc_list);
1201 return nfs_flush_multi(desc, hdr);
1202 return nfs_flush_one(desc, hdr);
1222 ret = nfs_do_multiple_writes(&hdr->
rpc_list,
1232 .pg_doio = nfs_generic_pg_writepages,
1236 struct inode *inode,
int ioflags,
1240 NFS_SERVER(inode)->wsize, ioflags);
1246 pgio->
pg_ops = &nfs_pageio_write_ops;
1255 NFS_PROTO(data->
header->inode)->write_rpc_prepare(task, data);
1262 NFS_PROTO(data->
inode)->commit_rpc_prepare(task, data);
1272 static void nfs_writeback_done_common(
struct rpc_task *
task,
void *calldata)
1279 static void nfs_writeback_release_common(
void *calldata)
1285 if ((status >= 0) && nfs_write_need_commit(data)) {
1286 spin_lock(&hdr->
lock);
1293 spin_unlock(&hdr->
lock);
1298 static const struct rpc_call_ops nfs_write_common_ops = {
1300 .rpc_call_done = nfs_writeback_done_common,
1301 .rpc_release = nfs_writeback_release_common,
1312 struct inode *inode = data->
header->inode;
1315 dprintk(
"NFS: %5u nfs_writeback_done (status %d)\n",
1325 status = NFS_PROTO(inode)->write_done(task, data);
1330 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
1340 static unsigned long complain;
1344 dprintk(
"NFS: faulty NFS server %s:"
1345 " (committed = %d) != (stable = %d)\n",
1355 static unsigned long complain;
1361 if (resp->
count == 0) {
1364 "NFS: Server wrote zero bytes, expected %u.\n",
1390 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
1391 static int nfs_commit_set_lock(
struct nfs_inode *nfsi,
int may_wait)
1403 return (ret < 0) ? ret : 1;
1406 static void nfs_commit_clear_lock(
struct nfs_inode *nfsi)
1425 int priority = flush_task_priority(how);
1427 .rpc_argp = &data->
args,
1428 .rpc_resp = &data->
res,
1429 .rpc_cred = data->
cred,
1434 .rpc_message = &
msg,
1435 .callback_ops = call_ops,
1436 .callback_data =
data,
1442 NFS_PROTO(data->
inode)->commit_setup(data, &msg);
1444 dprintk(
"NFS: %5u initiated commit call\n", data->
task.tk_pid);
1448 return PTR_ERR(task);
1450 rpc_wait_for_completion_task(task);
1465 struct inode *inode = first->
wb_context->dentry->d_inode;
1470 list_splice_init(head, &data->
pages);
1475 data->
mds_ops = &nfs_commit_ops;
1481 data->
args.offset = 0;
1482 data->
args.count = 0;
1496 while (!list_empty(page_list)) {
1497 req = nfs_list_entry(page_list->
next);
1498 nfs_list_remove_request(req);
1502 dec_bdi_stat(page_file_mapping(req->
wb_page)->backing_dev_info,
1514 nfs_commit_list(
struct inode *inode,
struct list_head *head,
int how,
1538 static void nfs_commit_done(
struct rpc_task *task,
void *calldata)
1542 dprintk(
"NFS: %5u nfs_commit_done (status %d)\n",
1546 NFS_PROTO(data->
inode)->commit_done(task, data);
1552 int status = data->
task.tk_status;
1555 while (!list_empty(&data->
pages)) {
1556 req = nfs_list_entry(data->
pages.next);
1557 nfs_list_remove_request(req);
1558 nfs_clear_page_commit(req->
wb_page);
1560 dprintk(
"NFS: commit (%s/%lld %d@%lld)",
1562 (
long long)NFS_FILEID(req->
wb_context->dentry->d_inode),
1564 (
long long)req_offset(req));
1566 nfs_context_set_write_error(req->
wb_context, status);
1567 nfs_inode_remove_request(req);
1568 dprintk(
", error = %d\n", status);
1576 nfs_inode_remove_request(req);
1582 nfs_mark_request_dirty(req);
1589 nfs_commit_clear_lock(NFS_I(data->
inode));
1592 static void nfs_commit_release(
void *calldata)
1602 .rpc_call_done = nfs_commit_done,
1603 .rpc_release = nfs_commit_release,
1608 .error_cleanup = nfs_commit_clear_lock,
1616 status = pnfs_commit_list(inode, head, how, cinfo);
1618 status = nfs_commit_list(inode, head, how, cinfo);
1622 int nfs_commit_inode(
struct inode *inode,
int how)
1629 res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1631 goto out_mark_dirty;
1632 nfs_init_cinfo_from_inode(&cinfo, inode);
1641 goto out_mark_dirty;
1642 error = wait_on_bit(&NFS_I(inode)->flags,
1649 nfs_commit_clear_lock(NFS_I(inode));
1661 static int nfs_commit_unstable_pages(
struct inode *inode,
struct writeback_control *wbc)
1676 goto out_mark_dirty;
1682 ret = nfs_commit_inode(inode, flags);
1685 if (ret < wbc->nr_to_write)
1697 static int nfs_commit_unstable_pages(
struct inode *inode,
struct writeback_control *wbc)
1705 return nfs_commit_unstable_pages(inode, wbc);
1730 BUG_ON(!PageLocked(page));
1732 wait_on_page_writeback(page);
1733 req = nfs_page_find_request(page);
1736 if (nfs_lock_request(req)) {
1737 nfs_clear_request_commit(req);
1738 nfs_inode_remove_request(req);
1771 wait_on_page_writeback(page);
1773 ret = nfs_writepage_locked(page, &wbc);
1778 if (!PagePrivate(page))
1780 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1789 #ifdef CONFIG_MIGRATION
1801 if (PagePrivate(page))
1816 if (nfs_wdata_cachep ==
NULL)
1821 if (nfs_wdata_mempool ==
NULL)
1822 goto out_destroy_write_cache;
1828 if (nfs_cdata_cachep ==
NULL)
1829 goto out_destroy_write_mempool;
1833 if (nfs_commit_mempool ==
NULL)
1834 goto out_destroy_commit_cache;
1853 if (nfs_congestion_kb > 256*1024)
1854 nfs_congestion_kb = 256*1024;
1858 out_destroy_commit_cache:
1860 out_destroy_write_mempool:
1862 out_destroy_write_cache: