#include <linux/module.h>
#include <linux/prefetch.h>

#include "../internal.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

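/* Debugging helper: dump a page's state flags through dprintk. */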
static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk(" PagePrivate %d\n", PagePrivate(page));
	dprintk(" PageUptodate %d\n", PageUptodate(page));
	dprintk(" PageError %d\n", PageError(page));
	dprintk(" PageDirty %d\n", PageDirty(page));
	dprintk(" PageReferenced %d\n", PageReferenced(page));
	dprintk(" PageLocked %d\n", PageLocked(page));
	dprintk(" PageWriteback %d\n", PageWriteback(page));
	dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

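/* Submit the accumulated bio (if any) and return NULL so the caller can
 * start accumulating a new one; each submitted bio holds a reference on
 * the parallel_io through bio->bi_private.
 */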
static struct bio *bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write",
			bio->bi_size, (unsigned long long)bio->bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}
	if (bio) {
		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

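/* Add a page to the current bio, submitting and reallocating the bio if
 * it fills up. Returns the bio to keep accumulating into, or ERR_PTR on
 * allocation failure.
 */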
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
				  end_io, par, 0, PAGE_CACHE_SIZE);
}

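/* bio completion for reads: walk the bio_vec array back to front, marking
 * each page up to date on success (the same idiom as mpage_end_io_read).
 */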
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (uptodate)
			SetPageUptodate(page);
	} while (bvec >= bio->bi_io_vec);
	if (!uptodate) {
		struct nfs_read_data *rdata = par->data;
		struct nfs_pgio_header *header = rdata->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

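/* Read completion is finished from a workqueue, presumably so the final
 * completion work can run in process context; bl_end_par_io_read copies
 * any pnfs error into the rpc task status first.
 */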
static void bl_read_cleanup(struct work_struct *work)
{
	struct nfs_read_data *rdata = container_of(work,
			struct nfs_read_data, task.u.tk_work);

	dprintk("%s enter\n", __func__);
	pnfs_ld_read_done(rdata);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_read_data *rdata = data;

	rdata->task.tk_status = rdata->header->pnfs_error;
	INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
	schedule_work(&rdata->task.u.tk_work);
}

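/* Read a list of pages by mapping each one to a block extent and chaining
 * bios against the underlying device; holes are zero-filled without
 * touching the device, and a COW extent is read from instead when present.
 */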
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
	struct nfs_pgio_header *header = rdata->header;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = rdata->args.offset;
	size_t bytes_left = rdata->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = rdata->args.pages;
	int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);

	par = alloc_parallel(rdata);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	for (i = pg_index; i < rdata->pages.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}
		if (is_dio) {
			pg_offset = f_offset & ~PAGE_CACHE_MASK;
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
		} else {
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		}
		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			/* Fill hole w/ zeroes w/o accessing device */
			bio = bl_submit_bio(READ, bio);
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio, rdata->pages.npages - i,
						 READ, isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		rdata->res.eof = 1;
		rdata->res.count = header->inode->i_size - rdata->args.offset;
	} else {
		rdata->res.count = (isect << SECTOR_SHIFT) - rdata->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	/* compute the sector range covering [offset, offset + count) */
	while (isect < end) {
		/* per-extent LAYOUTCOMMIT bookkeeping (elided) */
	}
}

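/* Completion for bios carrying the driver's own zeroing pages: end
 * writeback and drop the page reference taken when the page was set up.
 */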
static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		/* This is the zeroing page we added */
		end_page_writeback(page);
		page_cache_release(page);
	} while (bvec >= bio->bi_io_vec);

	if (unlikely(!uptodate)) {
		struct nfs_write_data *data = par->data;
		struct nfs_pgio_header *header = data->header;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_write_data *data = par->data;
	struct nfs_pgio_header *header = data->header;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

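/* Deferred write completion: on success, mark the just-written extents
 * for LAYOUTCOMMIT before finishing the pnfs write.
 */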
static void bl_write_cleanup(struct work_struct *work)
{
	struct nfs_write_data *wdata = container_of(work,
			struct nfs_write_data, task.u.tk_work);

	dprintk("%s enter\n", __func__);
	if (likely(!wdata->header->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
				     wdata->args.offset, wdata->args.count);
	}
	pnfs_ld_write_done(wdata);
}

/* Called when the last bio of a bl_write_pagelist call completes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_write_data *wdata = data;

	if (unlikely(wdata->header->pnfs_error))
		bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
				      num_se);

	wdata->task.tk_status = wdata->header->pnfs_error;
	wdata->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
	schedule_work(&wdata->task.u.tk_work);
}

/* FIXME: stub - should mark the intersection of layout and page as bad,
 * so it is not used again.
 */
static void mark_bad_read(void)
{
	return;
}

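/* map_block: translate a file-relative sector (isect) to the extent's
 * offset in the underlying block device, recording it in the buffer_head.
 */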
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
		(be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
}

static void
bl_read_single_end_io(struct bio *bio, int error)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;

	/* Only one page in bvec */
	unlock_page(page);
}

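/* Synchronous read used for read-modify-write: pull the covering sectors
 * into a private "shadow" page, then copy only the requested bytes into
 * the target page.
 */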
static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
		    unsigned int offset, unsigned int len)
{
	struct bio *bio;
	struct page *shadow_page;
	char *kaddr, *kshadow_addr;
	int ret = 0;

	dprintk("%s: offset %u len %u\n", __func__, offset, len);

	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (shadow_page == NULL)
		return -ENOMEM;
	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL)
		return -ENOMEM;

	/* point the bio at the extent's device offset (sector math elided) */
	bio->bi_end_io = bl_read_single_end_io;

	lock_page(shadow_page);
	if (bio_add_page(bio, shadow_page, SECTOR_SIZE,
			 round_down(offset, SECTOR_SIZE)) == 0) {
		ret = -EIO;
		goto out;
	}
	submit_bio(READ, bio);
	wait_on_page_locked(shadow_page);

	kaddr = kmap_atomic(page);
	kshadow_addr = kmap_atomic(shadow_page);
	memcpy(kaddr + offset, kshadow_addr + offset, len);
	kunmap_atomic(kshadow_addr);
	kunmap_atomic(kaddr);
out:
	__free_page(shadow_page);
	bio_put(bio);
	return ret;
}

static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
			  unsigned int dirty_offset, unsigned int dirty_len,
			  bool full_page)
{
	int ret = 0;
	unsigned int start, end;

	if (full_page) {
		start = 0;
		end = PAGE_CACHE_SIZE;
	} else {
		start = round_down(dirty_offset, SECTOR_SIZE);
		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
	}

	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
	if (!be) {
		zero_user_segments(page, start, dirty_offset,
				   dirty_offset + dirty_len, end);
		if (start == 0 && end == PAGE_CACHE_SIZE &&
		    trylock_page(page)) {
			SetPageUptodate(page);
			unlock_page(page);
		}
		return ret;
	}

	if (start != dirty_offset)
		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

	if (!ret && (dirty_offset + dirty_len < end))
		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
					  end - dirty_offset - dirty_len);

	return ret;
}

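/* Given an unmapped page, zero it or read it in for copy-on-write; the
 * page is locked by the caller.
 */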
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	/* COW: read the existing block contents in via a buffer_head
	 * (mapping and synchronous read elided) */
	SetPageUptodate(page);

cleanup:
	if (bh)
		free_buffer_head(bh);
	return ret;
}

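/* bl_find_get_zeroing_page (excerpt): find or create the page-cache page
 * that must be zeroed or COW-read before a block can be written.
 */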
	if (PageDirty(page) || PageWriteback(page)) {
		/* another writer owns this page; drop it and return NULL */
	}

	if (!PageUptodate(page)) {
		/* New page, readin or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);

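/* Write a list of pages. Pages that land in an INVALID (not yet
 * initialized) extent first get the rest of their block zeroed or
 * COW-read, and writes that are not sector-aligned go through a
 * synchronous read-modify-write pass.
 */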
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
	struct nfs_pgio_header *header = wdata->header;
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = wdata->args.offset;
	size_t count = wdata->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = wdata->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
		NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	if (header->dreq != NULL &&
	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}

	par = alloc_parallel(wdata);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(header->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				header->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = ret;
				goto out;
			}
			mark_extents_written(BLK_LSEG2EXT(header->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < wdata->pages.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
		}

		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
		pg_offset = offset & ~PAGE_CACHE_MASK;
		if (pg_offset + count > PAGE_CACHE_SIZE)
			pg_len = PAGE_CACHE_SIZE - pg_offset;
		else
			pg_len = count;

		saved_len = pg_len;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_read_partial_page_sync(pages[i], cow_read,
							pg_offset, pg_len, true);
			if (ret) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}
			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}
			/* expand the write to cover the full page */
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
			   (pg_len & (SECTOR_SIZE - 1))) {
			/* non-sector-aligned: sync read-modify-write */
			unsigned int saved_offset = pg_offset;
			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
							pg_len, false);
			pg_offset = round_down(pg_offset, SECTOR_SIZE);
			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
				 - pg_offset;
		}

		bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += saved_len;
		count -= saved_len;
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	wdata->res.count = wdata->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	/* extent list and lock initialization elided */
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args,
				      lcdata->res.status);
}

static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
out_free:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

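/* Mount-time setup: iterate GETDEVICELIST and fetch GETDEVICEINFO for
 * each device, collecting them on the per-server device list.
 */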
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}

	/* allocation and init of b_mt_id and dlist elided */

	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

out_return:
	kfree(dlist);
	return status;

out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		/* not the common case: find the next page-cache hole
		 * after idx instead (elided) */
	}

	return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
	} else {
		if (pgio->pg_dreq == NULL)
			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
						      req->wb_index);
		else
			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pnfs_generic_pg_init_write(pgio, req, wb_size);
	}
}

static bool
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE))
		return false;

	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.encode_layoutcommit = bl_encode_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.clear_layoutdriver = bl_clear_layoutdriver,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
						  struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct nfs_net *nn = net_generic(sb->s_fs_info, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		return ret;
	/* rpc_pipefs notifier and pernet registration elided */
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	pnfs_unregister_layoutdriver(&blocklayout_type);
}

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);