/*
 * Fragment of gfs2_unstuffer_page(): appears to copy "stuffed" (inline)
 * dinode data out of the dinode buffer into a page and map its buffer_head,
 * so the inode's data can live in its own data block.
 * NOTE(review): this extraction is gutted — the fused leading numbers are
 * original line numbers and many intervening lines are missing. Do not
 * treat this fragment as compilable; verify against the full source.
 */
55 static int gfs2_unstuffer_page(
struct gfs2_inode *
ip,
struct buffer_head *dibh,
59 struct buffer_head *bh;
62 if (!page || page->
index) {
69 if (!PageUptodate(page)) {
/* kmap the page and (presumably) memcpy the inline data past the
 * on-disk dinode header; dsize is clamped against the dinode block
 * size minus sizeof(struct gfs2_dinode) — TODO confirm the copy. */
70 void *kaddr =
kmap(page);
71 u64 dsize = i_size_read(inode);
73 if (dsize > (dibh->b_size -
sizeof(
struct gfs2_dinode)))
80 SetPageUptodate(page);
83 if (!page_has_buffers(page))
87 bh = page_buffers(page);
/* Map the page's buffer to the newly allocated data block. */
89 if (!buffer_mapped(bh))
90 map_bh(bh, inode->
i_sb, block);
92 set_buffer_uptodate(bh);
/* Journaling mode decides how the buffer is dirtied (jdata vs. writeback)
 * — the actual dirty calls are in the missing lines. */
93 if (!gfs2_is_jdata(ip))
95 if (!gfs2_is_writeback(ip))
/*
 * Fragment of an "unstuff" routine (presumably gfs2_unstuff_dinode —
 * the function header is missing from this extraction). Reads the dinode
 * buffer, hands non-empty inodes to gfs2_unstuffer_page(), clears the
 * inline data tail of the dinode, and accounts the new data block.
 * NOTE(review): gutted extraction; original lines missing throughout.
 */
119 struct buffer_head *bh, *dibh;
122 int isdir = gfs2_is_dir(ip);
127 error = gfs2_meta_inode_buffer(ip, &dibh);
131 if (i_size_read(&ip->
i_inode)) {
148 error = gfs2_unstuffer_page(ip, dibh, block, page);
/* Wipe the inline data that followed the on-disk dinode header. */
158 gfs2_buffer_clear_tail(dibh,
sizeof(
struct gfs2_dinode));
160 if (i_size_read(&ip->
i_inode)) {
/* One block was allocated to hold the former inline data. */
162 gfs2_add_inode_blocks(&ip->
i_inode, 1);
/*
 * Fragment of find_metapath(): computes, for a logical block number, the
 * per-height index path through the metadata tree. The visible loop walks
 * from the tree height down to 0 — the per-level divide/modulo body is in
 * the missing lines (TODO confirm against full source).
 */
237 static void find_metapath(
const struct gfs2_sbd *sdp,
u64 block,
242 for (i = height; i--;)
/*
 * Header fragment of metapath_branch_start(): presumably returns the tree
 * height at which a new metadata branch begins for this path — body is
 * entirely missing from this extraction.
 */
247 static inline unsigned int metapath_branch_start(
const struct metapath *mp)
/*
 * Fragment of metapointer(): returns a pointer to the indirect-block slot
 * for the given height in the metapath. At height 0 the slots follow the
 * on-disk dinode header, otherwise a (smaller) metadata header — hence the
 * conditional head_size. The pointer arithmetic itself is in missing lines.
 */
264 static inline __be64 *metapointer(
unsigned int height,
const struct metapath *mp)
267 unsigned int head_size = (height > 0) ?
/*
 * Fragment of gfs2_metapath_ra(): issues readahead for the indirect-block
 * pointers from @pos to the end of @bh. For each non-zero pointer it
 * (presumably) grabs the buffer and submits an async read if the buffer
 * lock is free and the buffer is not already uptodate.
 * NOTE(review): gutted extraction — submit/unlock calls are in missing lines.
 */
272 static void gfs2_metapath_ra(
struct gfs2_glock *gl,
273 const struct buffer_head *bh,
const __be64 *
pos)
275 struct buffer_head *rabh;
276 const __be64 *endp = (
const __be64 *)(bh->b_data + bh->b_size);
279 for (t = pos; t < endp; t++) {
/* trylock: readahead is best-effort; skip buffers someone holds. */
284 if (trylock_buffer(rabh)) {
285 if (!buffer_uptodate(rabh)) {
/*
 * Fragment of a metadata-tree walk (presumably lookup_metapath — header
 * missing). Iterates the metapath levels above the data level, fetching
 * the pointer at each height; the buffer-read step is in missing lines.
 */
315 unsigned int end_of_metadata = ip->
i_height - 1;
321 for (x = 0; x < end_of_metadata; x++) {
322 ptr = metapointer(x, mp);
/*
 * Fragment of release_metapath(): drops (brelse) the buffer_head
 * references held in mp->mp_bh[] — the surrounding loop/guard lines are
 * missing from this extraction.
 */
335 static inline void release_metapath(
struct metapath *mp)
342 brelse(mp->
mp_bh[i]);
/*
 * Fragment of gfs2_extent_length(): counts how many consecutive pointers
 * starting at @ptr describe one physically-contiguous extent, bounded by
 * @limit and the end of the block; *eob presumably reports end-of-block.
 * Returns the pointer-count (ptr - first). Inner comparison lines missing.
 */
361 static inline unsigned int gfs2_extent_length(
void *
start,
unsigned int len,
__be64 *ptr,
unsigned limit,
int *eob)
/* limit == 0 means "no limit" (checked before the decrement). */
372 if (limit && --limit == 0)
379 return (ptr - first);
/*
 * Header fragment of bmap_unlock(): counterpart to bmap_lock() used around
 * gfs2_block_map(); body missing — presumably releases a lock whose choice
 * depends on @create. TODO confirm against full source.
 */
390 static inline void bmap_unlock(
struct gfs2_inode *ip,
int create)
/*
 * Fragment of gfs2_bmap_alloc(): the allocating half of block mapping.
 * Given the looked-up state (@sheight = existing tree height, @height =
 * required height), it computes how many data blocks (dblks) and indirect
 * blocks (iblks) are needed, grows the metadata tree if the height must
 * increase, initializes new indirect blocks, zeroes data blocks when the
 * bh is marked "zeronew", accounts the allocation on the inode, and maps
 * @bh_map to the first new data block with set_buffer_new().
 * NOTE(review): heavily gutted extraction — allocation calls (the actual
 * gfs2_alloc_blocks and the state machine) are in missing lines; the
 * fused leading numbers are original line numbers.
 */
447 static int gfs2_bmap_alloc(
struct inode *inode,
const sector_t lblock,
448 struct buffer_head *bh_map,
struct metapath *mp,
449 const unsigned int sheight,
450 const unsigned int height,
451 const unsigned int maxlen)
454 struct gfs2_sbd *sdp = GFS2_SB(inode);
456 struct buffer_head *dibh = mp->
mp_bh[0];
458 unsigned n,
i, blks, alloced = 0, iblks = 0, branch_start = 0;
460 unsigned ptrs_per_blk;
461 const unsigned end_of_metadata = height - 1;
/* Tree already tall enough: only data blocks are needed. */
473 if (height == sheight) {
474 struct buffer_head *bh;
476 ptr = metapointer(end_of_metadata, mp);
477 bh = mp->
mp_bh[end_of_metadata];
478 dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
/* Tree must grow: also allocate (height - sheight) indirect levels,
 * plus the branch above the split point when height increases. */
485 dblks =
min(maxlen, ptrs_per_blk - mp->
mp_list[end_of_metadata]);
488 iblks = height - sheight;
494 branch_start = metapath_branch_start(mp);
495 iblks += (height - branch_start);
501 blks = dblks + iblks;
/* Raising the root: move the dinode's pointer tail down into the new
 * top indirect block, then clear the dinode's pointer area. */
516 ptr = (
__be64 *)(dibh->b_data +
520 for (; i - 1 < height - ip->
i_height && n > 0; i++, n--)
521 gfs2_indirect_init(mp, ip->
i_gl, i, 0, bn++);
522 if (i - 1 == height - ip->
i_height) {
524 gfs2_buffer_copy_tail(mp->
mp_bh[i],
527 gfs2_buffer_clear_tail(dibh,
534 for(i = branch_start; i <
height; i++) {
537 brelse(mp->
mp_bh[i]);
546 if (i > 1 && i < height)
548 for (; i < height && n > 0; i++, n--)
549 gfs2_indirect_init(mp, ip->
i_gl, i,
561 ptr = metapointer(end_of_metadata, mp);
/* For fallocate-style requests the new blocks must read as zeroes. */
565 if (buffer_zeronew(bh_map)) {
566 ret = sb_issue_zeroout(sb, dblock, dblks,
570 "Failed to zero data buffers\n");
571 clear_buffer_zeronew(bh_map);
/* Account all newly allocated blocks on the inode. */
579 gfs2_add_inode_blocks(&ip->
i_inode, alloced);
581 map_bh(bh_map, inode->
i_sb, dblock);
582 bh_map->b_size = dblks << inode->
i_blkbits;
583 set_buffer_new(bh_map);
/*
 * Fragment of the main block-map entry point (presumably gfs2_block_map —
 * the function name and first parameters are missing). Visible flow:
 * take the bmap lock, reset @bh_map state, compute the tree height needed
 * for @lblock, walk the metapath, and either map an existing extent
 * (setting b_size to the contiguous length and the boundary flag at
 * end-of-block) or fall through to gfs2_bmap_alloc() when @create.
 * NOTE(review): gutted extraction; error paths and the non-create early
 * exit are in missing lines.
 */
602 struct buffer_head *bh_map,
int create)
605 struct gfs2_sbd *sdp = GFS2_SB(inode);
606 unsigned int bsize = sdp->
sd_sb.sb_bsize;
607 const unsigned int maxlen = bh_map->b_size >> inode->
i_blkbits;
615 struct buffer_head *bh;
621 bmap_lock(ip, create);
/* Start from a clean mapping state for the caller's bh. */
622 clear_buffer_mapped(bh_map);
623 clear_buffer_new(bh_map);
624 clear_buffer_boundary(bh_map);
625 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
626 if (gfs2_is_dir(ip)) {
631 ret = gfs2_meta_inode_buffer(ip, &mp.
mp_bh[0]);
/* Determine required tree height from the byte offset of lblock;
 * arr[] is presumably the per-height max-size table. */
636 size = (lblock + 1) * bsize;
637 while (size > arr[height])
639 find_metapath(sdp, lblock, &mp, height);
641 if (height > ip->
i_height || gfs2_is_stuffed(ip))
643 ret = lookup_metapath(ip, &mp);
648 ptr = metapointer(ip->
i_height - 1, &mp);
653 len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
654 bh_map->b_size = (len << inode->
i_blkbits);
/* eob set -> caller should not merge past this block. */
656 set_buffer_boundary(bh_map);
659 release_metapath(&mp);
660 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
661 bmap_unlock(ip, create);
/* Allocation path: a stuffed inode must have been unstuffed earlier. */
667 BUG_ON(gfs2_is_stuffed(ip));
673 ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
/*
 * Fragment of a convenience wrapper (presumably gfs2_extent_map — header
 * missing): maps via an on-stack buffer_head and returns the physical
 * block number through *dblock. The b_size shift of 5 when !create
 * presumably asks for a 32-block extent probe — TODO confirm.
 */
682 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
690 bh.b_size = 1 << (inode->
i_blkbits + (create ? 0 : 5));
693 *dblock = bh.b_blocknr;
/*
 * Fragment of do_strip(): frees the blocks referenced by one indirect
 * block (pointers @top..@bottom) during truncation. Visible structure:
 * two passes over the pointer range — first to collect resource groups
 * (rlist) and coalesce runs of contiguous blocks (bstart/blen), then,
 * after locking the rgrps, to actually free and de-account the blocks.
 * NOTE(review): gutted extraction; the free calls and transaction setup
 * are in missing lines.
 */
714 static int do_strip(
struct gfs2_inode *ip,
struct buffer_head *dibh,
723 unsigned int rg_blocks = 0;
725 unsigned int revokes = 0;
/* Bottom level of the tree holds data pointers, not metadata. */
744 metadata = (height != ip->
i_height - 1);
754 for (p = top; p <
bottom; p++) {
/* Extend the current contiguous run when possible. */
760 if (bstart + blen == bn)
778 for (x = 0; x <
rlist.rl_rgrps; x++) {
780 rgd =
rlist.rl_ghs[
x].gh_gl->gl_object;
788 if (gfs2_rs_active(ip->
i_res))
806 for (p = top; p <
bottom; p++) {
812 if (bstart + blen == bn)
/* Each freed block reduces the inode's block count. */
825 gfs2_add_inode_blocks(&ip->
i_inode, -1);
/*
 * Fragment of recursive_scan(): depth-first walk of the metadata tree
 * used by truncation. At each level it reads the block for the current
 * metapath position, calls do_strip() on the pointer range, issues
 * readahead for child pointers, and recurses one level deeper for every
 * non-zero pointer until ip->i_height - 1.
 * NOTE(review): gutted extraction; top/bottom range computation is
 * partially missing.
 */
868 static int recursive_scan(
struct gfs2_inode *ip,
struct buffer_head *dibh,
869 struct metapath *mp,
unsigned int height,
873 struct buffer_head *bh =
NULL;
/* Height 0 reads the dinode itself rather than an indirect block. */
880 error = gfs2_meta_inode_buffer(ip, &bh);
892 top = (
__be64 *)(bh->b_data + mh_size) +
898 error = do_strip(ip, dibh, bh, top, bottom, height, sm);
902 if (height < ip->i_height - 1) {
904 gfs2_metapath_ra(ip->
i_gl, bh, top);
906 for (; top <
bottom; top++, first = 0) {
912 error = recursive_scan(ip, dibh, mp, height + 1, bn,
/*
 * Fragment of a partial-block zeroing routine (presumably
 * gfs2_block_truncate_page — header missing): finds the page/buffer
 * covering @offset, maps and reads it if necessary, then zeroes the tail
 * of the block past the new EOF with zero_user().
 * NOTE(review): gutted extraction; page lookup, read submission and
 * error/unlock paths are in missing lines.
 */
931 struct inode *inode = mapping->
host;
936 struct buffer_head *bh;
944 blocksize = inode->
i_sb->s_blocksize;
/* Bytes from offset to the end of its containing block. */
945 length = blocksize - (offset & (blocksize - 1));
946 iblock = index << (PAGE_CACHE_SHIFT - inode->
i_sb->s_blocksize_bits);
948 if (!page_has_buffers(page))
952 bh = page_buffers(page);
/* Advance to the buffer_head covering @offset within the page. */
954 while (offset >= pos) {
955 bh = bh->b_this_page;
962 if (!buffer_mapped(bh)) {
965 if (!buffer_mapped(bh))
970 if (PageUptodate(page))
971 set_buffer_uptodate(bh);
/* Read the block in (missing lines) before zeroing part of it. */
973 if (!buffer_uptodate(bh)) {
978 if (!buffer_uptodate(bh))
983 if (!gfs2_is_writeback(ip))
986 zero_user(page, offset, length);
/*
 * Fragment of trunc_start(): first phase of shrinking an inode. For a
 * stuffed inode it simply clears the inline data past @newsize; otherwise
 * it zeroes the partial tail block via gfs2_block_truncate_page() when
 * @newsize is not block-aligned. Finally records the new i_size.
 * NOTE(review): gutted extraction; transaction begin/end and dirtying of
 * the dinode are in missing lines.
 */
994 static int trunc_start(
struct inode *inode,
u64 oldsize,
u64 newsize)
997 struct gfs2_sbd *sdp = GFS2_SB(inode);
999 struct buffer_head *dibh;
1000 int journaled = gfs2_is_jdata(ip);
1008 error = gfs2_meta_inode_buffer(ip, &dibh);
1014 if (gfs2_is_stuffed(ip)) {
1015 gfs2_buffer_clear_tail(dibh,
sizeof(
struct gfs2_dinode) + newsize);
/* Non-aligned new size: zero the remainder of the last kept block. */
1017 if (newsize & (
u64)(sdp->
sd_sb.sb_bsize - 1)) {
1018 error = gfs2_block_truncate_page(mapping, newsize);
1025 i_size_write(inode, newsize);
/*
 * Fragment of the truncation dealloc phase (presumably trunc_dealloc —
 * header missing): computes the last logical block to keep, builds its
 * metapath, and kicks off recursive_scan() from the tree root to free
 * everything beyond it. NOTE(review): gutted extraction.
 */
1040 unsigned int height = ip->
i_height;
1048 lblock = (size - 1) >> sdp->
sd_sb.sb_bsize_shift;
1050 find_metapath(sdp, lblock, &mp, ip->
i_height);
1064 error = recursive_scan(ip,
NULL, &mp, 0, 0, 1, &sm);
/*
 * Fragment of the truncation finish phase (presumably trunc_end — header
 * missing): re-reads the dinode and, when the file shrank to zero, clears
 * the dinode's pointer/data tail. NOTE(review): gutted extraction; height
 * reset and timestamp/dirty updates are in missing lines.
 */
1077 struct buffer_head *dibh;
1086 error = gfs2_meta_inode_buffer(ip, &dibh);
1090 if (!i_size_read(&ip->
i_inode)) {
1093 gfs2_buffer_clear_tail(dibh,
sizeof(
struct gfs2_dinode));
/*
 * Fragment of do_shrink(): drives a size-reducing truncate in three
 * phases — trunc_start() (zero tail, set i_size), trunc_dealloc() (free
 * blocks beyond the new size, skipped for stuffed inodes), trunc_end()
 * (final dinode cleanup). Error-check lines are missing between calls.
 */
1120 static int do_shrink(
struct inode *inode,
u64 oldsize,
u64 newsize)
1125 error = trunc_start(inode, oldsize, newsize);
/* Stuffed inodes have no external blocks to deallocate. */
1128 if (gfs2_is_stuffed(ip))
1131 error = trunc_dealloc(ip, newsize);
1133 error = trunc_end(ip);
/*
 * Single-line fragment of a caller (header missing) that invokes
 * do_shrink() with oldsize == newsize — presumably a same-size truncate
 * used to release blocks past EOF. TODO confirm against full source.
 */
1143 ret = do_shrink(inode, size, size);
/*
 * Fragment of do_grow(): extends an inode to @size. For a stuffed inode
 * that must grow beyond inline capacity it takes quota locks (the
 * unstuff call itself is in missing lines), then updates the dinode and
 * writes the new i_size. The goto labels show the usual acquire/release
 * cleanup ladder. NOTE(review): gutted extraction.
 */
1167 static int do_grow(
struct inode *inode,
u64 size)
1170 struct gfs2_sbd *sdp = GFS2_SB(inode);
1171 struct buffer_head *dibh;
1175 if (gfs2_is_stuffed(ip) &&
1177 error = gfs2_quota_lock_check(ip);
1183 goto do_grow_qunlock;
1189 goto do_grow_release;
1197 error = gfs2_meta_inode_buffer(ip, &dibh);
1201 i_size_write(inode, size);
/*
 * Fragment of the setattr-size dispatcher (header missing): grows when
 * the new size is >= the old one, otherwise shrinks.
 */
1244 if (newsize >= oldsize)
1245 return do_grow(inode, newsize);
1247 return do_shrink(inode, oldsize, newsize);
/*
 * Fragments of two deallocation helpers (headers missing): one resumes a
 * truncate by deallocating past the current i_size and finishing with
 * trunc_end(); the other (presumably full-file dealloc) truncates to 0.
 * NOTE(review): gutted extraction — the boundary between the two
 * functions is not visible here.
 */
1253 error = trunc_dealloc(ip, i_size_read(&ip->
i_inode));
1255 error = trunc_end(ip);
1261 return trunc_dealloc(ip, 0);
/*
 * Fragment of a write-allocation probe (presumably
 * gfs2_write_alloc_required — header missing): converts a byte range
 * [offset, offset+len) to a logical block range, clamps it to EOF, then
 * walks the range with an on-stack buffer_head; an unmapped buffer means
 * the write will need allocation. NOTE(review): gutted extraction — the
 * bmap call inside the loop and the return statements are missing.
 */
1277 struct buffer_head bh;
1279 u64 lblock, lblock_stop,
size;
1285 if (gfs2_is_stuffed(ip)) {
1292 shift = sdp->
sd_sb.sb_bsize_shift;
/* Round the file size and range end up to whole blocks. */
1294 end_of_file = (i_size_read(&ip->
i_inode) + sdp->
sd_sb.sb_bsize - 1) >> shift;
1295 lblock = offset >> shift;
1296 lblock_stop = (offset + len + sdp->
sd_sb.sb_bsize - 1) >> shift;
1297 if (lblock_stop > end_of_file)
1300 size = (lblock_stop - lblock) << shift;
1305 if (!buffer_mapped(&bh))
/* Skip ahead by however many blocks the probe mapped contiguously. */
1308 lblock += (bh.b_size >> ip->
i_inode.i_blkbits);