typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;
static int ext4_block_to_path(struct inode *inode,
                              ext4_lblk_t i_block,
                              ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
                             i_block + direct_blocks +
                             indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
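/*
 * Worked example (userspace sketch, not kernel code): the offset arithmetic
 * above for a hypothetical 4 KiB block size, i.e. 1024 4-byte block numbers
 * per block (ptrs_bits == 10) and 12 direct slots in the inode's i_data.
 * EXT4_IND_BLOCK, EXT4_DIND_BLOCK and EXT4_TIND_BLOCK are i_data slots
 * 12, 13 and 14. block_to_path_demo() and the DEMO_* constants are local
 * stand-ins, not ext4 symbols.
 */
#include <stdio.h>

#define DEMO_DIRECT_BLOCKS      12
#define DEMO_PTRS_BITS          10
#define DEMO_PTRS               (1 << DEMO_PTRS_BITS)

static int block_to_path_demo(unsigned long i_block, int offsets[4])
{
        int n = 0;

        if (i_block < DEMO_DIRECT_BLOCKS) {
                offsets[n++] = i_block;                 /* direct slot */
        } else if ((i_block -= DEMO_DIRECT_BLOCKS) < DEMO_PTRS) {
                offsets[n++] = 12;                      /* EXT4_IND_BLOCK */
                offsets[n++] = i_block;
        } else if ((i_block -= DEMO_PTRS) < (1UL << (DEMO_PTRS_BITS * 2))) {
                offsets[n++] = 13;                      /* EXT4_DIND_BLOCK */
                offsets[n++] = i_block >> DEMO_PTRS_BITS;
                offsets[n++] = i_block & (DEMO_PTRS - 1);
        } else {
                i_block -= 1UL << (DEMO_PTRS_BITS * 2);
                offsets[n++] = 14;                      /* EXT4_TIND_BLOCK */
                offsets[n++] = i_block >> (DEMO_PTRS_BITS * 2);
                offsets[n++] = (i_block >> DEMO_PTRS_BITS) & (DEMO_PTRS - 1);
                offsets[n++] = i_block & (DEMO_PTRS - 1);
        }
        return n;       /* depth of the lookup path */
}

int main(void)
{
        unsigned long blocks[] = { 5, 12, 1035, 1048576 };
        int offsets[4];

        for (int i = 0; i < 4; i++) {
                int n = block_to_path_demo(blocks[i], offsets);

                printf("block %8lu -> depth %d:", blocks[i], n);
                for (int j = 0; j < n; j++)
                        printf(" %d", offsets[j]);
                printf("\n");
        }
        return 0;
}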
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t *offsets,
                                 Indirect chain[4], int *err)
{
        struct buffer_head *bh;
        /* ... */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        /* ... */
                if (ext4_check_indirect_blockref(inode, bh)) {
                        /* ... */
                }
        /* ... */
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
        /* ... */
}

/* In ext4_find_near(): try the closest allocated block to the left */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }
        /* ... */
        return ind->bh->b_blocknr;

/* In ext4_find_goal(): */
        goal = ext4_find_near(inode, partial);
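/*
 * Sketch of the allocation-goal heuristic used by the fragments above:
 * scan backwards from the missing slot for the nearest already-allocated
 * block number, and otherwise fall back to the block that holds the
 * indirect block itself. find_near_demo() is a hypothetical userspace
 * restatement, not the kernel's ext4_find_near().
 */
#include <stdint.h>

static uint32_t find_near_demo(const uint32_t *start, const uint32_t *slot,
                               uint32_t indirect_blocknr)
{
        const uint32_t *p;

        /* Try to find a previously allocated block in the same array */
        for (p = slot - 1; p >= start; p--)
                if (*p)
                        return *p;      /* allocate right after a neighbour */

        /* No neighbour found: allocate near the indirect block itself */
        return indirect_blocknr;
}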
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
                                 int blocks_to_boundary)
{
        unsigned int count = 0;

        /*
         * Simple case: the [t,d]indirect block(s) on this path have not
         * been allocated yet, so no direct blocks on it exist either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
               le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
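/*
 * Worked example of the boundary clamp above, with hypothetical numbers:
 * requesting blks == 8 data blocks when only blocks_to_boundary + 1 == 4
 * slots remain before the end of the current indirect block yields 4,
 * because this path never allocates across an indirect-block boundary.
 * clamp_to_boundary() is a local restatement of the k > 0 arm.
 */
#include <assert.h>

static unsigned int clamp_to_boundary(unsigned int blks,
                                      int blocks_to_boundary)
{
        unsigned int limit = (unsigned int)blocks_to_boundary + 1;

        return blks < limit ? blks : limit;
}

int main(void)
{
        assert(clamp_to_boundary(8, 3) == 4);   /* clamped at the boundary */
        assert(clamp_to_boundary(2, 3) == 2);   /* request fits entirely */
        return 0;
}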
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
                             int indirect_blks, int blks,
                             ext4_fsblk_t new_blocks[4], int *err)
{
        /* ... */
        unsigned long count = 0, blk_allocated = 0;
        /* ... */
        /* first we try to allocate the indirect blocks */
        target = indirect_blks;
        /* ... */
                EXT4_ERROR_INODE(inode,
                                 "current_block %llu + count %lu > %d!",
                                 current_block, count,
                                 EXT4_MAX_BLOCK_FILE_PHYS);
        /* ... */
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }
                /* ... */
                        /* save the new block number for the first direct block */
                        new_blocks[index] = current_block;
                        printk(KERN_INFO "%s returned more blocks than "
                                                "requested\n", __func__);
        /* ... */
        target = blks - count;
        blk_allocated = count;
        /* ... */
                EXT4_ERROR_INODE(inode,
                                 "current_block %llu + ar.len %d > %d!",
                                 current_block, ar.len,
                                 EXT4_MAX_BLOCK_FILE_PHYS);
        /* ... */
        if (*err && (target == blks)) {
                /* ... */
        }
        /* ... */
                if (target == blks) {
                        /* save the new block number for the first direct block */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
        /* ... */
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
        /* ... */
}
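/*
 * Userspace sketch of the inner loop above: a freshly allocated run of
 * blocks is peeled off the front to fill the still-missing indirect
 * blocks, and the first remaining block seeds new_blocks[index] for the
 * direct data. The starting block number and run length are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t new_blocks[4] = { 0 };
        uint64_t current_block = 5000;  /* hypothetical start of the run */
        unsigned long count = 3;        /* run length from the allocator */
        int indirect_blks = 2;          /* metadata blocks still needed */
        int index = 0;

        while (index < indirect_blks && count) {
                new_blocks[index++] = current_block++;
                count--;
        }
        if (count > 0)                  /* leftover seeds the data block */
                new_blocks[index] = current_block;

        for (int i = 0; i <= index; i++)
                printf("new_blocks[%d] = %llu\n", i,
                       (unsigned long long)new_blocks[i]);
        return 0;       /* prints 5000, 5001, 5002 */
}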
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, int indirect_blks,
                             int *blks, ext4_fsblk_t goal,
                             ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        /* ... */
        struct buffer_head *bh;
        /* ... */
        num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
                                *blks, new_blocks, &err);
        /* ... */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                /* ... */
                BUFFER_TRACE(bh, "call get_create_access");
                /* ... */
                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /* ... */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                /* ... */
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                /* ... */
        }
        /* ... */
failed:
        /* ... */
        for (i = 1; i <= n; i++) {
                /* ... */
        }
        for (i = n+1; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
        /* ... */
}
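/*
 * Userspace sketch of the n == indirect_blks case above: the last new
 * metadata block is populated with the on-disk little-endian numbers of
 * the extra data blocks. htole32()/le32toh() (glibc <endian.h>) stand in
 * for the kernel's cpu_to_le32()/le32_to_cpu(); the slot offset and block
 * numbers are hypothetical.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t block_data[1024] = { 0 };      /* zeroed "indirect block" */
        uint32_t *p = block_data + 7;           /* offsets[n], made up */
        uint32_t current_block = 9000;          /* first data block of run */
        int num = 4;                            /* data blocks mapped */

        *p = htole32(current_block);            /* *branch[n].p = key */
        for (int i = 1; i < num; i++)
                *(p + i) = htole32(++current_block);

        for (int i = 0; i < num; i++)
                printf("slot %d -> block %u\n", 7 + i, le32toh(*(p + i)));
        return 0;
}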
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                              ext4_lblk_t block, Indirect *where, int num,
                              int blks)
{
        /* ... */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, where->bh);
                /* ... */
        }

        /* That's it */
        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the freshly
         * allocated direct blocks that follow the first one.
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }
        /* ... */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
        /* ... */
err_out:
        for (i = 1; i <= num; i++) {
                /* ... */
        }
        /* ... */
}
int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map, int flags)
{
        /* ... */
        int blocks_to_boundary = 0;
        /* ... */
        trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
        /* ... */
        depth = ext4_block_to_path(inode, map->m_lblk, offsets,
                                   &blocks_to_boundary);
        /* ... */
        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                /* ... */
                while (count < map->m_len && count <= blocks_to_boundary) {
                        /* ... */
                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                /* ... */
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
                goto cleanup;
        /* ... */
                EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
                                 "non-extent mapped inodes with bigalloc");
        /* ... */
        goal = ext4_find_goal(inode, map->m_lblk, partial);

        /* the number of blocks to allocate for the [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;
        /* ... */
        count = ext4_blks_to_allocate(partial, indirect_blks,
                                      map->m_len, blocks_to_boundary);
        /* ... */
        err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
                                &count, goal,
                                offsets + (partial - chain), partial);
        /* ... */
        if (!err)
                err = ext4_splice_branch(handle, inode, map->m_lblk,
                                         partial, indirect_blks, count);
        /* ... */
        ext4_update_inode_fsync_trans(handle, inode, 1);
        /* ... */
        if (count > blocks_to_boundary)
                map->m_flags |= EXT4_MAP_BOUNDARY;
        /* ... */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        /* ... */
        trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
                                       map->m_pblk, map->m_len, err);
        /* ... */
}
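/*
 * Sketch of the extent-sizing loop in the "simplest case" above: given
 * the slots that follow the found key, count how many map physically
 * consecutive blocks, stopping at the request length or the boundary.
 * The slot contents are hypothetical and already CPU-endian.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t slots[] = { 800, 801, 802, 810, 811 };
        unsigned int m_len = 5;         /* blocks requested by the caller */
        unsigned int blocks_to_boundary = 4;
        uint32_t first_block = slots[0];
        unsigned int count = 1;

        while (count < m_len && count <= blocks_to_boundary) {
                uint32_t blk = slots[count];

                if (blk == first_block + count)
                        count++;
                else
                        break;
        }
        /* 802 is followed by 810, so only 3 contiguous blocks are mapped */
        printf("mapped %u contiguous blocks from %u\n", count, first_block);
        return 0;
}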
ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                           const struct iovec *iov, loff_t offset,
                           unsigned long nr_segs)
{
        /* ... */
        struct inode *inode = file->f_mapping->host;
        /* ... */
        size_t count = iov_length(iov, nr_segs);
        /* ... */
                loff_t final_size = offset + count;

                if (final_size > inode->i_size) {
                        /* Credits for sb + inode write */
                        handle = ext4_journal_start(inode, 2);
                        if (IS_ERR(handle)) {
                                ret = PTR_ERR(handle);
                                goto out;
                        }
                        /* ... */
                }
        /* ... */
        if (rw == READ && ext4_should_dioread_nolock(inode)) {
                /* ... */
                if (unlikely(ext4_test_inode_state(inode,
                                                   EXT4_STATE_DIOREAD_LOCK))) {
                        /* ... */
                }
                ret = __blockdev_direct_IO(rw, iocb, inode,
                                           inode->i_sb->s_bdev, iov,
                                           offset, nr_segs,
                                           ext4_get_block, NULL, NULL, 0);
        } else {
                ret = blockdev_direct_IO(rw, iocb, inode, iov,
                                         offset, nr_segs, ext4_get_block);
                /* ... */
                        loff_t isize = i_size_read(inode);
                        loff_t end = offset + iov_length(iov, nr_segs);

                        if (end > isize)
                                ext4_truncate_failed_write(inode);
                /* ... */
        }
        /* ... */
                /* Credits for sb + inode write */
                handle = ext4_journal_start(inode, 2);
                if (IS_ERR(handle)) {
                        /* ... */
                        ret = PTR_ERR(handle);
                        /* ... */
                }
                /* ... */
                if (ret > 0) {
                        loff_t end = offset + ret;

                        if (end > inode->i_size) {
                                /* ... */
                                i_size_write(inode, end);
                                /* ... */
                        }
                }
        /* ... */
}
/* Tail of ext4_ind_calc_metadata_amount(): metadata blocks to reserve */
        return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;

/* In ext4_ind_trans_blocks(): worst case for discontiguous blocks, each
 * data block may touch its own indirect and dindirect block, plus one
 * shared tindirect block */
        indirects = nrblocks * 2 + 1;
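/*
 * Worked example of the two credit estimates. The discontiguous worst
 * case is the nrblocks * 2 + 1 above. For contiguous blocks the kernel
 * charges roughly one indirect block per addrs-per-block data blocks plus
 * the [dt]indirect path; the "+ 4" form below is an assumption based on
 * that reasoning, shown for 4 KiB blocks (1024 addresses per block).
 */
#include <stdio.h>

#define DEMO_ADDR_PER_BLOCK 1024
#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int nrblocks = 2048;

        printf("contiguous:    %d\n",
               DEMO_DIV_ROUND_UP(nrblocks, DEMO_ADDR_PER_BLOCK) + 4); /* 6 */
        printf("discontiguous: %d\n", nrblocks * 2 + 1);        /* 4097 */
        return 0;
}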
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (!ext4_handle_valid(handle))
                return 0;
        /* ... */
        if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
                return 0;
        return 1;
}
static Indirect *ext4_find_shared(struct inode *inode, int depth,
                                  ext4_lblk_t offsets[4], Indirect chain[4],
                                  __le32 *top)
{
        /* ... */
        /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
        if (!partial)
                partial = chain + k-1;
        /*
         * If the branch acquired continuation since we've looked at it -
         * fine, it should all survive and (new) top doesn't belong to us.
         */
        if (!partial->key && *partial->p)
                goto no_top;
        for (p = partial; (p > chain) &&
             all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
                ;
        /* ... */
        if (p == chain + k - 1 && p > chain) {
                /* ... */
        }
        /* ... */
        while (partial > p) {
                brelse(partial->bh);
                partial--;
        }
        /* ... */
}
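/*
 * The all_zeroes() test used in the loop above checks whether every block
 * pointer in [p, q) is zero, i.e. the indirect block holds no live
 * mappings below the truncation point; a minimal sketch matching its use:
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
{
        while (p < q)
                if (*p++)
                        return 0;
        return 1;
}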
static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
                             struct buffer_head *bh,
                             ext4_fsblk_t block_to_free,
                             unsigned long count, __le32 *first,
                             __le32 *last)
{
        /* ... */
                EXT4_ERROR_INODE(inode, "attempt to clear invalid "
                                 "blocks %llu len %lu",
                                 (unsigned long long) block_to_free, count);
        /* ... */
        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                        err = ext4_handle_dirty_metadata(handle, inode, bh);
                }
                /* ... */
                err = ext4_truncate_restart_trans(handle, inode,
                                        ext4_blocks_for_truncate(inode));
                /* ... */
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        err = ext4_journal_get_write_access(handle, bh);
                }
        }

        for (p = first; p < last; p++)
                *p = 0;
        /* ... */
        ext4_std_error(inode->i_sb, err);
        return err;
}
static void ext4_free_data(handle_t *handle, struct inode *inode,
                           struct buffer_head *this_bh,
                           __le32 *first, __le32 *last)
{
        /* ... */
        unsigned long count = 0;        /* Number of blocks in the run */
        /* ... */
        if (this_bh) {                  /* For indirect block */
                BUFFER_TRACE(this_bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, this_bh);
                /* ... */
        }

        for (p = first; p < last; p++) {
                /* ... */
                        /* accumulate blocks to free if they're contiguous */
                        if (count == 0) {
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        } else if (nr == block_to_free + count) {
                                count++;
                        } else {
                                err = ext4_clear_blocks(handle, inode, this_bh,
                                                        block_to_free, count,
                                                        block_to_free_p, p);
                                if (err)
                                        break;
                                block_to_free = nr;
                                block_to_free_p = p;
                                count = 1;
                        }
                /* ... */
        }

        if (!err && count > 0)
                err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
                                        count, block_to_free_p, p);
        /* ... */
                BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
                /* ... */
                        EXT4_ERROR_INODE(inode,
                                         "circular indirect block detected at "
                                         "block %llu",
                                         (unsigned long long) this_bh->b_blocknr);
        /* ... */
}
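/*
 * Userspace sketch of the run-coalescing logic above: walk an array of
 * block numbers and emit maximal (start, count) runs of physically
 * contiguous blocks, the unit that ext4_clear_blocks() frees in one go.
 * The pointer array and emit_run() are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void emit_run(uint32_t start, unsigned long count)
{
        printf("free %lu block(s) starting at %u\n", count, start);
}

int main(void)
{
        uint32_t ptrs[] = { 100, 101, 102, 0, 200, 201 };
        uint32_t block_to_free = 0;
        unsigned long count = 0;

        for (size_t i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++) {
                uint32_t nr = ptrs[i];

                if (!nr)
                        continue;               /* a hole, skip it */
                if (count == 0) {               /* start a new run */
                        block_to_free = nr;
                        count = 1;
                } else if (nr == block_to_free + count) {
                        count++;                /* extends the current run */
                } else {                        /* discontiguity: flush */
                        emit_run(block_to_free, count);
                        block_to_free = nr;
                        count = 1;
                }
        }
        if (count > 0)
                emit_run(block_to_free, count); /* flush the final run */
        return 0;       /* prints runs (100,3) and (200,2) */
}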
static void ext4_free_branches(handle_t *handle, struct inode *inode,
                               struct buffer_head *parent_bh,
                               __le32 *first, __le32 *last, int depth)
{
        /* ... */
        if (ext4_handle_is_aborted(handle))
                return;

        if (depth--) {
                struct buffer_head *bh;
                /* ... */
                while (--p >= first) {
                        /* ... */
                                EXT4_ERROR_INODE(inode,
                                                 "invalid indirect mapped "
                                                 "block %lu (level %d)",
                                                 (unsigned long) nr, depth);
                        /* ... */
                        /* Go read the buffer for the next level down */
                        bh = sb_bread(inode->i_sb, nr);
                        /* ... */
                        /* This zaps the entire block.  Bottom up. */
                        BUFFER_TRACE(bh, "free child branches");
                        ext4_free_branches(handle, inode, bh,
                                           (__le32 *) bh->b_data,
                                           (__le32 *) bh->b_data + addr_per_block,
                                           depth);
                        /* ... */
                        if (ext4_handle_is_aborted(handle))
                                return;
                        if (try_to_extend_transaction(handle, inode)) {
                                ext4_mark_inode_dirty(handle, inode);
                                ext4_truncate_restart_trans(handle, inode,
                                            ext4_blocks_for_truncate(inode));
                        }
                        /* ... */
                                BUFFER_TRACE(parent_bh, "get_write_access");
                                /* ... */
                                BUFFER_TRACE(parent_bh,
                                             "call ext4_handle_dirty_metadata");
                        /* ... */
                }
        } else {
                /* We have reached the bottom of the tree. */
                BUFFER_TRACE(parent_bh, "free data blocks");
                ext4_free_data(handle, inode, parent_bh, first, last);
        }
}
void ext4_ind_truncate(struct inode *inode)
{
        /* ... */
        unsigned blocksize = inode->i_sb->s_blocksize;
        /* ... */
        handle = start_transaction(inode);
        if (IS_ERR(handle))
                return;

        last_block = (inode->i_size + blocksize-1)
                                        >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
        max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
                                        >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
        /* ... */
                err = ext4_discard_partial_page_buffers(handle,
                        mapping, inode->i_size, page_len, 0);
        /* ... */
        if (last_block != max_block) {
                n = ext4_block_to_path(inode, last_block, offsets, NULL);
                /* ... */
        }
        /* ... */
        if (last_block == max_block) {
                /*
                 * It is unnecessary to free any data blocks if last_block is
                 * equal to the indirect block limit.
                 */
                goto out_unlock;
        } else if (n == 1) {            /* direct blocks */
                ext4_free_data(handle, inode, NULL, i_data+offsets[0],
                               i_data + EXT4_NDIR_BLOCKS);
                goto do_indirects;
        }

        partial = ext4_find_shared(inode, n, offsets, chain, &nr);
        /* ... */
                if (partial == chain) {
                        /* Shared branch grows from the inode */
                        ext4_free_branches(handle, inode, NULL,
                                           &nr, &nr+1, (chain+n-1) - partial);
                        /* ... */
                } else {
                        /* Shared branch grows from an indirect block */
                        BUFFER_TRACE(partial->bh, "get_write_access");
                        ext4_free_branches(handle, inode, partial->bh,
                                           partial->p,
                                           partial->p+1, (chain+n-1) - partial);
                }
        /* ... */
        while (partial > chain) {
                ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
                                   (__le32*)partial->bh->b_data+addr_per_block,
                                   (chain+n-1) - partial);
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
do_indirects:
        /* Kill the remaining (whole) subtrees */
        switch (offsets[0]) {
        default:
                /* ... */
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
                /* ... */
        case EXT4_IND_BLOCK:
                /* ... */
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
                /* ... */
        case EXT4_DIND_BLOCK:
                /* ... */
                        ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
                /* ... */
        case EXT4_TIND_BLOCK:
                ;
        }
        /* ... */
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        /* ... */
        trace_ext4_truncate_exit(inode);
}
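/*
 * Worked example of the last_block rounding above, assuming a 4 KiB block
 * size (EXT4_BLOCK_SIZE_BITS == 12): the new i_size is rounded up to a
 * whole number of file-system blocks, giving the first block index the
 * truncate may free. The size value is hypothetical.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long i_size = 10000;      /* hypothetical new size */
        unsigned int blocksize = 4096;
        unsigned int bits = 12;

        unsigned long long last_block = (i_size + blocksize - 1) >> bits;

        /* 10000 bytes -> blocks 0..2 kept, freeing starts at block 3 */
        printf("last_block = %llu\n", last_block);
        return 0;
}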