#include <linux/time.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
/*
 * Flags used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
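/*
 * Editorial note (sketch of intended use, inferred from
 * ext4_split_extent_at() below): callers OR these bits together, e.g.
 * EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_MAY_ZEROOUT asks the split to keep
 * the second half unwritten but permits zeroing out the extent instead
 * if the split itself fails with ENOSPC.
 */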
static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
static void ext4_extent_block_csum_set(struct inode *inode,
	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
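/*
 * Editorial note: with metadata_csum enabled each extent tree block
 * ends in a struct ext4_extent_tail whose et_checksum covers the
 * block's header and entries; it is verified on read and recomputed
 * just before the buffer is written out.
 */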
static int ext4_split_extent(handle_t *handle,
static int ext4_split_extent_at(handle_t *handle,
static int ext4_ext_truncate_extend_restart(handle_t *handle,
	if (!ext4_handle_valid(handle))
	if (handle->h_buffer_credits > needed)
	err = ext4_journal_extend(handle, needed);
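/*
 * Editorial note: if the running handle already holds enough buffer
 * credits nothing needs to be done; otherwise ext4_journal_extend()
 * tries to grow it in place, and only if that fails does the caller
 * have to restart the transaction.
 */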
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
	if (path[depth].p_bh)
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
static inline int ext4_ext_space_block(struct inode *inode, int check)
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
static inline int ext4_ext_space_root(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
	size = sizeof(EXT4_I(inode)->i_data);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		/ sizeof(struct ext4_extent_idx));
	return ext_depth(inode) + 1;
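/*
 * Editorial example (assuming a 4k block size): a full tree block
 * holds (4096 - 12) / 12 = 340 extents or index entries (12-byte
 * header, 12-byte entries), while the in-inode root, built from the
 * 60 bytes of i_data, holds (60 - 12) / 12 = 4. AGGRESSIVE_TEST
 * shrinks these limits so deep trees can be exercised on small files.
 */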
static int ext4_ext_max_entries(struct inode *inode, int depth)
	if (depth == ext_depth(inode)) {
			max = ext4_ext_space_root(inode, 1);
			max = ext4_ext_space_root_idx(inode, 1);
			max = ext4_ext_space_block(inode, 1);
			max = ext4_ext_space_block_idx(inode, 1);
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
	int len = ext4_ext_get_actual_len(ext);
static int ext4_valid_extent_idx(struct inode *inode,
static int ext4_valid_extent_entries(struct inode *inode,
			if (!ext4_valid_extent(inode, ext))
			if (!ext4_valid_extent_idx(inode, ext_idx))
static int __ext4_ext_check(const char *function, unsigned int line,
	const char *error_msg;
		error_msg = "invalid magic";
		error_msg = "unexpected eh_depth";
		error_msg = "invalid eh_max";
	max = ext4_ext_max_entries(inode, depth);
		error_msg = "too large eh_max";
		error_msg = "invalid eh_entries";
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
			 "bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
#define ext4_ext_check(inode, eh, depth) \
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
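/*
 * Editorial note: callers go through the ext4_ext_check() wrapper so
 * that the __func__ and __LINE__ of the call site end up in the
 * corruption report, e.g. ext4_ext_check(inode, ext_block_hdr(bh),
 * depth - 1) while descending the tree.
 */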
static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct buffer_head *bh)
	if (buffer_verified(bh))
	set_buffer_verified(bh);
#define ext4_ext_check_block(inode, eh, depth, bh) \
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
	for (k = 0; k <= l; k++, path++) {
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
	int depth = ext_depth(inode);
	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	int depth = ext_depth(inode);
	if (depth != level) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
				  ext4_idx_pblock(idx),
			ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				  ext4_ext_is_uninitialized(ex),
				  ext4_ext_get_actual_len(ex),
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
	for (i = 0; i <= depth; i++, path++)
ext4_ext_binsearch_idx(struct inode *inode,
	ext_debug("binsearch for %u(idx): ", block);
		  ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
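/*
 * Editorial note: both binsearch helpers run a classic binary search
 * over entries kept sorted by starting logical block, converging on
 * the last entry whose first block is <= the target; with
 * CHECK_BINSEARCH defined the result is re-verified by a linear scan.
 */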
ext4_ext_binsearch(struct inode *inode,
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_uninitialized(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
	eh = ext_inode_hdr(inode);
	ext4_ext_invalidate_cache(inode);
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
			trace_ext4_ext_load_extent(inode, block,
		eh = ext_block_hdr(bh);
					 "ppos %d > depth %d", ppos, depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	ext4_ext_binsearch(inode, path + ppos, block);
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
	return ERR_PTR(-EIO);
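/*
 * Editorial note: the lookup walks from the in-inode root toward the
 * leaf, using ext4_ext_binsearch_idx() at each index level to pick the
 * child block, reading it, and recording bh/hdr in path[ppos]; at the
 * leaf level, ext4_ext_binsearch() then locates the extent itself.
 */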
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
	err = ext4_ext_get_access(handle, inode, curp);
			 "logical %d == ei_block %d!",
			 "eh_entries %d >= eh_max %d!",
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
	ext4_std_error(inode->i_sb, err);
static int ext4_ext_split(handle_t *handle, struct inode *inode,
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
			  " next leaf starts at %d\n",
			  " next leaf starts at %d\n",
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		ablocks[a] = newblock;
	newblock = ablocks[--a];
	bh = sb_getblk(inode->i_sb, newblock);
	neh = ext_block_hdr(bh);
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	err = ext4_ext_get_access(handle, inode, path + depth);
	le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
	ext_debug("create %d intermediate indices\n", k);
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		neh = ext_block_hdr(bh);
		ext4_idx_store_pblock(fidx, oldblock);
		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
			memmove(++fidx, path[i].p_idx,
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		err = ext4_ext_get_access(handle, inode, path + i);
		le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
	err = ext4_ext_insert_index(handle, inode, path + at,
		if (buffer_locked(bh))
	for (i = 0; i < depth; i++) {
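/*
 * Editorial summary: ext4_ext_split() allocates depth - at new blocks,
 * moves the leaf entries at path[depth] (and index entries above,
 * level by level) that follow the insertion point into the new blocks,
 * and finally inserts an index for the new subtree at path[at]; on
 * error the already-allocated blocks are freed again.
 */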
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
	struct buffer_head *bh;
	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
					   newext, &err, flags);
	bh = sb_getblk(inode->i_sb, newblock);
		ext4_std_error(inode->i_sb, err);
	       sizeof(EXT4_I(inode)->i_data));
	neh = ext_block_hdr(bh);
	if (ext_depth(inode))
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	neh = ext_inode_hdr(inode);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
	i = depth = ext_depth(inode);
	curp = path + depth;
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
			err = PTR_ERR(path);
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
			err = PTR_ERR(path);
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
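/*
 * Editorial note: if some level above the full leaf still has room,
 * the tree is split at that level; otherwise it is grown in depth and
 * the path looked up again, repeating until a leaf with free space
 * exists.
 */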
static int ext4_ext_search_left(struct inode *inode,
	ee_len = ext4_ext_get_actual_len(ex);
				  "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
	while (--depth >= 0) {
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
			  "logical %d < ee_block %d + ee_len %d!",
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
static int ext4_ext_search_right(struct inode *inode,
	struct buffer_head *bh = NULL;
	ee_len = ext4_ext_get_actual_len(ex);
				  "first_extent(path[%d].p_hdr) != ex",
	while (--depth >= 0) {
				  "ix != EXT_FIRST_INDEX *logical %d!",
			  "logical %d < ee_block %d + ee_len %d!",
	while (--depth >= 0) {
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		eh = ext_block_hdr(bh);
		block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	eh = ext_block_hdr(bh);
	*phys = ext4_ext_pblock(ex);
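/*
 * Editorial note: the left search only needs the path already in hand
 * (the predecessor lives in the same leaf or an ancestor), while the
 * right search may have to climb to the first ancestor with a next
 * index and then descend its leftmost child chain via sb_bread().
 */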
	while (depth >= 0) {
		if (path[depth].p_ext &&
		    path[depth].p_ext !=
		if (path[depth].p_idx !=
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
	while (depth >= 0) {
		if (path[depth].p_idx !=
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
	int depth = ext_depth(inode);
				 "ex %p == NULL or eh %p == NULL", ex, eh);
	err = ext4_ext_get_access(handle, inode, path + k);
	path[k].p_idx->ei_block = border;
		err = ext4_ext_get_access(handle, inode, path + k);
		path[k].p_idx->ei_block = border;
	unsigned short ext1_ee_len, ext2_ee_len, max_len;
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
	if (ext4_ext_is_uninitialized(ex1))
	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);
	if (ext1_ee_len + ext2_ee_len > max_len)
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
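/*
 * Editorial note: two extents are mergeable only when they agree on
 * the uninitialized bit, are logically and physically contiguous, and
 * the combined length stays within the per-extent maximum
 * (initialized extents cap at 32768 blocks, unwritten ones at 32767).
 */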
static int ext4_ext_try_to_merge_right(struct inode *inode,
	unsigned int depth, len;
	int uninitialized = 0;
	depth = ext_depth(inode);
		if (ext4_ext_is_uninitialized(ex))
				+ ext4_ext_get_actual_len(ex + 1));
			ext4_ext_mark_uninitialized(ex);
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
	unsigned max_root = ext4_ext_space_root(inode, 0);
	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
	if (ext4_journal_extend(handle, 2))
	blk = ext4_idx_pblock(path[0].p_idx);
	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	brelse(path[1].p_bh);
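/*
 * Editorial note: this is the inverse of growing in depth; when a
 * depth-1 tree's single leaf fits into the inode root, its entries
 * are copied back into i_data and the leaf block is freed.
 */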
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
	depth = ext_depth(inode);
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
		(void) ext4_ext_try_to_merge_right(inode, path, ex);
	ext4_ext_try_to_merge_up(handle, inode, path);
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
	unsigned int depth, len1;
	unsigned int ret = 0;
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		b2 &= ~(sbi->s_cluster_ratio - 1);
		b2 = ext4_ext_next_allocated_block(path);
		b2 &= ~(sbi->s_cluster_ratio - 1);
	if (b1 + len1 < b1) {
	if (b1 + len1 > b2) {
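/*
 * Editorial note: b1 + len1 < b1 detects ext4_lblk_t wraparound near
 * EXT_MAX_BLOCKS; if the new extent would overlap the next allocated
 * block b2, its length is trimmed to b2 - b1 instead of failing.
 */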
	unsigned uninitialized = 0;
	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
	depth = ext_depth(inode);
		ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
			  ext4_ext_is_uninitialized(newext),
			  ext4_ext_get_actual_len(newext),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex),
			  ext4_ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (ext4_ext_is_uninitialized(ex))
					+ ext4_ext_get_actual_len(newext));
			ext4_ext_mark_uninitialized(ex);
	depth = ext_depth(inode);
	next = ext4_ext_next_leaf_block(path);
		ext_debug("next leaf block - %u\n", next);
			return PTR_ERR(npath);
		ext_debug("next leaf has no free space(%d,%d)\n",
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	depth = ext_depth(inode);
	err = ext4_ext_get_access(handle, inode, path + depth);
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
			ext_debug("insert %u:%llu:[%d]%d before: "
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
			ext_debug("insert %u:%llu:[%d]%d after: "
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					"move %d extents from 0x%p to 0x%p\n",
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);
	err = ext4_ext_correct_indexes(handle, inode, path);
	ext4_ext_invalidate_cache(inode);
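/*
 * Editorial summary: ext4_ext_insert_extent() first tries to append
 * newext to an existing mergeable extent; failing that it looks for
 * room in the current or next leaf, creates a new leaf if needed,
 * memmoves later entries aside, stores the extent, and then merges
 * neighbors and corrects the parent indices.
 */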
static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
	int depth, exists, err = 0;
			err = PTR_ERR(path);
		depth = ext_depth(inode);
			next = ext4_ext_next_allocated_block(path);
			if (block + num < end)
				+ ext4_ext_get_actual_len(ex)) {
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
			cbex.ec_block = start;
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext4_ext_pblock(ex);
		err = func(inode, next, &cbex, ex, cbdata);
		if (ext_depth(inode) != depth) {
		block = cbex.ec_block + cbex.ec_len;
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	trace_ext4_ext_put_in_cache(inode, block, len, start);
	cex = &EXT4_I(inode)->i_cached_extent;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
	int depth = ext_depth(inode);
		ext_debug("cache gap(before): %u [%u:%u]",
				ext4_ext_get_actual_len(ex));
			+ ext4_ext_get_actual_len(ex)) {
			+ ext4_ext_get_actual_len(ex);
		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				ext4_ext_get_actual_len(ex),
		len = next - lblock;
	ext4_ext_put_in_cache(inode, lblock, len, 0);
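/*
 * Editorial note: caching a gap with start == 0 records a known hole,
 * so a later lookup inside the hole can answer "unallocated" without
 * walking the tree again; the map_blocks fast path distinguishes the
 * two cases by checking the cached extent's start for zero.
 */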
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
	struct ext4_sb_info *sbi;
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
		ext4_ext_store_pblock(ex, cex->ec_start);
	trace_ext4_ext_in_cache(inode, block, ret);
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
	leaf = ext4_idx_pblock(path->p_idx);
	err = ext4_ext_get_access(handle, inode, path);
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);
	int depth = ext_depth(inode);
	int depth = ext_depth(inode);
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	else if (ext4_should_journal_data(inode))
	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
		pblk = ext4_ext_pblock(ex) + ee_len - 1;
		if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
					 sbi->s_cluster_ratio, flags);
			*partial_cluster = 0;
#ifdef EXTENTS_STATS
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
		pblk = ext4_ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, pblk);
		if (pblk & (sbi->s_cluster_ratio - 1) &&
			*partial_cluster = EXT4_B2C(sbi, pblk);
			*partial_cluster = 0;
		start = ext4_ext_pblock(ex);
		ext_debug("free first %u blocks starting %llu\n", num, start);
			   "%u-%u from %u:%u\n",
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	ext_debug("truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	ex_ee_len = ext4_ext_get_actual_len(ex);
	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
			ex_ee_block + ex_ee_len > start) {
		if (ext4_ext_is_uninitialized(ex))
		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < end ?
			ex_ee_block + ex_ee_len - 1 : end;
		if (end < ex_ee_block) {
			ex_ee_len = ext4_ext_get_actual_len(ex);
		} else if (b != ex_ee_block + ex_ee_len - 1) {
					 "can not handle truncate %u:%u "
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
		} else if (a != ex_ee_block) {
			num = a - ex_ee_block;
		credits += (ext_depth(inode)) + 1;
		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		err = ext4_ext_get_access(handle, inode, path + depth);
		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
			ext4_ext_store_pblock(ex, 0);
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);
			*partial_cluster = 0;
		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
			  ext4_ext_pblock(ex));
		ex_ee_len = ext4_ext_get_actual_len(ex);
		err = ext4_ext_correct_indexes(handle, inode, path);
	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
	     *partial_cluster)) {
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
		err = ext4_ext_rm_idx(handle, inode, path + depth);
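/*
 * Editorial note: the leaf is processed from its last extent backward;
 * each extent is clipped to [a, b], its blocks freed through
 * ext4_remove_blocks(), and fully-freed extents are dropped by
 * shrinking eh_entries, with journal credits re-checked per iteration.
 */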
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
	int depth = ext_depth(inode);
	ext_debug("truncate since %u to %u\n", start, end);
	handle = ext4_journal_start(inode, depth + 1);
		return PTR_ERR(handle);
	ext4_ext_invalidate_cache(inode);
	trace_ext4_ext_remove_space(inode, start, depth);
			return PTR_ERR(path);
		depth = ext_depth(inode);
				 "path[%d].p_hdr == NULL",
		if (end >= ee_block &&
		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
			if (ext4_ext_is_uninitialized(ex))
			err = ext4_split_extent_at(handle, inode, path,
					end + 1, split_flag,
		depth = ext_depth(inode);
		path[0].p_hdr = ext_inode_hdr(inode);
	while (i >= 0 && err == 0) {
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
			brelse(path[i].p_bh);
		if (!path[i].p_hdr) {
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		if (!path[i].p_idx) {
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
						depth - i - 1, bh)) {
			path[i + 1].p_bh = bh;
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				err = ext4_ext_rm_idx(handle, inode, path + i);
			brelse(path[i].p_bh);
	trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
					 path->p_hdr->eh_entries);
	if (partial_cluster && path->p_hdr->eh_entries == 0) {
				 EXT4_SB(sb)->s_cluster_ratio, flags);
		partial_cluster = 0;
	if (path->p_hdr->eh_entries == 0) {
		err = ext4_ext_get_access(handle, inode, path);
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
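/*
 * Editorial summary: removal runs as an explicit depth-first walk
 * driven by i: descend while an index still has children to visit,
 * call ext4_ext_rm_leaf() at the bottom, and on the way back up drop
 * index blocks that became empty; a root left with zero entries is
 * reset to depth 0.
 */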
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
#ifdef AGGRESSIVE_TEST
		", aggressive tests"
#ifdef CHECK_BINSEARCH
#ifdef EXTENTS_STATS
#ifdef EXTENTS_STATS
	EXT4_SB(sb)->s_ext_min = 1 << 30;
#ifdef EXTENTS_STATS
	struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
	unsigned int ee_len;
	ee_len = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);
	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
	ext_debug("ext4_split_extents_at: inode %lu, logical "
		  "block %llu\n", inode->i_ino, (unsigned long long)split);
	depth = ext_depth(inode);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);
	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	err = ext4_ext_get_access(handle, inode, path + depth);
	if (split == ee_block) {
			ext4_ext_mark_uninitialized(ex);
			ext4_ext_mark_initialized(ex);
			ext4_ext_try_to_merge(handle, inode, path, ex);
	memcpy(&orig_ex, ex, sizeof(orig_ex));
		ext4_ext_mark_uninitialized(ex);
		goto fix_extent_len;
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);
			err = ext4_ext_zeroout(inode, ex2);
			err = ext4_ext_zeroout(inode, ex);
			err = ext4_ext_zeroout(inode, &orig_ex);
			goto fix_extent_len;
		ext4_ext_try_to_merge(handle, inode, path, ex);
	goto fix_extent_len;
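/*
 * Editorial summary: the extent is cut at @split into ex (head) and
 * ex2 (tail); if inserting ex2 fails with ENOSPC and the caller set
 * EXT4_EXT_MAY_ZEROOUT, the affected half (or the whole original
 * extent) is zeroed out and marked initialized instead, and any other
 * failure restores ee_len via fix_extent_len.
 */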
static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
	depth = ext_depth(inode);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);
	if (map->m_lblk + map->m_len < ee_block + ee_len) {
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
		err = ext4_split_extent_at(handle, inode, path,
		return PTR_ERR(path);
	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
	return err ? err : map->m_len;
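/*
 * Editorial note: a map that starts and ends inside one extent needs
 * two cuts; the right boundary is split first, the path looked up
 * again, then the left boundary, so at most three extents result.
 */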
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
	struct ext4_sb_info *sbi;
	int allocated, max_zeroout = 0;
	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
		  "block %llu, max_blocks %u\n", inode->i_ino,
	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
	depth = ext_depth(inode);
	ee_len = ext4_ext_get_actual_len(ex);
	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
	BUG_ON(!ext4_ext_is_uninitialized(ex));
	if ((map->m_lblk == ee_block) &&
	    (map->m_len < ee_len) &&
		unsigned int prev_len, write_len;
		prev_len = ext4_ext_get_actual_len(prev_ex);
		prev_pblk = ext4_ext_pblock(prev_ex);
		ee_pblk = ext4_ext_pblock(ex);
		write_len = map->m_len;
		if ((!ext4_ext_is_uninitialized(prev_ex)) &&
		    ((prev_lblk + prev_len) == ee_block) &&
		    ((prev_pblk + prev_len) == ee_pblk) &&
			err = ext4_ext_get_access(handle, inode, path + depth);
			trace_ext4_ext_convert_to_initialized_fastpath(inode,
			ext4_ext_store_pblock(ex, ee_pblk + write_len);
			ext4_ext_mark_uninitialized(ex);
			allocated = write_len;
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
		max_zeroout = sbi->s_extent_max_zeroout_kb >>
			inode->i_sb->s_blocksize_bits;
	if (max_zeroout && (ee_len <= max_zeroout)) {
		err = ext4_ext_zeroout(inode, ex);
		err = ext4_ext_get_access(handle, inode, path + depth);
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(handle, inode, path, ex);
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;
	if (max_zeroout && (allocated > map->m_len)) {
		if (allocated <= max_zeroout) {
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
			if (map->m_lblk != ee_block) {
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
	allocated = ext4_split_extent(handle, inode, path,
				      &split_map, split_flag, 0);
	return err ? err : allocated;
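/*
 * Editorial summary: a write into an unwritten extent is satisfied,
 * in order of preference, by (1) shifting the written head into a
 * contiguous initialized left neighbor, (2) zeroing the whole extent
 * when it is small enough, or (3) zeroing one flank and splitting so
 * that at most two extra extents are created.
 */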
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
	int split_flag = 0, depth;
	ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
		  "block %llu, max_blocks %u\n", inode->i_ino,
	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
	depth = ext_depth(inode);
	ee_len = ext4_ext_get_actual_len(ex);
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
	depth = ext_depth(inode);
	ee_len = ext4_ext_get_actual_len(ex);
	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
		  "block %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)ee_block, ee_len);
	if (ee_block != map->m_lblk || ee_len > map->m_len) {
		err = ext4_split_unwritten_extents(handle, inode, map, path,
			err = PTR_ERR(path);
		depth = ext_depth(inode);
	err = ext4_ext_get_access(handle, inode, path + depth);
	ext4_ext_mark_initialized(ex);
	ext4_ext_try_to_merge(handle, inode, path, ex);
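/*
 * Editorial note: at I/O completion only the exact written range may
 * be marked initialized, so an extent larger than the map is split
 * first and then the matching piece is converted and merged.
 */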
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
	for (i = 0; i < count; i++)
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
	depth = ext_depth(inode);
	    ext4_ext_get_actual_len(last_ex))
	for (i = depth - 1; i >= 0; i--)
static int ext4_find_delalloc_range(struct inode *inode,
				    int search_hint_reverse)
	struct buffer_head *head, *bh = NULL;
		search_hint_reverse = 0;
	if (search_hint_reverse)
	while ((i >= lblk_start) && (i <= lblk_end)) {
		if (!page_has_buffers(page))
		head = page_buffers(page);
			if (unlikely(pg_lblk < lblk_start)) {
			if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
				trace_ext4_find_delalloc_range(inode,
						lblk_start, lblk_end,
						search_hint_reverse,
				if (search_hint_reverse)
		} while ((i >= lblk_start) && (i <= lblk_end) &&
			 ((bh = bh->b_this_page) != head));
		if (search_hint_reverse)
	trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
				       search_hint_reverse, 0, 0);
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
			       int search_hint_reverse)
	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
	return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
					search_hint_reverse);
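/*
 * Editorial example: s_cluster_ratio is a power of two, so with a
 * ratio of 16 (e.g. 64k clusters over 4k blocks) lblk & ~(16 - 1)
 * rounds lblk down to its cluster start and the cluster spans
 * [lblk_start, lblk_start + 15].
 */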
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
			   unsigned int num_blks)
	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
	unsigned int allocated_clusters = 0;
	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
		lblk_to = lblk_from + c_offset - 1;
		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
			allocated_clusters--;
	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
	if (allocated_clusters && c_offset) {
		lblk_from = lblk_start + num_blks;
		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
			allocated_clusters--;
	return allocated_clusters;
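/*
 * Editorial note: a boundary cluster that also holds delalloc'd blocks
 * outside [lblk_start, lblk_start + num_blks) was already reserved for
 * that other data, so it is subtracted from the count this allocation
 * may claim back from the reservation pool.
 */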
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
	trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
		ret = ext4_split_unwritten_extents(handle, inode, map,
			ext4_set_io_unwritten_flag(inode, io);
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		if (ext4_should_dioread_nolock(inode))
		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
		ext4_update_inode_fsync_trans(handle, inode, 1);
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
		unsigned int reserved_clusters;
		reserved_clusters = get_reserved_cluster_alloc(inode,
		if (reserved_clusters)
	err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_len = allocated;
	return err ? err : allocated;
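/*
 * Editorial note: the handler dispatches on flags; PRE_IO splits the
 * extent and leaves it unwritten (tagged so endio can finish it),
 * CONVERT marks the written range initialized at I/O completion, and
 * a plain write falls through to full conversion.
 */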
static int get_implied_cluster_alloc(struct super_block *sb,
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		if (map->m_lblk < ee_block)
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
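/*
 * Editorial note: if the requested block falls in the same cluster as
 * an existing extent's first or last cluster, that cluster is already
 * allocated; the mapping is synthesized from the cluster's physical
 * start plus the block's offset within it, with no new allocation.
 */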
	int free_on_err = 0, err = 0, depth, ret;
	unsigned int allocated = 0, offset = 0;
	unsigned int allocated_clusters = 0;
	int set_unwritten = 0;
	ext_debug("blocks %u/%u requested for inode %lu\n",
	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
			if ((sbi->s_cluster_ratio > 1) &&
			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
			if (sbi->s_cluster_ratio > 1)
					+ ext4_ext_pblock(&newex);
			allocated = ext4_ext_get_actual_len(&newex) -
		err = PTR_ERR(path);
	depth = ext_depth(inode);
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
				 "lblock: %lu, depth: %d pblock %lld",
				 (unsigned long) map->m_lblk, depth,
		unsigned short ee_len;
		ee_len = ext4_ext_get_actual_len(ex);
		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
			newblock = map->m_lblk - ee_block + ee_start;
			allocated = ee_len - (map->m_lblk - ee_block);
				  ee_block, ee_len, newblock);
			if (!ext4_ext_is_uninitialized(ex)) {
				ext4_ext_put_in_cache(inode, ee_block,
			ret = ext4_ext_handle_uninitialized_extents(
				handle, inode, map, path, flags,
				allocated, newblock);
	if ((sbi->s_cluster_ratio > 1) &&
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
	cluster_offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
	if (cluster_offset && ex &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
		goto got_allocated_blocks;
	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
	if ((sbi->s_cluster_ratio > 1) && ex2 &&
	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
		goto got_allocated_blocks;
		!(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
		(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
		allocated = ext4_ext_get_actual_len(&newex);
		allocated = map->m_len;
	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
		  ar.goal, newblock, allocated);
	allocated_clusters = ar.len;
	if (ar.len > allocated)
got_allocated_blocks:
	ext4_ext_store_pblock(&newex, newblock + offset);
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		ext4_ext_mark_uninitialized(&newex);
		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
		if (ext4_should_dioread_nolock(inode))
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
		err = check_eofblocks_fl(handle, inode, map->m_lblk,
	if (!err && set_unwritten) {
			ext4_set_io_unwritten_flag(inode, io);
			ext4_set_inode_state(inode,
					     EXT4_STATE_DIO_UNWRITTEN);
	if (err && free_on_err) {
		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
				 ext4_ext_get_actual_len(&newex), fb_flags);
	newblock = ext4_ext_pblock(&newex);
	allocated = ext4_ext_get_actual_len(&newex);
	if (allocated > map->m_len)
		allocated = map->m_len;
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		reserved_clusters = get_reserved_cluster_alloc(inode,
		if (reserved_clusters) {
						reserved_clusters, 0);
			BUG_ON(allocated_clusters < reserved_clusters);
			if (reserved_clusters < allocated_clusters) {
				int reservation = allocated_clusters -
					dquot_reserve_block(inode,
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
		ext4_update_inode_fsync_trans(handle, inode, 1);
		ext4_update_inode_fsync_trans(handle, inode, 0);
	if (allocated > map->m_len)
		allocated = map->m_len;
	map->m_len = allocated;
	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
				       newblock, map->m_len, err ? err : allocated);
	return err ? err : allocated;
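/*
 * Editorial summary: ext4_ext_map_blocks() tries, in order, the
 * single-extent cache, an existing extent in the tree, an implied
 * bigalloc cluster, and finally mballoc allocation guided by the
 * left/right neighbors; newly allocated space is inserted into the
 * tree and the result is trimmed to map->m_len.
 */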
	handle = ext4_journal_start(inode, err);
			mapping, inode->i_size, page_len, 0);
	ext4_ext_invalidate_cache(inode);
	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
		ext4_handle_sync(handle);
static void ext4_falloc_update_inode(struct inode *inode,
				     int mode, loff_t new_size,
				     int update_ctime)
		if (!timespec_equal(&inode->i_ctime, &now))
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
		if (new_size > i_size_read(inode))
	struct inode *inode = file->f_path.dentry->d_inode;
	unsigned int max_blocks;
	unsigned int credits, blkbits = inode->i_blkbits;
	trace_ext4_fallocate_enter(inode, offset, len, mode);
	map.m_lblk = offset >> blkbits;
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
	if (mode & FALLOC_FL_KEEP_SIZE)
	while (ret >= 0 && ret < max_blocks) {
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
				 "returned error inode#%lu, block=%u, "
				 "max_blocks=%u", __func__,
					blkbits) >> blkbits))
			new_size = offset + len;
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
		ext4_falloc_update_inode(inode, mode, new_size,
			ext4_handle_sync(handle);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				  ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
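/*
 * Editorial note: fallocate loops over ext4_ext_map_blocks() with
 * EXT4_GET_BLOCKS_CREATE_UNINIT_EXT, advancing m_lblk by the number
 * of blocks mapped on each pass, so the preallocated range appears as
 * unwritten extents and reads back as zeroes.
 */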
	unsigned int max_blocks;
	unsigned int credits, blkbits = inode->i_blkbits;
	map.m_lblk = offset >> blkbits;
	while (ret >= 0 && ret < max_blocks) {
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
				     "%s:%d: inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
		if (ret <= 0 || ret2)
	return ret > 0 ? ret2 : ret;
static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
	unsigned char blksize_bits;
	blksize_bits = inode->i_sb->s_blocksize_bits;
		struct buffer_head *bh = NULL;
		struct buffer_head *head = NULL;
		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
			for (index = 0; index < ret; index++)
		end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
		if (!page_has_buffers(pages[index]))
		head = page_buffers(pages[index]);
				if (buffer_mapped(bh) &&
					start_index = index - 1;
					goto found_mapped_buffer;
				bh = bh->b_this_page;
			} while (bh != head);
		if (ret > 0 && pages[0]->index == last_offset)
			head = page_buffers(pages[0]);
found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
			logical = (__u64)end << blksize_bits;
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
			} while (bh != head);
			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
				head = page_buffers(pages[index]);
				if (pages[index]->index !=
				    pages[start_index]->index + index
					if (!buffer_delay(bh))
						goto found_delayed_extent;
					bh = bh->b_this_page;
				} while (bh != head);
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
found_delayed_extent:
		if (ret == nr_pages && bh != NULL &&
			for (index = 0; index < ret; index++)
		for (index = 0; index < ret; index++)
	if (ex && ext4_ext_is_uninitialized(ex))
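/*
 * Editorial note: for a hole reported by the extent tree, the fiemap
 * callback still scans the page cache for delayed-allocation buffers
 * so not-yet-written data is reported as FIEMAP_EXTENT_DELALLOC
 * rather than as a gap.
 */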
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
static int ext4_xattr_fiemap(struct inode *inode,
	int blockbits = inode->i_sb->s_blocksize_bits;
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		physical = iloc.bh->b_blocknr << blockbits;
			EXT4_I(inode)->i_extra_isize;
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	return (error < 0 ? error : 0);
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int credits, err = 0;
					   offset, offset + length - 1);
	if (offset >= inode->i_size)
	if (offset + length > inode->i_size) {
	if (last_page_offset > first_page_offset) {
					  last_page_offset - 1);
	ext4_inode_block_unlocked_dio(inode);
	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
	if (first_page > last_page) {
				mapping, offset, length, 0);
		page_len = first_page_offset - offset;
					offset, page_len, 0);
		page_len = offset + length - last_page_offset;
					last_page_offset, page_len, 0);
	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
				mapping, inode->i_size, page_len, 0);
	if (first_block >= stop_block)
	ext4_ext_invalidate_cache(inode);
	err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
	ext4_ext_invalidate_cache(inode);
		ext4_handle_sync(handle);
	ext4_inode_resume_unlocked_dio(inode);
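/*
 * Editorial summary: punch hole writes out and evicts the affected
 * page-cache range, zeroes the partial pages at each end (whose
 * blocks are not removed from the tree), then removes the fully
 * covered blocks with ext4_ext_remove_space() under a journal handle.
 */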
		error = ext4_xattr_fiemap(inode, fieinfo);
		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					    ext4_ext_fiemap_cb, fieinfo);