#include <linux/time.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	raw->i_checksum_lo = csum_lo;

	__u32 provided, calculated;
	calculated = ext4_inode_csum(inode, raw, ei);
	return provided == calculated;
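/*
 * Note on the fragments above: ext4_inode_csum() is computed with
 * i_checksum_lo temporarily zeroed (and restored right after), so the
 * stored checksum bytes never feed back into the calculation;
 * verification then simply compares the on-disk value against a freshly
 * recomputed one.
 */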
static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
	csum = ext4_inode_csum(inode, raw, ei);

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
	trace_ext4_begin_ordered_truncate(inode, new_size);
	if (!EXT4_I(inode)->jinode)
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle,
				      struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);
static int ext4_inode_is_fast_symlink(struct inode *inode)
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	jbd_debug(2, "restarting handle %p\n", handle);
	ret = ext4_journal_restart(handle, nblocks);
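/* Restarting the handle like this lets a long-running truncate commit
 * what it has done so far and continue in a fresh transaction, instead
 * of pinning one oversized transaction in the journal. */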
	trace_ext4_evict_inode(inode);

	if (ext4_should_journal_data(inode) &&
		journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);

	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		sb_end_intwrite(inode->i_sb);

	ext4_handle_sync(handle);
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);

	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
			err = ext4_journal_restart(handle, 3);
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
			sb_end_intwrite(inode->i_sb);

	sb_end_intwrite(inode->i_sb);
qsize_t *ext4_get_reserved_space(struct inode *inode)
	return &EXT4_I(inode)->i_reserved_quota;

static int ext4_calc_metadata_amount(struct inode *inode,
				     ext4_lblk_t lblock)

				int used, int quota_claim)
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
		 "with only %d reserved data blocks",
		 __func__, inode->i_ino, used,
		 "with only %d reserved metadata blocks\n", __func__,
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
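/* Reservation accounting here is in clusters (bigalloc), while quota is
 * charged in blocks: EXT4_C2B() converts the cluster count before each
 * dquot_* call, and s_dirtyclusters_counter tracks the outstanding
 * delalloc clusters. */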
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
		 "lblock %lu mapped to illegal pblock "
		 "(length %d)", (unsigned long) map->m_lblk,

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
				  unsigned int max_pages)
	int i, nr_pages, done = 0;

	pagevec_init(&pvec, 0);
		for (i = 0; i < nr_pages; i++) {
			struct buffer_head *bh, *head;
			    PageWriteback(page) ||
			    page->index != idx) {
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
					bh = bh->b_this_page;
				} while (!done && (bh != head));
		if (num >= max_pages) {
	pagevec_release(&pvec);
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
	pagevec_init(&pvec, 0);
	while (index <= end) {
		for (i = 0; i < nr_pages; i++) {
			struct buffer_head *bh, *head;
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
		pagevec_release(&pvec);
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
		set_buffers_da_mapped(inode, map);
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
	handle_t *handle = ext4_journal_current_handle();

	if (flags && !handle) {
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
	bh->b_size = inode->i_sb->s_blocksize * map.m_len;
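/* When direct I/O calls in without a running handle, one is started
 * here; DIO_MAX_BLOCKS bounds how many blocks a single handle maps, so
 * dio_credits stays a sane journal credit estimate. */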
			  struct buffer_head *bh, int create)
	return _ext4_get_block(inode, iblock, bh,

	struct buffer_head *bh;

	J_ASSERT(handle != NULL || create == 0);
	J_ASSERT(create != 0);
	J_ASSERT(handle != NULL);
	BUFFER_TRACE(bh, "call get_create_access");
	if (!fatal && !buffer_uptodate(bh)) {
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	BUFFER_TRACE(bh, "not a new buffer");

	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (buffer_uptodate(bh))
	if (buffer_uptodate(bh))
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
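/* walk_page_buffers() applies fn to every buffer overlapping [from, to);
 * buffers wholly outside the range are skipped, but *partial is raised
 * if any skipped buffer is not uptodate, so callers can tell whether
 * the page remained only partially uptodate. */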
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
	int dirty = buffer_dirty(bh);

	if (!buffer_mapped(bh) || buffer_freed(bh))
		clear_buffer_dirty(bh);

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
	struct inode *inode = mapping->host;
	int ret, needed_blocks;

	trace_ext4_write_begin(inode, pos, len, flags);
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	if (ext4_should_dioread_nolock(inode))
	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
					from, to, NULL,
					do_journal_get_write_access);
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
	if (!buffer_mapped(bh) || buffer_freed(bh))
	set_buffer_uptodate(bh);
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
	if (pos + copied > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, (pos + copied));
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);

	return ret ? ret : copied;

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);

	return ret ? ret : copied;
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	BUG_ON(!ext4_handle_valid(handle));
	if (!PageUptodate(page))
	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);

	return ret ? ret : copied;
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
	unsigned int md_needed;

	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
		ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
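/* Each delalloc reservation covers one data cluster plus a worst-case
 * metadata estimate from ext4_calc_metadata_amount(); if the free-space
 * counters cannot cover it, the quota reservation taken above is rolled
 * back and the caller sees ENOSPC. */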
static void ext4_da_release_space(struct inode *inode, int to_free)
	trace_ext4_da_release_space(inode, to_free);
		 "ino %lu, to_free %d with only %d reserved "
		 "data blocks", inode->i_ino, to_free,
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;

	head = page_buffers(page);
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	while (num_clusters > 0) {
		((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
			ext4_da_release_space(inode, 1);
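/* With bigalloc, reservations are per cluster: the loop above releases
 * a cluster's reservation only once no delayed buffer belonging to that
 * cluster remains (s_cluster_ratio == 1 is the plain one-block-per-
 * cluster layout, which can release immediately). */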
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;

	memset(&io_submit, 0, sizeof(io_submit));
	pagevec_init(&pvec, 0);
	while (index <= end) {
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index == size >> PAGE_CACHE_SHIFT)
			cur_logical = index << (PAGE_CACHE_SHIFT -
			pblock = map->m_pblk + (cur_logical -

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			if (!page_has_buffers(page)) {
						noalloc_get_block_write)) {
			bh = page_bufs = page_buffers(page);
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
						BUG_ON(bh->b_blocknr != pblock);
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				if (ext4_bh_delay_or_unwritten(NULL, bh))
				bh = bh->b_this_page;
				block_start += bh->b_size;
			} while (bh != page_bufs);

			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
						noalloc_get_block_write,
						mpd->wbc, ext4_end_io_buffer_write);
						noalloc_get_block_write, mpd->wbc);
		pagevec_release(&pvec);
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
	struct inode *inode = mpd->inode;

	while (index <= end) {
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			ClearPageUptodate(page);
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);

static void ext4_print_free_blocks(struct inode *inode)
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
		 EXT4_I(inode)->i_reserved_data_blocks);
		 EXT4_I(inode)->i_reserved_meta_blocks);
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
	int err, blks, get_blocks_flags;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	handle_t *handle = NULL;

	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))

	handle = ext4_journal_current_handle();
	map.m_len = max_blocks;
	if (ext4_should_dioread_nolock(mpd->inode))
	if (mpd->b_state & (1 << BH_Delay))
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 "This should not happen!! Data will be lost\n");
			ext4_print_free_blocks(mpd->inode);
		ext4_da_block_invalidatepages(mpd);
	for (i = 0; i < map.m_len; i++)
	if (ext4_should_order_data(mpd->inode)) {
		err = ext4_jbd2_file_inode(handle, mpd->inode);
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	ext4_update_i_disksize(mpd->inode, disksize);
		ext4_error(mpd->inode->i_sb,
			   "Failed to mark inode %lu dirty",

	mpage_da_submit_io(mpd, mapp);
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		  (1 << BH_Delay) | (1 << BH_Unwritten))

static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
	} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
			mpd->inode->i_blkbits;
	mpage_da_map_and_submit(mpd);
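/* mpage_add_bh_to_extent() grows the pending extent in mpd while buffers
 * stay logically contiguous with compatible BH_FLAGS state; the 8 MB
 * check above caps extent size, and a buffer that cannot be merged
 * flushes the accumulated extent via mpage_da_map_and_submit(). */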
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);

static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))

	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
		retval = ext4_da_reserve_space(inode, iblock);
	map_bh(bh, inode->i_sb, invalid_block);
	set_buffer_delay(bh);
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh, int create)
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (buffer_unwritten(bh)) {
	set_buffer_mapped(bh);

static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
	return _ext4_get_block(inode, iblock, bh_result, 0);
static int bget_one(handle_t *handle, struct buffer_head *bh)

static int bput_one(handle_t *handle, struct buffer_head *bh)

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;

	ClearPageChecked(page);
	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	BUG_ON(!ext4_handle_valid(handle));

	ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
				do_journal_get_write_access);
	err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
	int ret = 0, commit_write = 0;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_CACHE_SHIFT)
	if (!page_has_buffers(page)) {
					noalloc_get_block_write)) {
	page_bufs = page_buffers(page);
	if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
			      ext4_bh_delay_or_unwritten)) {
	if (PageChecked(page) && ext4_should_journal_data(inode))
		return __ext4_journalled_writepage(page, len);

	if (buffer_uninit(page_bufs)) {
		ext4_set_bh_endio(page_bufs, inode);
				   wbc, ext4_end_io_buffer_write);
static int ext4_da_writepages_trans_blocks(struct inode *inode)
	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
static int write_cache_pages_da(struct address_space *mapping,
				struct writeback_control *wbc,
				struct mpage_da_data *mpd,
				pgoff_t *done_index)
	struct buffer_head *bh, *head;
	struct inode *inode = mapping->host;
	unsigned int nr_pages;
	int i, tag, ret = 0;

	pagevec_init(&pvec, 0);
	*done_index = index;
	while (index <= end) {
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
			*done_index = page->index + 1;
				mpage_da_map_and_submit(mpd);
				goto ret_extent_tail;
			if (!PageDirty(page) ||
			    (PageWriteback(page) &&
				wait_on_page_writeback(page);
			BUG_ON(PageWriteback(page));

			if (!page_has_buffers(page)) {
				mpage_add_bh_to_extent(mpd, logical,
						       (1 << BH_Dirty) | (1 << BH_Uptodate));
					goto ret_extent_tail;
				head = page_buffers(page);
					BUG_ON(buffer_locked(bh));
					if (ext4_bh_delay_or_unwritten(NULL, bh)) {
						mpage_add_bh_to_extent(mpd, logical,
						goto ret_extent_tail;
					} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
				} while ((bh = bh->b_this_page) != head);

			if (nr_to_write > 0) {
				if (nr_to_write == 0 &&
		pagevec_release(&pvec);
	pagevec_release(&pvec);
static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
	int range_whole = 0;
	handle_t *handle = NULL;
	struct inode *inode = mapping->host;
	int pages_written = 0;
	unsigned int max_pages;
	int range_cyclic, cycled = 1, io_done = 0;
	int needed_blocks, ret = 0;
	long desired_nr_to_write, nr_to_writebump = 0;
	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

	trace_ext4_da_writepages(inode, wbc);
	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
	if (!range_cyclic && range_whole) {
	desired_nr_to_write = ext4_num_dirty_pages(inode, index,
	if (desired_nr_to_write > max_pages)
		desired_nr_to_write = max_pages;
	nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;

		BUG_ON(ext4_should_journal_data(inode));
		needed_blocks = ext4_da_writepages_trans_blocks(inode);

		handle = ext4_journal_start(inode, needed_blocks);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
				   "%ld pages, ino %lu; err %d", __func__,
			goto out_writepages;
		ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
			mpage_da_map_and_submit(&mpd);
		trace_ext4_da_write_pages(inode, &mpd);
	if (!io_done && !cycled) {
	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
#define FALL_BACK_TO_NONDELALLOC 1
static int ext4_nonda_switch(struct super_block *sb)
	s64 free_blocks, dirty_blocks;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
	if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
	if (2 * free_blocks < 3 * dirty_blocks ||
	    free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
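/* Heuristic: once free space falls low relative to outstanding delalloc
 * (dirty) blocks, new writes are switched to the non-delalloc path so
 * allocation failures surface as ENOSPC at write(2) time rather than
 * silently at writeback time. */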
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
	int ret, retries = 0;
	struct inode *inode = mapping->host;

	if (ext4_nonda_switch(inode->i_sb)) {
		return ext4_write_begin(file, mapping, pos,
					len, flags, pagep, fsdata);
	*fsdata = (void *)0;
	trace_ext4_da_write_begin(inode, pos, len, flags);
	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		if (pos + len > inode->i_size)
			ext4_truncate_failed_write(inode);
static int ext4_da_should_update_i_disksize(struct page *page,
					    unsigned long offset)
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;

	bh = page_buffers(page);
	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
static int ext4_da_write_end(struct file *file,
			     struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned copied,
			     struct page *page, void *fsdata)
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();
	int write_mode = (int)(unsigned long)fsdata;

		switch (ext4_inode_journal_mode(inode)) {
			return ext4_ordered_write_end(file, mapping, pos,
						      len, copied, page, fsdata);
			return ext4_writeback_write_end(file, mapping, pos,
							len, copied, page, fsdata);

	trace_ext4_da_write_end(inode, pos, len, copied);
	end = start + copied - 1;

	new_i_size = pos + copied;
	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
		if (ext4_da_should_update_i_disksize(page, end)) {
			if (new_i_size > EXT4_I(inode)->i_disksize) {
				if (ext4_should_order_data(inode))
					ret = ext4_jbd2_file_inode(handle,
				EXT4_I(inode)->i_disksize = new_i_size;

	return ret ? ret : copied;
static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
	ext4_da_page_release_reservation(page, offset);
	ext4_invalidatepage(page, offset);

	trace_ext4_alloc_da_blocks(inode);

	if (!EXT4_I(inode)->i_reserved_data_blocks &&
	    !EXT4_I(inode)->i_reserved_meta_blocks)

	struct inode *inode = mapping->host;
	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
static int ext4_readpage(struct file *file, struct page *page)
	trace_ext4_readpage(page);

ext4_readpages(struct file *file, struct address_space *mapping,
static void ext4_invalidatepage_free_endio(struct page *page,
					   unsigned long offset)
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;

	if (!page_has_buffers(page))
	head = bh = page_buffers(page);
		if (offset <= curr_off && test_clear_buffer_uninit(bh)
			bh->b_private = NULL;
			bh->b_end_io = NULL;
		curr_off = curr_off + bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);

static void ext4_invalidatepage(struct page *page, unsigned long offset)
	trace_ext4_invalidatepage(page, offset);
	if (ext4_should_dioread_nolock(page->mapping->host))
		ext4_invalidatepage_free_endio(page, offset);
	ClearPageChecked(page);

static int ext4_releasepage(struct page *page, gfp_t wait)
	trace_ext4_releasepage(page);
	if (!page_has_buffers(page))
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create)
	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result, int flags)
	handle_t *handle = ext4_journal_current_handle();

	ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
		   inode->i_ino, flags);
	bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
			    ssize_t size, void *private, int ret,
			    bool is_async)
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

	if (!io_end || !size)
	ext_debug("ext4_end_io_dio(): io_end 0x%p "
		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
	io_end->iocb = iocb;
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
	struct inode *inode;

	if (!test_clear_buffer_uninit(bh) || !io_end)
		 "sb umounted, discard end_io request for inode %lu",
		 io_end->inode->i_ino);
	inode = io_end->inode;
	ext4_set_io_unwritten_flag(inode, io_end);
	bh->b_private = NULL;
	bh->b_end_io = NULL;
	clear_buffer_uninit(bh);

static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
	struct page *page = bh->b_page;
	size_t size = bh->b_size;

	get_page(io_end->page);
	bh->b_private = io_end;
	bh->b_end_io = ext4_end_io_buffer_write;
static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
				  const struct iovec *iov, loff_t offset,
				  unsigned long nr_segs)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = offset + count;

	overwrite = *((int *)iocb->private);
		ext4_inode_aio_set(inode, NULL);
	if (!is_sync_kiocb(iocb)) {
		ext4_inode_aio_set(inode, io_end);
				   inode->i_sb->s_bdev, iov,
				   ext4_get_block_write_nolock,
				   inode->i_sb->s_bdev, iov,
				   ext4_get_block_write,
		ext4_inode_aio_set(inode, NULL);
	if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
	} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
						EXT4_STATE_DIO_UNWRITTEN)) {
		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
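/* If the DIO completed synchronously (not -EIOCBQUEUED) after writing
 * into unwritten extents, those extents are converted to written here
 * and EXT4_STATE_DIO_UNWRITTEN is cleared; the async case defers the
 * conversion to ext4_end_io_dio(). */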
static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (ext4_should_journal_data(inode))
	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
	trace_ext4_direct_IO_exit(inode, offset,
				  iov_length(iov, nr_segs), rw, ret);

static int ext4_journalled_set_page_dirty(struct page *page)
	SetPageChecked(page);
static const struct address_space_operations ext4_ordered_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_ordered_write_end,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,

static const struct address_space_operations ext4_writeback_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_writeback_write_end,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,

static const struct address_space_operations ext4_journalled_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.write_begin		= ext4_write_begin,
	.write_end		= ext4_journalled_write_end,
	.set_page_dirty		= ext4_journalled_set_page_dirty,
	.invalidatepage		= ext4_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,

static const struct address_space_operations ext4_da_aops = {
	.readpage		= ext4_readpage,
	.readpages		= ext4_readpages,
	.writepage		= ext4_writepage,
	.writepages		= ext4_da_writepages,
	.write_begin		= ext4_da_write_begin,
	.write_end		= ext4_da_write_end,
	.invalidatepage		= ext4_da_invalidatepage,
	.releasepage		= ext4_releasepage,
	.direct_IO		= ext4_direct_IO,
	switch (ext4_inode_journal_mode(inode)) {
			inode->i_mapping->a_ops = &ext4_da_aops;
			inode->i_mapping->a_ops = &ext4_ordered_aops;
			inode->i_mapping->a_ops = &ext4_da_aops;
			inode->i_mapping->a_ops = &ext4_writeback_aops;
		inode->i_mapping->a_ops = &ext4_journalled_aops;
		loff_t length, int flags)
	struct inode *inode = mapping->host;

				   mapping_gfp_mask(mapping) & ~__GFP_FS);
	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
							from, length, flags);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags)
	unsigned int blocksize, max, pos;
	struct buffer_head *bh;

	blocksize = inode->i_sb->s_blocksize;
	if (index != page->index)
	if (length > max || length < 0)
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
	bh = page_buffers(page);
	while (offset >= pos) {
		bh = bh->b_this_page;

	while (pos < offset + length) {
		unsigned int end_of_block, range_to_discard;

		range_to_discard = offset + length - pos;
		end_of_block = blocksize - (pos & (blocksize-1));
		if (range_to_discard > end_of_block)
			range_to_discard = end_of_block;

		if (range_to_discard == blocksize) {
			clear_buffer_dirty(bh);
			clear_buffer_mapped(bh);
			clear_buffer_req(bh);
			clear_buffer_new(bh);
			clear_buffer_delay(bh);
			clear_buffer_unwritten(bh);
			clear_buffer_uptodate(bh);
			zero_user(page, pos, range_to_discard);
			BUFFER_TRACE(bh, "Buffer discarded");

		if (!buffer_mapped(bh)) {
			BUFFER_TRACE(bh, "unmapped");
			if (!buffer_mapped(bh)) {
				BUFFER_TRACE(bh, "still unmapped");

		if (PageUptodate(page))
			set_buffer_uptodate(bh);

		if (!buffer_uptodate(bh)) {
			if (!buffer_uptodate(bh))

		if (ext4_should_journal_data(inode)) {
			BUFFER_TRACE(bh, "get write access");

		zero_user(page, pos, range_to_discard);

		if (ext4_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "Partial buffer zeroed");
		bh = bh->b_this_page;
		pos += range_to_discard;
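/* The loop handles at most one block per pass: a block covered in full
 * just has its buffer state cleared and its page range zeroed, while a
 * partially covered block must be uptodate first (read in if needed),
 * is then partially zeroed, and is dirtied through the journal when
 * data journaling is enabled. */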
	return !ext4_inode_is_fast_symlink(inode);

	struct inode *inode = file->f_path.dentry->d_inode;

	trace_ext4_truncate_enter(inode);
	ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	trace_ext4_truncate_exit(inode);
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
	struct buffer_head *bh;
	int inodes_per_block, inode_offset;

	if (!ext4_valid_inum(sb, inode->i_ino))
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
			   "unable to read itable block");
	if (!buffer_uptodate(bh)) {
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);
		if (buffer_uptodate(bh)) {
			struct buffer_head *bitmap_bh;

			start = inode_offset & ~(inodes_per_block - 1);
			if (!buffer_uptodate(bitmap_bh)) {
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
			if (i == start + inodes_per_block) {
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			if (ext4_has_group_desc_csum(sb))
				table += num / inodes_per_block;
				sb_breadahead(sb, b++);
		trace_ext4_load_inode(inode);
		if (!buffer_uptodate(bh)) {
				   "unable to read itable block");

	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
	unsigned int flags = EXT4_I(inode)->i_flags;

	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

		return i_blocks << (inode->i_blkbits - 9);
	struct ext4_iloc iloc;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	raw_inode = ext4_raw_inode(&iloc);
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
	i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
	i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ext4_clear_state_flags(ei);
	if (inode->i_mode == 0 ||
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	inode->i_size = ext4_isize(raw_inode);
	ei->i_reserved_quota = 0;

		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
			transaction = journal->j_committing_transaction;
		tid = transaction->t_tid;
		tid = journal->j_commit_sequence;
			EXT4_GOOD_OLD_INODE_SIZE;
			ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		   !ext4_inode_is_fast_symlink(inode)))
		   !ext4_inode_is_fast_symlink(inode))) {
		ret = ext4_ind_check_inode(inode);
	if (ext4_inode_is_fast_symlink(inode)) {
	return ERR_PTR(ret);
static int ext4_inode_blocks_set(handle_t *handle,
	if (i_blocks <= ~0U) {
		raw_inode->i_blocks_high = 0;
	if (i_blocks <= 0xffffffffffffULL) {
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
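/* i_blocks is kept in 512-byte units while it fits in 48 bits
 * (i_blocks_lo plus the 16-bit i_blocks_high); beyond that, with the
 * huge_file feature, the same fields hold the count in filesystem-block
 * units instead, shifted by (i_blkbits - 9) as above. */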
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct buffer_head *bh = iloc->bh;
	int need_datasync = 0;

	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
		raw_inode->i_uid_high =
		raw_inode->i_gid_high =
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		raw_inode->i_file_acl_high =
	if (ei->i_disksize != ext4_isize(raw_inode)) {
	    EXT4_SB(sb)->s_es->s_rev_level ==
			ext4_handle_sync(handle);
		if (old_valid_dev(inode->i_rdev)) {

	ext4_inode_csum_set(inode, raw_inode, ei);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
	ext4_std_error(inode->i_sb, err);

	if (ext4_journal_current_handle()) {
		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");

		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
				   "IO error syncing inode");
	struct inode *inode = dentry->d_inode;
	const unsigned int ia_valid = attr->ia_valid;

	if (is_quota_modification(inode, attr))
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
		if (attr->ia_size > sbi->s_bitmap_maxbytes)
		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
		if (ext4_handle_valid(handle)) {
		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
			handle = ext4_journal_start(inode, 3);
			if (IS_ERR(handle)) {
	if (attr->ia_size != i_size_read(inode)) {
		ext4_inode_block_unlocked_dio(inode);
		ext4_inode_resume_unlocked_dio(inode);
	mark_inode_dirty(inode);

	ext4_std_error(inode->i_sb, error);

	struct inode *inode;
	unsigned long delalloc_blocks;

		EXT4_I(inode)->i_reserved_data_blocks);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits) >> 9;
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks,
				   int chunk)

static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks,
				  int chunk)
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
	if (groups > ngroups)
	ret += groups + gdpblocks;

	int bpp = ext4_journal_blocks_per_page(inode);

	ret = ext4_meta_trans_blocks(inode, bpp, 0);
	if (ext4_should_journal_data(inode))

	return ext4_meta_trans_blocks(inode, nrblocks, 1);

			 struct inode *inode, struct ext4_iloc *iloc)
	inode_inc_iversion(inode);

	err = ext4_do_update_inode(handle, inode, iloc);

			struct ext4_iloc *iloc)
	BUFFER_TRACE(iloc->bh, "get_write_access");
	ext4_std_error(inode->i_sb, err);
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)

	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	EXT4_I(inode)->i_extra_isize = new_extra_isize;
	struct ext4_iloc iloc;
	static unsigned int mnt_count;

	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		ret = ext4_expand_extra_isize(inode,
					      sbi->s_want_extra_isize,
			ext4_set_inode_state(inode,
					     EXT4_STATE_NO_EXPAND);
			ext4_warning(inode->i_sb,
				     "Unable to expand inode %lu. Delete"
				     " some EAs or run e2fsck.",

	handle = ext4_journal_start(inode, 2);

static int ext4_pin_inode(handle_t *handle, struct inode *inode)
	struct ext4_iloc iloc;

	BUFFER_TRACE(iloc.bh, "get_write_access");
	ext4_std_error(inode->i_sb, err);

	if (is_journal_aborted(journal))

	ext4_inode_block_unlocked_dio(inode);
	ext4_inode_resume_unlocked_dio(inode);

	handle = ext4_journal_start(inode, 1);
		return PTR_ERR(handle);
	ext4_handle_sync(handle);
	ext4_std_error(inode->i_sb, err);

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
	return !buffer_mapped(bh);
	struct page *page = vmf->page;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;

	sb_start_pagefault(inode->i_sb);
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
					  ext4_da_get_block_prep);
	} while (ret == -ENOSPC &&

	size = i_size_read(inode);
		ret = VM_FAULT_NOPAGE;
	if (page->index == size >> PAGE_CACHE_SHIFT)
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
				       ext4_bh_unmapped)) {
			wait_on_page_writeback(page);
			ret = VM_FAULT_LOCKED;
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
	if (!ret && ext4_should_journal_data(inode)) {
		if (walk_page_buffers(handle, page_buffers(page), 0,
			ret = VM_FAULT_SIGBUS;
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	ret = block_page_mkwrite_return(ret);
	sb_end_pagefault(inode->i_sb);
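/* Fast path for the write fault: if every buffer on the page is already
 * mapped, writing it back needs no allocation, so after waiting for any
 * writeback the fault can return VM_FAULT_LOCKED without starting a
 * handle; otherwise a handle is started and, in data-journaling mode,
 * the page's buffers are journaled and EXT4_STATE_JDATA is set. */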