#include <linux/time.h>
static int __ext2_write_inode(struct inode *inode, int do_sync);
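/*
 * A "fast" symlink keeps its target inside i_data[], so the only blocks it
 * may own are the ones backing an external xattr block (i_file_acl).
 */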
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
        int ea_blocks = EXT2_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;
static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
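/*
 * ext2_write_failed(): a failed write may have instantiated blocks past
 * i_size; drop the pagecache and the blocks beyond the old size.
 */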
        struct inode *inode = mapping->host;
                ext2_truncate_blocks(inode, inode->i_size);
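/*
 * ext2_evict_inode(): for a doomed inode, mark it dirty with its deletion
 * time, truncate its data to zero, then drop the reservation state and
 * release the inode.
 */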
        sb_start_intwrite(inode->i_sb);
        mark_inode_dirty(inode);
                ext2_truncate_blocks(inode, 0);
        rsv = EXT2_I(inode)->i_block_alloc_info;
        EXT2_I(inode)->i_block_alloc_info = NULL;
        sb_end_intwrite(inode->i_sb);
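/*
 * An Indirect element records a block number found while walking the chain
 * (key), the slot it was read from (p) and, for indirect levels, the buffer
 * that holds that slot (bh).
 */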
        struct buffer_head *bh;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
        p->key = *(p->p = v);
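/*
 * verify_chain(): the cached chain is still valid as long as every saved key
 * still matches the slot it was read from.
 */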
        while (from <= to && from->key == *from->p)
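/*
 * ext2_block_to_path(): split a file-relative block number into a chain of
 * up to four array offsets: the slot in i_data[] plus the positions inside
 * the indirect, double indirect and triple indirect blocks.  The return
 * value is the depth of that chain; *boundary is how many more blocks fit
 * before the end of the last indirect block.
 *
 * Example (1 KiB blocks, so ptrs = 256): i_block = 12 + 256 + 7 falls in the
 * double indirect tree and yields offsets { EXT2_DIND_BLOCK, 0, 7 }, depth 3.
 */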
static int ext2_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
                        "warning: %s: block < 0", __func__);
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                        "warning: %s: block is too big", __func__);
                *boundary = final - 1 - (i_block & (ptrs - 1));
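/*
 * ext2_get_branch(): follow the offsets from ext2_block_to_path(), reading
 * one indirect block per level.  Returns NULL when the whole chain could be
 * resolved (the block is mapped); otherwise it returns the first element
 * whose key is zero (a hole), or sets *err on a read failure.
 */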
static Indirect *ext2_get_branch(struct inode *inode,
        struct buffer_head *bh;
        add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);
                if (!verify_chain(chain, p))
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
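/*
 * Allocation goal heuristics: ext2_find_near() prefers a block close to
 * existing data (the preceding pointer in the same indirect block, else the
 * indirect block itself, else a colour-spread spot in the inode's block
 * group).  ext2_find_goal() continues sequential allocations from the last
 * allocated block and falls back to ext2_find_near() otherwise.
 */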
        for (p = ind->p - 1; p >= start; p--)
                return ind->bh->b_blocknr;
        return bg_start + colour;
        block_i = EXT2_I(inode)->i_block_alloc_info;
        return ext2_find_near(inode, partial);
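/*
 * ext2_blks_to_allocate(): decide how many data blocks to request at once.
 * If indirect blocks still have to be allocated, ask for at most what fits
 * before the boundary; otherwise also count the free slots directly after
 * the target in the existing indirect block.
 */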
                int blocks_to_boundary)
        unsigned long count = 0;
                if (blks < blocks_to_boundary + 1)
                        count += blocks_to_boundary + 1;
        while (count < blks && count <= blocks_to_boundary
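/*
 * ext2_alloc_blocks(): grab the blocks needed for the missing indirect
 * levels plus as many data blocks as the allocator yields in one request;
 * the new block numbers are returned in new_blocks[], indirect blocks first.
 */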
static int ext2_alloc_blocks(struct inode *inode,
        unsigned long count = 0;
        target = blks + indirect_blks;
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
        new_blocks[index] = current_block;
        for (i = 0; i < index; i++)
                mark_inode_dirty(inode);
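/*
 * ext2_alloc_branch(): build the missing tail of the chain.  Every new
 * indirect block is zeroed, gets a pointer to the next level (and, at the
 * last level, to each of the new data blocks) and is written out before the
 * branch becomes reachable.
 */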
static int ext2_alloc_branch(struct inode *inode,
        int blocksize = inode->i_sb->s_blocksize;
        struct buffer_head *bh;
        num = ext2_alloc_blocks(inode, goal, indirect_blks,
                                *blks, new_blocks, &err);
        for (n = 1; n <= indirect_blks; n++) {
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        for (i = 1; i < num; i++)
                set_buffer_uptodate(bh);
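/*
 * ext2_splice_branch(): publish the new branch by writing the first new
 * block number into the slot located earlier by ext2_get_branch(), then
 * refresh the reservation hints and dirty the inode.
 */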
static void ext2_splice_branch(struct inode *inode,
                        long block, Indirect *where, int num, int blks)
        block_i = EXT2_I(inode)->i_block_alloc_info;
        *where->p = where->key;
        if (num == 0 && blks > 1) {
                for (i = 1; i < blks; i++)
        mark_inode_dirty(inode);
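/*
 * ext2_get_blocks(): the mapping workhorse.  Resolve the chain for iblock;
 * if it is already mapped, extend the result over adjacent mapped blocks up
 * to maxblocks and the indirect-block boundary.  On a hole with create set,
 * pick a goal, allocate the branch and splice it in, marking the buffer new
 * (and boundary when the mapping ends at an indirect block boundary).
 */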
static int ext2_get_blocks(struct inode *inode,
                           sector_t iblock, unsigned long maxblocks,
                           struct buffer_head *bh_result,
        int blocks_to_boundary = 0;
        depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
        partial = ext2_get_branch(inode, depth, offsets, chain, &err);
                clear_buffer_new(bh_result);
                while (count < maxblocks && count <= blocks_to_boundary) {
                        if (!verify_chain(chain, chain + depth - 1)) {
                        if (blk == first_block + count)
        if (!create || err == -EIO)
        if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
                partial = ext2_get_branch(inode, depth, offsets, chain, &err);
                        clear_buffer_new(bh_result);
        goal = ext2_find_goal(inode, iblock, partial);
        indirect_blks = (chain + depth) - partial - 1;
        count = ext2_blks_to_allocate(partial, indirect_blks,
                                      maxblocks, blocks_to_boundary);
        err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);
        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        set_buffer_new(bh_result);
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        partial = chain + depth - 1;
        while (partial > chain) {
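/*
 * ext2_get_block(): the get_block_t wrapper used by the buffered and direct
 * I/O paths; the requested size comes in through bh_result->b_size and the
 * size of the mapped extent is returned the same way.
 */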
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        int ret = ext2_get_blocks(inode, iblock, max_blocks,
                bh_result->b_size = (ret << inode->i_blkbits);
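/*
 * write_begin/write_end and their nobh variants: whenever the operation
 * fails, ext2_write_failed() trims any blocks instantiated past the old
 * i_size.
 */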
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
                ext2_write_failed(mapping, pos + len);

static int ext2_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                ext2_write_failed(mapping, pos + len);

ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
                ext2_write_failed(mapping, pos + len);

static int ext2_nobh_writepage(struct page *page,
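/*
 * ext2_direct_IO(): hand the request straight to blockdev_direct_IO(); a
 * short or failed write again trims blocks allocated past the attempted end.
 */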
                        loff_t offset, unsigned long nr_segs)
        struct file *file = iocb->ki_filp;
        struct inode *inode = mapping->host;
        ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
        if (ret < 0 && (rw & WRITE))
                ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
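/*
 * The address_space operation tables: ext2_aops is the default; the nobh
 * table below is used when the filesystem is mounted with "nobh".
 */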
        .readpage       = ext2_readpage,
        .readpages      = ext2_readpages,
        .writepage      = ext2_writepage,
        .write_begin    = ext2_write_begin,
        .write_end      = ext2_write_end,
        .direct_IO      = ext2_direct_IO,
        .writepages     = ext2_writepages,
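/* ext2_nobh_aops: same table, but routed through the nobh page helpers. */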
        .readpage       = ext2_readpage,
        .readpages      = ext2_readpages,
        .writepage      = ext2_nobh_writepage,
        .write_begin    = ext2_nobh_write_begin,
        .direct_IO      = ext2_direct_IO,
        .writepages     = ext2_writepages,
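/*
 * Truncate machinery.  ext2_find_shared(): find the last indirect block that
 * is shared between the part of the tree that survives the truncate and the
 * branch being cut off; the top of the detached branch is handed back
 * through the last argument so it can be freed outside the lock.
 */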
static Indirect *ext2_find_shared(struct inode *inode,
        for (k = depth; k > 1 && !offsets[k-1]; k--)
        partial = ext2_get_branch(inode, k, offsets, chain, &err);
                partial = chain + k - 1;
        if (!partial->key && *partial->p) {
        for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
        if (p == chain + k - 1 && p > chain) {
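/*
 * ext2_free_data(): free the data blocks whose numbers live in the slots
 * [p, q); runs of contiguous block numbers are batched into a single
 * ext2_free_blocks() call.
 */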
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
        unsigned long block_to_free = 0, count = 0;
        for ( ; p < q ; p++) {
                else if (block_to_free == nr - count)
                        mark_inode_dirty(inode);
        mark_inode_dirty(inode);
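/*
 * ext2_free_branches(): free whole indirect subtrees.  For each non-zero
 * pointer in [p, q) read the indirect block, recurse one level down, then
 * release the indirect block itself; at depth 0 this degenerates into
 * ext2_free_data().
 */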
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
        struct buffer_head *bh;
                for ( ; p < q ; p++) {
                        bh = sb_bread(inode->i_sb, nr);
                                        "Read failure, inode=%ld, block=%ld",
                        ext2_free_branches(inode,
                                           (__le32 *)bh->b_data + addr_per_block,
                        mark_inode_dirty(inode);
                ext2_free_data(inode, p, q);
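/*
 * __ext2_truncate_blocks(): the core of truncate.  Free the direct blocks
 * past the new size, use ext2_find_shared() to prune the partially used
 * indirect chain, then drop the remaining branches of that chain and finally
 * any whole indirect, double indirect and triple indirect trees.
 */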
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
        __le32 *i_data = EXT2_I(inode)->i_data;
        blocksize = inode->i_sb->s_blocksize;
        n = ext2_block_to_path(inode, iblock, offsets, NULL);
                ext2_free_data(inode, i_data+offsets[0],
        partial = ext2_find_shared(inode, n, offsets, chain, &nr);
                if (partial == chain)
                        mark_inode_dirty(inode);
                ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
        while (partial > chain) {
                ext2_free_branches(inode,
                                   (__le32 *)partial->bh->b_data + addr_per_block,
                                   (chain+n-1) - partial);
                brelse(partial->bh);
        switch (offsets[0]) {
                        mark_inode_dirty(inode);
                        ext2_free_branches(inode, &nr, &nr+1, 1);
                        mark_inode_dirty(inode);
                        ext2_free_branches(inode, &nr, &nr+1, 2);
                        mark_inode_dirty(inode);
                        ext2_free_branches(inode, &nr, &nr+1, 3);
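/*
 * ext2_truncate_blocks()/ext2_setsize(): nothing to do for fast symlinks and
 * special files; ext2_setsize() additionally zeroes the partial tail block,
 * updates i_size and dirties the inode.
 */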
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
        if (ext2_inode_is_fast_symlink(inode))
        __ext2_truncate_blocks(inode, offset);

static int ext2_setsize(struct inode *inode, loff_t newsize)
        if (ext2_inode_is_fast_symlink(inode))
        __ext2_truncate_blocks(inode, newsize);
                mark_inode_dirty(inode);
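/*
 * ext2_get_inode(): locate the on-disk ext2_inode for an inode number,
 * reading the block of the inode table that contains it; the buffer is
 * returned through *p, errors as an ERR_PTR.
 */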
                                        struct buffer_head **p)
        struct buffer_head *bh;
        unsigned long block;
            ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
        if (!(bh = sb_bread(sb, block)))
        ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
                   (unsigned long) ino);
                   "unable to read inode block - inode=%lu, block=%lu",
                   (unsigned long) ino, block);
        return ERR_PTR(-EIO);
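/*
 * ext2_set_inode_flags()/ext2_get_inode_flags(): translate between the
 * on-disk EXT2_*_FL flags and the in-core S_* inode flags.
 */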
        unsigned int flags = EXT2_I(inode)->i_flags;
        unsigned int flags = ei->vfs_inode.i_flags;
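/*
 * ext2_iget(): read an inode off disk and set up its in-core counterpart:
 * copy the raw fields (including the high 16 bits of uid/gid), then pick the
 * inode and address_space operations that match the file type and the mount
 * options.
 */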
        struct buffer_head *bh;
        struct inode *inode;
        raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
        if (IS_ERR(raw_inode)) {
                ret = PTR_ERR(raw_inode);
        i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
        i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
        i_uid_write(inode, i_uid);
        i_gid_write(inode, i_gid);
                        inode->i_mapping->a_ops = &ext2_nobh_aops;
        if (ext2_inode_is_fast_symlink(inode)) {
        return ERR_PTR(ret);
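/*
 * __ext2_write_inode(): copy the in-core inode back into its slot in the
 * inode table, mark the buffer dirty and, when do_sync is set, wait for the
 * write to reach the disk.
 */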
static int __ext2_write_inode(struct inode *inode, int do_sync)
        struct buffer_head *bh;
        struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
        if (IS_ERR(raw_inode))
                memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
                raw_inode->i_uid_high = 0;
                raw_inode->i_gid_high = 0;
        if (inode->i_size > 0x7fffffffULL) {
                    EXT2_SB(sb)->s_es->s_rev_level ==
                        spin_lock(&EXT2_SB(sb)->s_lock);
                        spin_unlock(&EXT2_SB(sb)->s_lock);
        if (old_valid_dev(inode->i_rdev)) {
        if (buffer_req(bh) && !buffer_uptodate(bh)) {
                printk("IO error syncing ext2 inode [%s:%08lx]\n",
                        sb->s_id, (unsigned long) ino);
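/*
 * ext2_setattr(): initialize quota when ownership changes, handle size
 * changes through ext2_setsize(), then apply the remaining attributes and
 * dirty the inode.
 */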
        struct inode *inode = dentry->d_inode;
        if (is_quota_modification(inode, iattr))
                error = ext2_setsize(inode, iattr->ia_size);
        mark_inode_dirty(inode);