/*
 * NOTE(review): this entire chunk is a garbled text extraction of C source.
 * The leading integers ("6", "19", ...) are line numbers from the original
 * file fused into the code, statements are split mid-expression, and many
 * intervening lines are missing.  The comments below annotate what the
 * visible fragments appear to do; none of this text is compilable as-is.
 */
/* Fragment: a buffer_head pointer declared inside some enclosing helper. */
6 struct buffer_head *bh;
/*
 * Fragment of a chain-validation loop: walk links in [from, to] while each
 * link's cached 'key' still equals the value behind its 'p' pointer —
 * presumably checking that a previously read indirect-block chain is still
 * current.  TODO confirm against the complete original source.
 */
19 while (from <= to && from->
key == *from->
p)
24 static inline block_t *block_end(
struct buffer_head *bh)
26 return (
block_t *)((
char*)bh->b_data + bh->b_size);
/*
 * Fragments of a branch-reading helper (get_branch-style).  Garbled
 * extraction: fused line numbers, split statements, missing lines.
 */
/* Local buffer for the indirect block being read at each level. */
37 struct buffer_head *bh;
/*
 * Seed the chain's first link from the inode's own block array
 * (i_data(inode) + *offsets); NULL because no buffer backs the inode's
 * in-core block table.
 */
41 add_chain (
chain,
NULL, i_data(inode) + *offsets);
/* Read the next-level indirect block named by the current link's key. */
45 bh = sb_bread(sb, block_to_cpu(p->
key));
/*
 * Re-validate the chain after the (blocking) read — presumably to detect
 * a concurrent truncate invalidating links; TODO confirm.
 */
49 if (!verify_chain(
chain, p))
/*
 * Append the next link: its pointer indexes into the block just read,
 * advanced by the next path offset.
 */
51 add_chain(++p, bh, (
block_t *)bh->b_data + *++offsets);
/*
 * Fragments of alloc_branch(): allocate a chain of 'num' blocks to extend
 * the indirect tree.  Garbled extraction — fused line numbers, split
 * statements, many lines missing (allocation of 'nr', error paths, etc.).
 */
69 static int alloc_branch(
struct inode *inode,
/* Record the first allocated block number in the chain head. */
78 branch[0].
key = cpu_to_block(parent);
/* Only build deeper links if the head allocation succeeded. */
79 if (parent)
for (n = 1; n < num; n++) {
80 struct buffer_head *bh;
/* Store the newly allocated block number ('nr' — allocated on a
 * missing line) in this link. */
85 branch[
n].
key = cpu_to_block(nr);
/* Get (without reading) the parent block so the new child can be
 * linked into it. */
86 bh = sb_getblk(inode->
i_sb, parent);
/* Zero the fresh indirect block before exposing it. */
88 memset(bh->b_data, 0, bh->b_size);
/* Point this link at its slot inside the parent's table... */
90 branch[
n].
p = (
block_t*) bh->b_data + offsets[n];
/* ...and write the child's block number into that slot. */
91 *branch[n].
p = branch[n].
key;
92 set_buffer_uptodate(bh);
/*
 * Failure cleanup (lines 101-103): forget the buffers of partially
 * built links, then — on the missing line after 103 — presumably
 * release the allocated blocks themselves; TODO confirm.
 */
101 for (i = 1; i <
n; i++)
102 bforget(branch[i].bh);
103 for (i = 0; i <
n; i++)
/*
 * Fragments of splice_branch(): atomically attach a freshly allocated
 * branch into the inode's block tree.  Garbled extraction; locking and
 * error-return lines are missing.
 */
108 static inline int splice_branch(
struct inode *inode,
/*
 * Verify the chain up to the splice point is still valid and that the
 * target slot is still empty — bail out (missing line) if someone else
 * filled it meanwhile.
 */
118 if (!verify_chain(
chain, where-1) || *where->
p)
/* The actual splice: write the new branch's block number into the slot. */
121 *where->
p = where->
key;
133 mark_inode_dirty(inode);
/*
 * Failure path (lines 138-140): forget the new branch's buffers and —
 * on the missing loop body after 140 — presumably free its blocks;
 * TODO confirm.
 */
138 for (i = 1; i < num; i++)
139 bforget(where[i].bh);
140 for (i = 0; i < num; i++)
/*
 * Fragments of get_block(): map a file-relative 'block' to a device block,
 * allocating the missing part of the indirect chain when 'create' is set.
 * Garbled extraction — goto labels, retry loop, and brelse calls are on
 * missing lines.
 */
145 static inline int get_block(
struct inode * inode,
sector_t block,
146 struct buffer_head *bh,
int create)
/* Translate the logical block into a path of per-level offsets. */
153 int depth = block_to_path(inode, block, offsets);
/* Walk the existing chain; 'partial' is NULL on a full hit, else the
 * first missing link. */
159 partial = get_branch(inode, depth, offsets, chain, &err);
/* Full hit: hand the mapped device block to the caller's bh. */
164 map_bh(bh, inode->
i_sb, block_to_cpu(chain[depth-1].
key));
166 partial = chain+depth-1;
/* Read-only lookup, or a hard I/O error: release and give up. */
171 if (!create || err == -
EIO) {
/* Release the buffers of the partial chain (loop body missing). */
173 while (partial > chain) {
/* Allocate the 'left' missing levels of the branch... */
189 left = (chain +
depth) - partial;
190 err = alloc_branch(inode, left, offsets+(partial-chain), partial);
/* ...and splice it in; on failure presumably retry the walk — the
 * control flow between these fragments is missing; TODO confirm. */
194 if (splice_branch(inode, chain, partial, left) < 0)
201 while (partial > chain) {
/*
 * Fragments of find_shared(): for truncate, find the topmost chain link
 * that is shared between the kept and the freed part of the tree.
 * Garbled extraction; locking and the 'nr' hand-off lines are missing.
 */
216 static Indirect *find_shared(
struct inode *inode,
/* Drop trailing zero offsets — whole subtrees past them go away. */
226 for (k = depth; k > 1 && !offsets[k-1]; k--)
228 partial = get_branch(inode, k, offsets, chain, &err);
232 partial = chain + k-1;
/*
 * Link invalidated (key cleared) while its slot is still populated —
 * presumably a race with a concurrent writer; bail out (missing lines).
 * TODO confirm.
 */
233 if (!partial->
key && *partial->
p) {
/* Walk back up while everything after the split point in each block
 * is zero (all_zeroes bounds fragmentary here). */
237 for (p=partial;p>chain && all_zeroes((
block_t*)p->
bh->b_data,p->
p);p--)
/* Special case at the top of the freed range (body missing). */
239 if (p == chain + k - 1 && p > chain) {
/*
 * Fragments of free_data(): free every data block whose number is stored
 * in the slot range [p, q).  Garbled extraction — the slot-clearing,
 * free_block() call, and mark_inode_dirty lines are missing.
 */
256 static inline void free_data(
struct inode *inode,
block_t *p,
block_t *q)
260 for ( ; p <
q ; p++) {
/* Decode the on-disk block number; the missing body presumably clears
 * the slot and frees 'nr' when non-zero — TODO confirm. */
261 nr = block_to_cpu(*p);
/*
 * Fragments of free_branches(): recursively free an indirect subtree.
 * Each slot in [p, q) names a child block; at depth > 0 (the depth
 * adjustment happens on a missing line) the child is itself an indirect
 * block whose table is freed recursively.  Garbled extraction.
 */
269 static void free_branches(
struct inode *inode,
block_t *p,
block_t *q,
int depth)
271 struct buffer_head * bh;
272 for ( ; p <
q ; p++) {
276 nr = block_to_cpu(*p);
/* Read the child indirect block so its table can be traversed. */
280 bh = sb_bread(inode->
i_sb, nr);
/* Recurse over the child's whole table; 'depth' here was presumably
 * already decremented on a missing line — TODO confirm. */
283 free_branches(inode, (
block_t*)bh->b_data,
284 block_end(bh), depth);
287 mark_inode_dirty(inode);
/* Base case: the slots reference data blocks directly. */
290 free_data(inode, p, q);
/*
 * Fragments of truncate(): shrink the inode's block tree to the new size.
 * Garbled extraction — size computation, block_truncate_page, timestamps
 * and several control-flow lines are missing.
 */
293 static inline void truncate (
struct inode * inode)
296 block_t *idata = i_data(inode);
/* Path to the first block past the new EOF. */
308 n = block_to_path(inode, iblock, offsets);
/* New EOF lands among the direct blocks: free the direct tail. */
313 free_data(inode, idata+offsets[0], idata +
DIRECT);
/* Index of the first wholly-freed indirect subtree. */
318 first_whole = offsets[0] + 1 -
DIRECT;
/* Find the topmost link shared with the kept part; 'nr' receives a
 * detached subtree root to free (exact contract on missing lines). */
319 partial = find_shared(inode, n, offsets, chain, &nr);
321 if (partial == chain)
322 mark_inode_dirty(inode);
/* Free the detached subtree rooted at 'nr'. */
325 free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);
/* Free everything to the right of the cut point at each level, then
 * release the chain's buffers. */
328 while (partial > chain) {
329 free_branches(inode, partial->
p + 1, block_end(partial->
bh),
330 (chain+n-1) - partial);
332 brelse (partial->
bh);
/* Free the remaining wholly-unneeded top-level indirect subtrees. */
337 while (first_whole < DEPTH-1) {
338 nr = idata[DIRECT+first_whole];
340 idata[DIRECT+first_whole] = 0;
341 mark_inode_dirty(inode);
342 free_branches(inode, &nr, &nr+1, first_whole+1);
347 mark_inode_dirty(inode);
/*
 * Stray fragment of a block-counting helper: appears to iterate over
 * indirection levels while the block count exceeds the direct-block
 * capacity.  Too little is visible to say more — TODO confirm against
 * the complete original source.
 */
356 while (--i && blocks > direct) {