#include <linux/errno.h>
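
/* Compare two extents keys, returns 0 on same, pos/neg for difference */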
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
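
/*
 * Build a search key for the extents B-tree from the CNID, the start
 * block and the fork type (data or resource fork).
 */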
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}
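
/*
 * The three loops below walk the eight extent descriptors of a single
 * extent record: one maps a file allocation block to a disk block, one
 * sums the record's total block count, and one finds the record's last
 * allocated block by scanning the descriptors backwards.
 */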
	for (i = 0; i < 8; ext++, i++) {

	for (i = 0; i < 8; ext++, i++)

	for (i = 0; i < 7; ext--, i++)
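
/*
 * Write the inode's cached extent record back into the extents tree,
 * inserting a new record or overwriting the existing one; the caller
 * holds the inode's extents_lock.
 */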
static void __hfsplus_ext_write_extent(struct inode *inode,
				       struct hfs_find_data *fd)
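
/*
 * hfsplus_ext_write_extent_locked() flushes the cached extent record if
 * it is dirty; the call below it comes from the unlocked wrapper
 * (presumably hfsplus_ext_write_extent()) that takes extents_lock first.
 */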
static int hfsplus_ext_write_extent_locked(struct inode *inode)
		__hfsplus_ext_write_extent(inode, &fd);
	res = hfsplus_ext_write_extent_locked(inode);
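
/*
 * Look up one extent record in the extents tree and copy it into the
 * caller's buffer; -ENOENT means no record exists for that CNID, fork
 * type and start block.
 */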
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
	hfsplus_ext_build_key(fd->search_key, cnid, block, type);

	if (res && res != -ENOENT)
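
/*
 * Load the extent record covering the given block into the inode's
 * cache, writing back the previously cached record first if it is dirty.
 */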
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
		__hfsplus_ext_write_extent(inode, fd);
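
/*
 * Cache-or-read wrapper: return immediately if the block already falls
 * inside the cached extent record, otherwise search the extents tree.
 */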
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
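
/* Get a block at iblock for inode, possibly allocating if create */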
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
	res = hfsplus_ext_read_extent(inode, ablock);
		inode->i_ino, (long long)iblock, dblock);
	map_bh(bh_result, sb, sector);

		set_buffer_new(bh_result);

	if (create || was_dirty)
		mark_inode_dirty(inode);
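
/* Debug helper loop: dump all eight descriptors of an extent record. */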
	for (i = 0; i < 8; i++)
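
/*
 * Append newly allocated blocks to an extent record, either by extending
 * the last used descriptor in place or by starting a new descriptor;
 * fails with -ENOSPC when all eight descriptors are in use.
 */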
	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {

		if (offset == count) {

			if (alloc_block != start + count) {

				block_count += count;

		} else if (offset < count)
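
/*
 * Free allocation blocks covered by an extent record, walking the
 * descriptors backwards from the end of the freed range.
 */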
	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {

		else if (offset < count)

		if (count <= block_nr) {
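
/*
 * Free an entire fork: release the blocks described by the eight inline
 * descriptors from the catalog record, then walk the extents tree and
 * free any overflow records until the fork's total block count is gone.
 */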
	for (i = 0; i < 8; i++)

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);

	if (total_blocks == blocks)
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);

		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);

		total_blocks = start;
	} while (total_blocks > blocks);
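
/*
 * File extension path: pick a goal block from the last allocated extent,
 * allocate new blocks and merge them into the in-inode or cached extent
 * record; if the record is full (-ENOSPC), flush it and start a new one.
 */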
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		} else if (res == -ENOSPC)
	res = hfsplus_ext_write_extent_locked(inode);
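
/*
 * Truncation path: round the new i_size up to whole allocation blocks,
 * then free extents from the end of the file until the new block count
 * is reached, dropping emptied overflow records from the extents tree.
 */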
		mark_inode_dirty(inode);
	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;
	if (blk_cnt == alloc_cnt)
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		if (blk_cnt > start) {