#include <linux/slab.h>

#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)
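/*
 * nilfs_mdt_insert_new_block - attach and initialize a newly created block
 *
 * The buffer is mapped, handed to the caller-supplied init_block()
 * callback for initialization, and marked up to date; the metadata inode
 * itself is then flagged dirty so the change is committed.
 */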
static int nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
				      struct buffer_head *bh,
				      void (*init_block)(struct inode *,
							 struct buffer_head *,
							 void *))
{
	/* ... insert the block into the inode's bmap, then: */
	set_buffer_mapped(bh);
	/* ... zero the block and let init_block() fill it in ... */
	set_buffer_uptodate(bh);
	/* ... */
	nilfs_mdt_mark_dirty(inode);
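/*
 * nilfs_mdt_create_block - create a new metadata block
 *
 * Checking buffer_uptodate() twice guards against a block that already
 * exists or that another task brought up to date while we waited on the
 * buffer; in either case creation is refused.
 */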
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct buffer_head *bh;
	int err;

	/* ... grab the buffer under a new transaction ... */
	if (buffer_uptodate(bh))
		goto failed_bh;	/* the block already exists */

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;
	/* ... */
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
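/*
 * nilfs_mdt_submit_block - submit read I/O for one metadata block
 * @mode: READ for a required block, READA for opportunistic readahead
 *
 * Readahead only tries the buffer lock; if it is contended the request
 * is dropped with -EBUSY rather than blocking the caller.  The disk
 * block number comes from a bmap lookup on the metadata inode.
 */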
static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret;
	/* ... */
	if (buffer_uptodate(bh))
		goto out;	/* no I/O needed */

	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {	/* raced with a concurrent read */
		unlock_buffer(bh);
		goto out;
	}
	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
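/*
 * nilfs_mdt_read_block - read one block, optionally with readahead
 *
 * The block itself is submitted with READ and waited on; when @readahead
 * is set, up to NILFS_MDT_MAX_RA_BLOCKS following blocks are submitted
 * with READA on a best-effort basis.
 */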
static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	/* ... */
	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff,
						     READA, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);	/* already up to date */
			else if (err != -EBUSY)
				break;	/* abort readahead on lookup failure */
			/* stop issuing readahead once the main read is done */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
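/*
 * nilfs_mdt_get_block - look up a metadata block, creating it on demand
 *
 * Reads the block first, with readahead only when @create is zero; if
 * the block does not exist and @create is set, it is allocated and
 * initialized instead.
 *
 * Illustrative call (the init callback name is a placeholder, not an
 * identifier from this file):
 *
 *	struct buffer_head *bh;
 *	int err = nilfs_mdt_get_block(inode, blkoff, 1,
 *				      my_init_block, &bh);
 *	if (!err) {
 *		... use bh->b_data ...
 *		brelse(bh);
 *	}
 */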
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* try a read first; readahead is enabled only for pure reads */
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
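/*
 * An -ENOENT result is treated like success below: whether the block
 * existed or was already a hole, the metadata inode is marked dirty so
 * the change is committed.
 */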
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		/* ... */
	}
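/*
 * nilfs_mdt_forget_block - discard one cached metadata block
 *
 * After waiting out any writeback, the buffer covering @block is located
 * on its page (first_block is the file block index of the page's first
 * buffer) and dropped; PageDirty() then tells whether other buffers on
 * the page still hold dirty data.
 */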
	unsigned long first_block;
	/* ... */
	wait_on_page_writeback(page);

	first_block = (unsigned long)index <<
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	if (page_has_buffers(page)) {
		struct buffer_head *bh;

		bh = nilfs_page_get_nth_block(page, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = PageDirty(page);
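/*
 * nilfs_mdt_mark_block_dirty - read a block in and mark it dirty
 *
 * The read is done without readahead since exactly one block is needed;
 * dirtying the inode afterwards queues the file for the next segment
 * construction.
 */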
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_read_block(inode, block, 0, &bh);
	/* ... mark the buffer dirty and release it ... */
	nilfs_mdt_mark_dirty(inode);
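/* address_space operations for metadata files */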
static const struct address_space_operations def_mdt_aops = {
	.writepage		= nilfs_mdt_write_page,
};
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
	/* ... */
	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
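/*
 * nilfs_mdt_set_entry_size - set the sizes used to index entries inside
 * each block of this metadata file
 */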
void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
			      unsigned header_size)
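/*
 * nilfs_mdt_freeze_buffer - preserve a buffer's contents in the shadow map
 *
 * A copy of @bh is kept on the corresponding page of the shadow
 * mapping's frozen_data cache; the first time a buffer is frozen it is
 * also queued on the frozen_buffers list and the live buffer is marked
 * redirected.
 */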
	struct buffer_head *bh_frozen;
	/* ... */
	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
	/* ... */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, 0);

	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	}
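/*
 * nilfs_mdt_get_frozen_buffer - find the frozen copy of @bh
 *
 * Returns the matching buffer head from the shadow page cache, or NULL
 * when no copy was frozen.
 */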
	struct buffer_head *bh_frozen = NULL;
	/* ... */
	if (page_has_buffers(page)) {
		n = bh_offset(bh) >> inode->i_blkbits;
		bh_frozen = nilfs_page_get_nth_block(page, n);
	}
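/*
 * nilfs_release_frozen_buffers - empty the shadow map's frozen list
 *
 * Each buffer head is unlinked from frozen_buffers via its
 * b_assoc_buffers hook before being released.
 */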
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
	nilfs_release_frozen_buffers(shadow);