/*
 * fs/mpage.c
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
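/*
 * I/O completion handler for multipage BIOs: propagate the bio's uptodate
 * state to every page it carries and finish each page's I/O.  Pages are
 * walked in reverse so the next bio_vec's page flags can be prefetched.
 */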
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
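/*
 * Hand a filled bio to the block layer.  Returns NULL so callers can write
 * "bio = mpage_bio_submit(rw, bio)" and drop their reference in one step.
 */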
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	submit_bio(rw, bio);
	return NULL;
}
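/*
 * Allocate a bio for up to nr_vecs pages.  If the caller is itself a memory
 * allocator (PF_MEMALLOC), retry with progressively fewer vecs rather than
 * fail outright.
 */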
static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
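/*
 * Support function for mpage_readpages().  The filesystem's get_block might
 * return an up-to-date buffer; copy its state into the matching buffer_head
 * already attached to the page so that readpage need not repeat the
 * get_block call.
 */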
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * Don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date.
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}
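/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible bios, submitting them for IO
 * when the blocks are not contiguous on disk.
 *
 * A buffer_head is passed back and forth; its buffer_mapped() flag tracks
 * the validity of its disk mapping and decides when to do the next
 * get_block() call.
 */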
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;
	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;
	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}
	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}
		/*
		 * Some filesystems copy data into the page during the
		 * get_block call; map_buffer_to_page copies the mapping we
		 * just collected into the page's buffers so readpage doesn't
		 * have to repeat the get_block call.
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page) {
				break;
			}
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}
	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits,
				PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}
	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
				GFP_KERNEL);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
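/*
 * mpage_readpages - populate an address space with some pages and start
 * reads against them.  Walks the pages and the blocks within each page,
 * building and emitting large bios; pages with holes or non-contiguous
 * blocks fall back to block_read_full_page() inside do_mpage_readpage().
 */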
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block);
		}
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
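/*
 * The single-page variant: read one page with as few get_block calls as
 * possible.  This isn't called much at all.
 */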
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
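/*
 * Example (a sketch, not part of this file): a block-backed filesystem
 * typically wires the read side into its address_space_operations.  The
 * names foo_readpage, foo_readpages and foo_get_block are hypothetical:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, foo_get_block);
 *	}
 *
 *	static int foo_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					foo_get_block);
 *	}
 */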
/*
 * Writing is not so simple: the state of the page's buffers (if any) decides
 * whether the page can be batched into a large bio or must fall back to the
 * filesystem's own ->writepage.
 */
struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * Unmapped dirty buffers come from
				 * __set_page_dirty_buffers on mmapped data.
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped.  The page was
		 * created by pagein or read over a hole, so fall back.
		 */
		goto confused;
	}
	/*
	 * The page has no buffers: map it to disk.
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			unmap_underlying_metadata(map_bh.b_bdev,
						map_bh.b_blocknr);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;
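	/*
	 * The blocks gathered so far are contiguous on disk; first_unmapped
	 * marks how many of the page's blocks are actually backed by the
	 * mapping (fewer than blocks_per_page when the page straddles EOF).
	 */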
page_is_mapped:
	end_index = i_size >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped:
		 * writes to the region past EOF must not reach the file.
		 */
		unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(WRITE, bio);

alloc_new:
	if (bio == NULL) {
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;
	}

	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(WRITE, bio);
		goto alloc_new;
	}
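	/*
	 * OK, we have our BIO, so we can now mark the buffers clean.  Make
	 * sure to only clean buffers which we know we'll be writing.
	 */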
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;
		unsigned buffer_counter = 0;

		do {
			if (buffer_counter++ == first_unmapped)
				break;
			clear_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/*
		 * We cannot drop the bh if the page is not uptodate or a
		 * concurrent readpage would fail to serialize with the bh
		 * and it would read from disk before we reach the platter.
		 */
		if (buffer_heads_over_limit && PageUptodate(page))
			try_to_free_buffers(page);
	}
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(WRITE, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(WRITE, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable.
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}
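/*
 * mpage_writepages - walk the given address_space's dirty pages and write
 * them all, batching contiguous on-disk blocks into large bios.  Falls back
 * to generic_writepages() when no get_block is supplied.
 */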
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio)
			mpage_bio_submit(WRITE, mpd.bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
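/*
 * Example (a sketch, not part of this file): the write side is hooked up
 * the same way; foo_writepage, foo_writepages and foo_get_block are
 * hypothetical names:
 *
 *	static int foo_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, foo_get_block, wbc);
 *	}
 *
 *	static int foo_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, foo_get_block);
 *	}
 */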