45 struct buffer_head *bh, *head;
47 *delalloc = *unwritten = 0;
49 bh = head = page_buffers(page);
51 if (buffer_unwritten(bh))
53 else if (buffer_delay(bh))
55 } while ((bh = bh->b_this_page) != head);
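/* pick the backing block device: realtime inodes use the rt device, everything else the data device */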
62 struct xfs_inode *ip = XFS_I(inode);
63 struct xfs_mount *mp = ip->i_mount;
66 return mp->m_rtdev_targp->bt_bdev;
68 return mp->m_ddev_targp->bt_bdev;
81 struct buffer_head *bh, *next;
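/* xfs_ioend_is_append(): true if the ioend reaches past the current on-disk size (i_d.di_size) */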
102 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
105 XFS_I(ioend->io_inode)->i_d.di_size;
112 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
113 struct xfs_trans *tp;
147 struct xfs_inode *ip = XFS_I(ioend->io_inode);
168 ip->i_d.di_size = isize;
186 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
205 struct xfs_inode *ip = XFS_I(ioend->io_inode);
217 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
254 ASSERT(!xfs_ioend_is_append(ioend));
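/* look up (or allocate) the extent backing this range for writeback; offset + count is clamped to s_maxbytes */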
319 struct xfs_inode *ip = XFS_I(inode);
320 struct xfs_mount *mp = ip->i_mount;
327 if (XFS_FORCED_SHUTDOWN(mp))
343 if (offset + count > mp->m_super->s_maxbytes)
344 count = mp->m_super->s_maxbytes - offset;
348 imap, &nimaps, bmapi_flags);
358 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
370 trace_xfs_map_blocks_found(ip, offset, count, type, imap);
400 bio->bi_private = NULL;
401 bio->bi_end_io = NULL;
414 bio->bi_private = ioend;
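/* allocate a writeback bio; b_blocknr is in fs-block units, so scale by (b_size >> 9) to get a 512-byte sector */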
421 struct buffer_head *bh)
424 struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
427 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
428 bio->bi_bdev = bh->b_bdev;
434 struct buffer_head *bh)
436 ASSERT(buffer_mapped(bh));
437 ASSERT(buffer_locked(bh));
438 ASSERT(!buffer_delay(bh));
439 ASSERT(!buffer_unwritten(bh));
442 set_buffer_uptodate(bh);
443 clear_buffer_dirty(bh);
453 ASSERT(!PageWriteback(page));
456 set_page_writeback(page);
463 static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
465 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
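/* build bios from each ioend's buffer chain, starting a new bio whenever the blocks stop being contiguous or the current bio is full */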
498 struct buffer_head *bh;
507 } while ((ioend = next) != NULL);
532 } else if (bh->b_blocknr != lastblock + 1) {
537 if (bio_add_buffer(bio, bh) != bh->b_size) {
542 lastblock = bh->b_blocknr;
547 } while ((ioend = next) != NULL);
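/* unwind on error: walk every buffer chained through b_private and clear its async_write state */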
560 struct buffer_head *bh, *next_bh;
566 next_bh = bh->b_private;
567 clear_buffer_async_write(bh);
569 } while ((bh = next_bh) != NULL);
572 } while ((ioend = next) != NULL);
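/* open a new ioend when none exists, when explicitly requested, or when the IO type changes; buffers are chained via b_private */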
584 struct buffer_head *bh,
592 if (!ioend || need_ioend || type != ioend->io_type) {
607 bh->b_private = NULL;
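/* compute the buffer's block number from its offset within the mapped extent, then mark it mapped */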
614 struct buffer_head *bh,
619 struct xfs_mount *m = XFS_I(inode)->i_mount;
627 ((offset - iomap_offset) >> inode->i_blkbits);
632 set_buffer_mapped(bh);
638 struct buffer_head *bh,
646 set_buffer_mapped(bh);
647 clear_buffer_delay(bh);
648 clear_buffer_unwritten(bh);
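/* classify the page by its buffers: unwritten, delalloc, or dirty and already mapped; pages already under writeback are skipped */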
660 if (PageWriteback(page))
663 if (page->mapping && page_has_buffers(page)) {
664 struct buffer_head *bh, *head;
667 bh = head = page_buffers(page);
669 if (buffer_unwritten(bh))
671 else if (buffer_delay(bh))
673 else if (buffer_dirty(bh) && buffer_mapped(bh))
677 } while ((bh = bh->b_this_page) != head);
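/* opportunistically write out further pages of the same mapping; bail if the page index is wrong, the page cannot be locked, or it is already under writeback */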
701 struct buffer_head *bh, *head;
703 unsigned long p_offset;
709 if (page->index != tindex)
711 if (!trylock_page(page))
713 if (PageWriteback(page))
714 goto fail_unlock_page;
716 goto fail_unlock_page;
718 goto fail_unlock_page;
733 end_offset = min_t(unsigned long long,
741 page_dirty = p_offset / len;
743 bh = head = page_buffers(page);
745 if (offset >= end_offset)
747 if (!buffer_uptodate(bh))
749 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
754 if (buffer_unwritten(bh) || buffer_delay(bh) ||
756 if (buffer_unwritten(bh))
758 else if (buffer_delay(bh))
779 } while (offset += len, (bh = bh->b_this_page) != head);
781 if (uptodate && bh == head)
782 SetPageUptodate(page);
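/* batch the additional pages through a pagevec until done or the last index is reached */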
814 pagevec_init(&pvec, 0);
815 while (!done && tindex <= tlast) {
821 for (i = 0; i < pagevec_count(&pvec); i++) {
828 pagevec_release(&pvec);
838 trace_xfs_invalidatepage(page->mapping->host, page, offset);
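/* writeback failed: unless the filesystem is already shut down, warn and strip the delalloc state left on the page's buffers */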
863 struct xfs_inode *ip = XFS_I(inode);
864 struct buffer_head *bh, *head;
870 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
873 xfs_alert(ip->i_mount,
874 "page discard on page %p, inode 0x%llx, offset %llu.",
875 page, ip->i_ino, offset);
878 bh = head = page_buffers(page);
883 if (!buffer_delay(bh))
890 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
891 xfs_alert(ip->i_mount,
892 "page discard unable to remove delalloc mapping.");
899 }
while ((bh = bh->b_this_page) != head);
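/* the writepage path: walk every buffer on the page and set up unwritten, delalloc, and plain uptodate buffers for IO */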
921 struct buffer_head *bh, *head;
929 int err, imap_valid = 0, uptodate = 1;
933 trace_xfs_writepage(inode, page, 0);
935 ASSERT(page_has_buffers(page));
959 offset = i_size_read(inode);
961 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
962 if (page->index >= end_index) {
969 if (page->index >= end_index + 1 || offset_into_page == 0) {
985 end_offset = min_t(unsigned long long,
986 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
990 bh = head = page_buffers(page);
1000 if (offset >= end_offset)
1002 if (!buffer_uptodate(bh))
1011 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1016 if (buffer_unwritten(bh)) {
1021 } else if (buffer_delay(bh)) {
1026 } else if (buffer_uptodate(bh)) {
1032 if (PageUptodate(page))
1033 ASSERT(buffer_mapped(bh));
1074 } while (offset += len, ((bh = bh->b_this_page) != head));
1076 if (uptodate && bh == head)
1077 SetPageUptodate(page);
1093 xfs_off_t end_index;
1101 end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1104 if (end_index > last_index)
1105 end_index = last_index;
1131 ClearPageUptodate(page);
1146 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1162 int delalloc, unwritten;
1164 trace_xfs_releasepage(page->mapping->host, page, 0);
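/* get_blocks: translate iblock into a file offset, clamp it to s_maxbytes, and fill in bh_result from the extent found (or allocated) */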
1180 struct buffer_head *bh_result,
1184 struct xfs_inode *ip = XFS_I(inode);
1185 struct xfs_mount *mp = ip->i_mount;
1195 if (XFS_FORCED_SHUTDOWN(mp))
1198 offset = (xfs_off_t)iblock << inode->i_blkbits;
1200 size = bh_result->b_size;
1202 if (!create && direct && offset >= i_size_read(inode))
1212 if (create && !direct) {
1213 lockmode = XFS_ILOCK_EXCL;
1220 if (offset + size > mp->m_super->s_maxbytes)
1221 size = mp->m_super->s_maxbytes - offset;
1265 trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1266 } else if (nimaps) {
1267 trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1270 trace_xfs_get_blocks_notfound(ip, offset, size);
1284 bh_result->b_private = inode;
1285 set_buffer_unwritten(bh_result);
1305 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1306 (offset >= i_size_read(inode)) ||
1308 set_buffer_new(bh_result);
1313 set_buffer_uptodate(bh_result);
1314 set_buffer_mapped(bh_result);
1315 set_buffer_delay(bh_result);
1323 if (direct || size > (1 << inode->i_blkbits)) {
1324 xfs_off_t mapping_size;
1329 ASSERT(mapping_size > 0);
1330 if (mapping_size > size)
1331 mapping_size = size;
1335 bh_result->b_size = mapping_size;
1349 struct buffer_head *bh_result,
1359 struct buffer_head *bh_result,
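/* direct IO completion: if the write went past EOF, push the new size into the in-core inode */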
1393 if (offset + size > i_size_read(ioend->io_inode))
1394 i_size_write(ioend->io_inode, offset + size);
1407 if (private && size > 0)
1422 const struct iovec *iov,
1424 unsigned long nr_segs)
1432 size_t size = iov_length(iov, nr_segs);
1441 if (offset + size > XFS_I(inode)->i_d.di_size) {
1444 goto out_destroy_ioend;
1453 goto out_trans_cancel;
1489 struct xfs_inode *ip = XFS_I(inode);
1496 if (end_fsb <= start_fsb)
1501 end_fsb - start_fsb);
1504 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1505 xfs_alert(ip->i_mount,
1506 "xfs_vm_write_failed: unable to clean up ino %lld",
1524 loff_t to = from + len;
1525 struct buffer_head *bh, *head;
1527 ASSERT(block_offset + from == pos);
1529 head = page_buffers(page);
1531 for (bh = head; bh != head || !block_start;
1532 bh = bh->b_this_page, block_start = block_end,
1533 block_offset += bh->b_size) {
1534 block_end = block_start + bh->b_size;
1537 if (block_end <= from)
1541 if (block_start >= to)
1544 if (!buffer_delay(bh))
1547 if (!buffer_new(bh) && block_offset < i_size_read(inode))
1551 block_offset + bh->b_size);
1569 struct page **pagep,
1590 if (pos + len > i_size_read(inode))
1623 size_t isize = i_size_read(inode);
1624 loff_t to = pos + len;
1640 struct xfs_inode *ip = XFS_I(inode);
1642 trace_xfs_vm_bmap(XFS_I(inode));