#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)       do { } while (0)
# define XB_CLEAR_OWNER(bp)     do { } while (0)
# define XB_GET_OWNER(bp)       do { } while (0)
#endif
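/*
 * With XFS_BUF_LOCK_TRACKING enabled, b_last_holder records the pid of the
 * last task to take the buffer semaphore; in normal builds the macros
 * compile away to nothing.
 */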
#define xb_to_gfp(flags) \
        ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
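/*
 * Readahead is speculative I/O, so its page allocations use __GFP_NORETRY
 * and are allowed to fail under memory pressure; everything else uses
 * GFP_NOFS to avoid recursing into the filesystem from reclaim, and
 * __GFP_NOWARN suppresses allocation-failure warnings in both cases.
 */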
        if (list_empty(&bp->b_lru)) {

        if (list_empty(&bp->b_lru))

        if (!list_empty(&bp->b_lru)) {
                list_del_init(&bp->b_lru);

        if (!list_empty(&bp->b_lru)) {

        if (!list_empty(&bp->b_lru) &&

                list_del_init(&bp->b_lru);
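/*
 * The fragments above are from the buffer LRU helpers (xfs_buf_lru_add/del
 * and the stale path in this version of the file): list_empty(&bp->b_lru)
 * is the "already on the LRU?" test, which is why removal always goes
 * through list_del_init() so the list head stays usable for that check.
 */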
        if (map_count == 1) {
        INIT_LIST_HEAD(&bp->b_lru);
        INIT_LIST_HEAD(&bp->b_list);
        sema_init(&bp->b_sema, 0);

        error = xfs_buf_get_maps(bp, nmaps);
        if (error) {
                kmem_zone_free(xfs_buf_zone, bp);

        for (i = 0; i < nmaps; i++) {
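/*
 * From _xfs_buf_alloc(): buffers are carved from xfs_buf_zone, and
 * sema_init(&bp->b_sema, 0) returns the new buffer already locked.  If
 * xfs_buf_get_maps() fails, the half-built buffer goes straight back to
 * the zone before anyone else can see it.
 */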
        if (xfs_buf_is_vmapped(bp))

        xfs_buf_free_maps(bp);
        kmem_zone_free(xfs_buf_zone, bp);
        unsigned short          page_count, i;

        page_count = end - start;

                        if (!(++retries % 100))
                                xfs_err(NULL,
                "possible memory allocation deadlock in %s (mode:0x%x)",

                } while (retried++ <= 1);
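/*
 * Two retry idioms appear in the allocation path: page allocation loops
 * indefinitely, warning about a possible deadlock every 100 failed
 * attempts, while vm_map_ram() is retried just once after
 * vm_unmap_aliases() ("} while (retried++ <= 1)").  A minimal,
 * self-contained sketch of the first pattern follows; try_alloc() and
 * wait_for_reclaim() are illustrative stand-ins, not names from this file.
 */
#include <stdbool.h>
#include <stdio.h>

static bool try_alloc(void)        { return false; /* pretend it failed */ }
static void wait_for_reclaim(void) { /* kernel would congestion_wait() */ }

static bool alloc_with_retries(int max_tries)
{
        int retries = 0;

        while (retries < max_tries) {   /* the kernel loops forever */
                if (try_alloc())
                        return true;
                if (!(++retries % 100))
                        fprintf(stderr,
                                "possible memory allocation deadlock (%d tries)\n",
                                retries);
                wait_for_reclaim();
        }
        return false;
}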
        xfs_daddr_t             blkno = map[0].bm_bn;

        for (i = 0; i < nmaps; i++)
                numblks += map[i].bm_len;
        numbytes = BBTOB(numblks);

        spin_lock(&pag->pag_buf_lock);
        rbp = &pag->pag_buf_tree.rb_node;

                if (blkno < bp->b_bn)
                        rbp = &(*rbp)->rb_left;
                else if (blkno > bp->b_bn)
                        rbp = &(*rbp)->rb_right;

        rb_link_node(&new_bp->b_rbnode, parent, rbp);

        spin_unlock(&pag->pag_buf_lock);

        spin_unlock(&pag->pag_buf_lock);

        spin_unlock(&pag->pag_buf_lock);

        trace_xfs_buf_find(bp, flags, _RET_IP_);
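/*
 * _xfs_buf_find() caches buffers per allocation group: the lookup key is
 * the first block number (map[0].bm_bn), the length is the sum of all map
 * segments, and the per-AG rbtree is walked under pag_buf_lock, going
 * left/right on b_bn until a match or an empty link where rb_link_node()
 * can splice in a new buffer.  A self-contained sketch of the same
 * lookup-or-insert walk over a plain binary search tree (simplified
 * types, not the kernel rbtree API):
 */
#include <stddef.h>
#include <stdint.h>

struct node {
        uint64_t        blkno;          /* search key, like bp->b_bn */
        struct node     *left, *right;
};

/* Find blkno, or splice new_node into the empty link where it belongs. */
static struct node *
find_or_insert(struct node **root, uint64_t blkno, struct node *new_node)
{
        struct node **linkp = root;

        while (*linkp) {
                struct node *n = *linkp;

                if (blkno < n->blkno)
                        linkp = &n->left;
                else if (blkno > n->blkno)
                        linkp = &n->right;
                else
                        return n;               /* cache hit */
        }
        new_node->blkno = blkno;
        new_node->left = new_node->right = NULL;
        *linkp = new_node;                      /* cache miss: insert */
        return new_node;
}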
624 "%s: failed to map pages\n", __func__);
631 trace_xfs_buf_get(bp, flags,
_RET_IP_);
665 trace_xfs_buf_read(bp, flags,
_RET_IP_);
696 if (bdi_read_congested(target->
bt_bdi))
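/*
 * Readahead backs off when the backing device is already congested: it is
 * optional I/O, so dropping it is cheaper than queueing more work onto a
 * busy device.
 */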
static inline struct page *
mem_to_page(
        void                    *addr)
{
        if (!is_vmalloc_addr(addr))
                return virt_to_page(addr);
        return vmalloc_to_page(addr);
}

        unsigned long           pageaddr;

        offset = (unsigned long)mem - pageaddr;

                bp->b_pages[i] = mem_to_page((void *)pageaddr);
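/*
 * xfs_buf_associate_memory() wraps caller-supplied memory in a buffer: it
 * rounds the address down to a page boundary, keeps the offset into that
 * first page, and converts each page with mem_to_page(), which must tell
 * vmalloc space apart from the direct map because virt_to_page() is only
 * valid for directly mapped addresses.
 */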
        unsigned long           page_count;

        for (i = 0; i < page_count; i++) {

                        "%s: failed to map pages\n", __func__);

        trace_xfs_buf_get_uncached(bp, _RET_IP_);

        xfs_buf_free_maps(bp);
        kmem_zone_free(xfs_buf_zone, bp);
        spin_unlock(&pag->pag_buf_lock);

        spin_unlock(&pag->pag_buf_lock);
        trace_xfs_buf_trylock(bp, _RET_IP_);

        trace_xfs_buf_lock_done(bp, _RET_IP_);

        trace_xfs_buf_iodone(bp, _RET_IP_);
        ASSERT(error >= 0 && error <= 0xffff);

        trace_xfs_buf_ioerror(bp, error, _RET_IP_);

        "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
#ifdef XFSERRORDEBUG

        trace_xfs_bdstrat_shut(bp, _RET_IP_);

                        SHUTDOWN_META_IO_ERROR);
        struct xfs_mount        *mp,

        if (XFS_FORCED_SHUTDOWN(mp)) {
                trace_xfs_bdstrat_shut(bp, _RET_IP_);
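/*
 * xfsbdstrat() refuses to issue I/O against a forcibly shut down
 * filesystem; the buffer is errored out immediately instead of being sent
 * to the device.
 */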
        invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
xfs_buf_ioapply_map(

        offset = *buf_offset;

        *buf_offset += size;

        if (nr_pages > total_nr_pages)
                nr_pages = total_nr_pages;

        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_private = bp;

        for (; size && nr_pages; nr_pages--, page_index++) {

                if (rbytes < nbytes)
                        break;

                sector += BTOBB(nbytes);

        if (likely(bio->bi_size)) {
                if (xfs_buf_is_vmapped(bp)) {
                        flush_kernel_vmap_range(bp->b_addr,
                                                xfs_buf_vmap_len(bp));
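/*
 * The submission path slices a (possibly multi-page) buffer into bios:
 * pages are appended until bio_add_page() takes less than asked
 * (rbytes < nbytes), the partial bio is submitted, and a new one is
 * started at the advanced sector.  Vmapped buffers flush the kernel vmap
 * range first so writes pushed through the alias reach the pages the
 * device will read.
 */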
                xfs_buf_ioapply_map(bp, i, &offset, &size, rw);

        trace_xfs_buf_iorequest(bp, _RET_IP_);

        trace_xfs_buf_iowait(bp, _RET_IP_);

        trace_xfs_buf_iowait_done(bp, _RET_IP_);
        bend = boff + bsize;
        while (boff < bend) {

                page = bp->b_pages[page_index];
        while (!list_empty(&btp->bt_lru)) {

        while (!list_empty(&btp->bt_lru)) {
                if (nr_to_scan-- <= 0)
                        break;

                if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {

                list_move(&bp->b_lru, &dispose);

        while (!list_empty(&dispose)) {
                list_del_init(&bp->b_lru);
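/*
 * The buftarg shrinker ages buffers lazily: each scan decrements
 * b_lru_ref via atomic_add_unless(&bp->b_lru_ref, -1, 0), and only a
 * buffer already at zero moves to a private dispose list (still under the
 * lru lock) to be released outside it.  A sketch of that
 * decrement-unless-zero step using C11 atomics (an illustration, not the
 * kernel implementation):
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool dec_unless_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                /* on failure, old is reloaded with the current value */
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return true;    /* decremented: keep buffer cached */
        }
        return false;                   /* already zero: candidate to free */
}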
        struct xfs_mount        *mp,

        if (mp->m_flags & XFS_MOUNT_BARRIER)
        unsigned int            blocksize,

                        "Cannot set_blocksize to %u on device %s\n",

        return xfs_setsize_buftarg_flags(btp,
                        PAGE_SIZE, bdev_logical_block_size(bdev), 0);

        unsigned int            blocksize,
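/*
 * Buffer target sizing: set_blocksize() on the underlying block device
 * can fail, hence the error report; early mount setup starts from
 * PAGE_SIZE and the device's logical block size before the superblock's
 * sector size is known.
 */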
        struct xfs_mount        *mp,

        INIT_LIST_HEAD(&btp->bt_lru);
        trace_xfs_buf_delwri_queued(bp, _RET_IP_);

        trace_xfs_buf_delwri_queue(bp, _RET_IP_);

        if (list_empty(&bp->b_list)) {
__xfs_buf_delwri_submit(

                if (xfs_buf_ispinned(bp)) {

                        list_del_init(&bp->b_list);

                list_move_tail(&bp->b_list, io_list);
                trace_xfs_buf_delwri_split(bp, _RET_IP_);

                list_del_init(&bp->b_list);
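/*
 * __xfs_buf_delwri_submit() splits the caller's delwri list: in the
 * non-blocking case, pinned buffers and buffers whose lock cannot be taken
 * stay on the list for a later pass, while the rest move to a local
 * io_list for submission in list order.
 */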
        return __xfs_buf_delwri_submit(buffer_list, &io_list, false);

        int                     error = 0, error2;

        __xfs_buf_delwri_submit(buffer_list, &io_list, true);

        while (!list_empty(&io_list)) {

                list_del_init(&bp->b_list);
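/*
 * Two front ends share that helper: xfs_buf_delwri_submit_nowait() passes
 * wait = false and returns without blocking, while xfs_buf_delwri_submit()
 * passes wait = true, then walks io_list waiting on each buffer and
 * folding the per-buffer results into one return value (error/error2).
 */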
        xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",

        if (!xfslogd_workqueue)
                goto out_free_buf_zone;

        kmem_zone_destroy(xfs_buf_zone);

        kmem_zone_destroy(xfs_buf_zone);
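/*
 * Setup and teardown are symmetric: xfs_buf_init() creates the buffer
 * zone and then the xfslogd workqueue, unwinding the zone if the
 * workqueue cannot be created (the first kmem_zone_destroy() is that
 * error path); xfs_buf_terminate() destroys the same resources.
 */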