30 #include <linux/slab.h>
33 #include <asm/unaligned.h>
50 #include <asm/cpufeature.h>
55 static void free_fs_root(
struct btrfs_root *root);
58 static void btrfs_destroy_ordered_operations(
struct btrfs_root *root);
59 static void btrfs_destroy_ordered_extents(
struct btrfs_root *root);
63 static void btrfs_destroy_delalloc_inodes(
struct btrfs_root *root);
64 static int btrfs_destroy_marked_extents(
struct btrfs_root *root,
67 static int btrfs_destroy_pinned_extent(
struct btrfs_root *root,
132 #ifdef CONFIG_DEBUG_LOCK_ALLOC
133 # if BTRFS_MAX_LEVEL != 8
137 static struct btrfs_lockdep_keyset {
139 const char *name_stem;
142 } btrfs_lockdep_keysets[] = {
153 { .id = 0, .name_stem =
"tree" },
156 void __init btrfs_init_lockdep(
void)
161 for (i = 0; i <
ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
162 struct btrfs_lockdep_keyset *
ks = &btrfs_lockdep_keysets[
i];
165 snprintf(ks->names[j],
sizeof(ks->names[j]),
166 "btrfs-%s-%02d", ks->name_stem,
j);
173 struct btrfs_lockdep_keyset *
ks;
178 for (ks = btrfs_lockdep_keysets; ks->id; ks++)
179 if (ks->id == objectid)
183 &ks->keys[level], ks->names[level]);
204 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
219 em->
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
240 return crc32c(seed, data, len);
255 u16 csum_size = btrfs_super_csum_size(root->
fs_info->super_copy);
258 unsigned long cur_len;
261 unsigned long map_start;
262 unsigned long map_len;
265 unsigned long inline_result;
270 &kaddr, &map_start, &map_len);
273 cur_len =
min(len, map_len - (offset - map_start));
279 if (csum_size >
sizeof(inline_result)) {
280 result = kzalloc(csum_size *
sizeof(
char),
GFP_NOFS);
284 result = (
char *)&inline_result;
293 memcpy(&found, result, csum_size);
297 "failed on %llu wanted %X found %X "
300 (
unsigned long long)buf->
start, val, found,
301 btrfs_header_level(buf));
302 if (result != (
char *)&inline_result)
309 if (result != (
char *)&inline_result)
327 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
336 btrfs_header_generation(eb) == parent_transid) {
342 (
unsigned long long)eb->
start,
343 (
unsigned long long)parent_transid,
344 (
unsigned long long)btrfs_header_generation(eb));
357 static int btree_read_extent_buffer_pages(
struct btrfs_root *root,
359 u64 start,
u64 parent_transid)
366 int failed_mirror = 0;
369 io_tree = &BTRFS_I(root->
fs_info->btree_inode)->io_tree;
373 btree_get_extent, mirror_num);
375 if (!verify_parent_transid(io_tree, eb,
395 if (!failed_mirror) {
401 if (mirror_num == failed_mirror)
404 if (mirror_num > num_copies)
408 if (failed && !ret && failed_mirror)
419 static int csum_dirty_buffer(
struct btrfs_root *root,
struct page *page)
426 tree = &BTRFS_I(page->
mapping->host)->io_tree;
431 found_start = btrfs_header_bytenr(eb);
432 if (found_start != start) {
436 if (!PageUptodate(page)) {
440 csum_tree_block(root, eb, 0);
444 static int check_tree_block_fsid(
struct btrfs_root *root,
458 fs_devices = fs_devices->
seed;
/*
 * Report a corrupt leaf block to the kernel log at KERN_CRIT.
 *
 * reason: human-readable description of the corruption detected
 * eb:     the extent_buffer (leaf) found to be corrupt
 * root:   the btrfs_root the leaf belongs to (objectid is logged)
 * slot:   item slot at which the corruption was detected
 *
 * Fix: the original adjacent string literals concatenated to
 * "block=%llu,root=%llu" with no space after the comma, running the two
 * fields together in the log; a space is added after "block=%llu,".
 * The casts to unsigned long long keep the %llu specifiers correct on
 * both 32-bit and 64-bit builds, where u64 may be long or long long.
 */
#define CORRUPT(reason, eb, root, slot)					\
	printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu, "	\
	       "root=%llu, slot=%d\n", reason,				\
	       (unsigned long long)btrfs_header_bytenr(eb),		\
	       (unsigned long long)root->objectid, slot)
481 if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
483 CORRUPT(
"invalid item offset size pair", leaf, root, 0);
494 for (slot = 0; slot < nritems - 1; slot++) {
495 btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
496 btrfs_item_key_to_cpu(leaf, &key, slot + 1);
500 CORRUPT(
"bad key order", leaf, root, slot);
509 if (btrfs_item_offset_nr(leaf, slot) !=
510 btrfs_item_end_nr(leaf, slot + 1)) {
511 CORRUPT(
"slot offset bad", leaf, root, slot);
520 if (btrfs_item_end_nr(leaf, slot) >
522 CORRUPT(
"slot end outside of leaf", leaf, root, slot);
531 struct page *page,
int max_walk)
538 if (start < max_walk)
541 min_start = start - max_walk;
543 while (start >= min_start) {
550 if (eb->
start <= target &&
565 static int btree_readpage_end_io_hook(
struct page *page,
u64 start,
u64 end,
579 tree = &BTRFS_I(page->
mapping->host)->io_tree;
585 extent_buffer_get(eb);
597 found_start = btrfs_header_bytenr(eb);
598 if (found_start != eb->
start) {
601 (
unsigned long long)found_start,
602 (
unsigned long long)eb->
start);
606 if (check_tree_block_fsid(root, eb)) {
608 (
unsigned long long)eb->
start);
612 found_level = btrfs_header_level(eb);
614 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
617 ret = csum_tree_block(root, eb, 1);
628 if (found_level == 0 && check_leaf(root, eb)) {
648 static int btree_io_failed_hook(
struct page *page,
int failed_mirror)
661 static void end_workqueue_bio(
struct bio *bio,
int err)
666 fs_info = end_io_wq->
info;
668 end_io_wq->
work.func = end_workqueue_fn;
669 end_io_wq->
work.flags = 0;
701 struct end_io_wq *end_io_wq;
706 end_io_wq->
private = bio->bi_private;
707 end_io_wq->
end_io = bio->bi_end_io;
709 end_io_wq->
error = 0;
713 bio->bi_private = end_io_wq;
714 bio->bi_end_io = end_workqueue_bio;
739 static void run_one_async_done(
struct btrfs_work *work)
746 fs_info = BTRFS_I(async->
inode)->root->fs_info;
749 limit = limit * 2 / 3;
766 static void run_one_async_free(
struct btrfs_work *work)
775 int rw,
struct bio *bio,
int mirror_num,
794 async->
work.func = run_one_async_start;
795 async->
work.ordered_func = run_one_async_done;
796 async->
work.ordered_free = run_one_async_free;
798 async->
work.flags = 0;
820 static int btree_csum_one_bio(
struct bio *bio)
822 struct bio_vec *bvec = bio->bi_io_vec;
828 while (bio_index < bio->bi_vcnt) {
829 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
830 ret = csum_dirty_buffer(root, bvec->bv_page);
839 static int __btree_submit_bio_start(
struct inode *inode,
int rw,
840 struct bio *bio,
int mirror_num,
841 unsigned long bio_flags,
848 return btree_csum_one_bio(bio);
851 static int __btree_submit_bio_done(
struct inode *inode,
int rw,
struct bio *bio,
852 int mirror_num,
unsigned long bio_flags,
859 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
862 static int check_async_write(
struct inode *inode,
unsigned long bio_flags)
873 static int btree_submit_bio_hook(
struct inode *inode,
int rw,
struct bio *bio,
874 int mirror_num,
unsigned long bio_flags,
877 int async = check_async_write(inode, bio_flags);
893 ret = btree_csum_one_bio(bio);
905 inode, rw, bio, mirror_num, 0,
907 __btree_submit_bio_start,
908 __btree_submit_bio_done);
911 #ifdef CONFIG_MIGRATION
913 struct page *newpage,
struct page *page,
926 if (page_has_private(page) &&
938 tree = &BTRFS_I(mapping->
host)->io_tree;
942 unsigned long thresh = 32 * 1024 * 1024;
948 num_dirty = root->
fs_info->dirty_metadata_bytes;
949 if (num_dirty < thresh)
955 static int btree_readpage(
struct file *
file,
struct page *page)
958 tree = &BTRFS_I(page->
mapping->host)->io_tree;
962 static int btree_releasepage(
struct page *page,
gfp_t gfp_flags)
964 if (PageWriteback(page) || PageDirty(page))
976 static void btree_invalidatepage(
struct page *page,
unsigned long offset)
979 tree = &BTRFS_I(page->
mapping->host)->io_tree;
982 if (PagePrivate(page)) {
984 "on page %llu\n", (
unsigned long long)
page_offset(page));
985 ClearPagePrivate(page);
986 set_page_private(page, 0);
991 static int btree_set_page_dirty(
struct page *page)
995 BUG_ON(!PagePrivate(page));
1005 .readpage = btree_readpage,
1006 .writepages = btree_writepages,
1007 .releasepage = btree_releasepage,
1008 .invalidatepage = btree_invalidatepage,
1009 #ifdef CONFIG_MIGRATION
1010 .migratepage = btree_migratepage,
1012 .set_page_dirty = btree_set_page_dirty,
1019 struct inode *btree_inode = root->
fs_info->btree_inode;
1026 buf, 0,
WAIT_NONE, btree_get_extent, 0);
1035 struct inode *btree_inode = root->
fs_info->btree_inode;
1046 btree_get_extent, mirror_num);
1066 struct inode *btree_inode = root->
fs_info->btree_inode;
1076 struct inode *btree_inode = root->
fs_info->btree_inode;
1098 u32 blocksize,
u64 parent_transid)
1107 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1115 if (btrfs_header_generation(buf) ==
1116 root->
fs_info->running_transaction->transid) {
1120 spin_lock(&root->
fs_info->delalloc_lock);
1121 if (root->
fs_info->dirty_metadata_bytes >= buf->
len)
1122 root->
fs_info->dirty_metadata_bytes -= buf->
len;
1124 spin_unlock(&root->
fs_info->delalloc_lock);
1126 "Can't clear %lu bytes from "
1127 " dirty_mdatadata_bytes (%llu)",
1129 root->
fs_info->dirty_metadata_bytes);
1131 spin_unlock(&root->
fs_info->delalloc_lock);
1135 btrfs_set_lock_blocking(buf);
1210 root, fs_info, objectid);
1218 generation = btrfs_root_generation(&root->
root_item);
1219 blocksize = btrfs_level_size(root, btrfs_root_level(&root->
root_item));
1222 blocksize, generation);
1251 root = btrfs_alloc_root(fs_info);
1257 root, fs_info, objectid);
1263 0, objectid,
NULL, 0, 0, 0);
1265 ret = PTR_ERR(leaf);
1269 bytenr = leaf->
start;
1271 btrfs_set_header_bytenr(leaf, leaf->
start);
1272 btrfs_set_header_generation(leaf, trans->
transid);
1274 btrfs_set_header_owner(leaf, objectid);
1278 (
unsigned long)btrfs_header_fsid(leaf),
1281 (
unsigned long)btrfs_header_chunk_tree_uuid(leaf),
1293 btrfs_set_root_level(&root->
root_item, 0);
1294 btrfs_set_root_refs(&root->
root_item, 1);
1296 btrfs_set_root_last_snapshot(&root->
root_item, 0);
1297 btrfs_set_root_dirid(&root->
root_item, 0);
1311 return ERR_PTR(ret);
1323 root = btrfs_alloc_root(fs_info);
1347 return ERR_CAST(leaf);
1351 btrfs_set_header_bytenr(leaf, leaf->
start);
1352 btrfs_set_header_generation(leaf, trans->
transid);
1358 (
unsigned long)btrfs_header_fsid(root->
node),
1370 log_root = alloc_log_tree(trans, fs_info);
1371 if (IS_ERR(log_root))
1372 return PTR_ERR(log_root);
1384 log_root = alloc_log_tree(trans, root->
fs_info);
1385 if (IS_ERR(log_root))
1386 return PTR_ERR(log_root);
1391 inode_item = &log_root->
root_item.inode;
1419 root = btrfs_alloc_root(fs_info);
1423 ret = find_and_setup_root(tree_root, fs_info,
1427 return ERR_PTR(ret);
1434 root, fs_info, location->
objectid);
1444 slot = path->
slots[0];
1453 return ERR_PTR(ret);
1456 generation = btrfs_root_generation(&root->
root_item);
1457 blocksize = btrfs_level_size(root, btrfs_root_level(&root->
root_item));
1459 blocksize, generation);
1493 (
unsigned long)location->
objectid);
1519 if (btrfs_root_refs(&root->
root_item) == 0) {
1536 (
unsigned long)root->
root_key.objectid,
1542 radix_tree_preload_end();
1557 return ERR_PTR(ret);
1560 static int btrfs_congested_fn(
void *congested_data,
int bdi_bits)
1572 if (bdi && bdi_congested(bdi, bdi_bits)) {
1604 static void end_workqueue_fn(
struct btrfs_work *work)
1607 struct end_io_wq *end_io_wq;
1611 end_io_wq =
container_of(work,
struct end_io_wq, work);
1612 bio = end_io_wq->
bio;
1613 fs_info = end_io_wq->
info;
1615 error = end_io_wq->
error;
1616 bio->bi_private = end_io_wq->
private;
1617 bio->bi_end_io = end_io_wq->
end_io;
1635 if (!try_to_freeze()) {
1645 static int transaction_kthread(
void *arg)
1652 unsigned long delay;
1656 cannot_commit =
false;
1660 spin_lock(&root->
fs_info->trans_lock);
1661 cur = root->
fs_info->running_transaction;
1663 spin_unlock(&root->
fs_info->trans_lock);
1670 spin_unlock(&root->
fs_info->trans_lock);
1675 spin_unlock(&root->
fs_info->trans_lock);
1679 if (IS_ERR(trans)) {
1680 if (PTR_ERR(trans) != -
ENOENT)
1681 cannot_commit =
true;
1684 if (transid == trans->
transid) {
1693 if (!try_to_freeze()) {
1714 static int find_newest_super_backup(
struct btrfs_fs_info *info,
u64 newest_gen)
1717 int newest_index = -1;
1723 cur = btrfs_backup_tree_root_gen(root_backup);
1724 if (cur == newest_gen)
1729 if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1731 cur = btrfs_backup_tree_root_gen(root_backup);
1732 if (cur == newest_gen)
1735 return newest_index;
1744 static void find_oldest_super_backup(
struct btrfs_fs_info *info,
1747 int newest_index = -1;
1749 newest_index = find_newest_super_backup(info, newest_gen);
1751 if (newest_index == -1) {
1770 last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1771 BTRFS_NUM_BACKUP_ROOTS;
1778 if (btrfs_backup_tree_root_gen(root_backup) ==
1779 btrfs_header_generation(info->
tree_root->node))
1780 next_backup = last_backup;
1788 memset(root_backup, 0,
sizeof(*root_backup));
1792 btrfs_set_backup_tree_root(root_backup, info->
tree_root->node->start);
1793 btrfs_set_backup_tree_root_gen(root_backup,
1794 btrfs_header_generation(info->
tree_root->node));
1796 btrfs_set_backup_tree_root_level(root_backup,
1797 btrfs_header_level(info->
tree_root->node));
1799 btrfs_set_backup_chunk_root(root_backup, info->
chunk_root->node->start);
1800 btrfs_set_backup_chunk_root_gen(root_backup,
1801 btrfs_header_generation(info->
chunk_root->node));
1802 btrfs_set_backup_chunk_root_level(root_backup,
1805 btrfs_set_backup_extent_root(root_backup, info->
extent_root->node->start);
1806 btrfs_set_backup_extent_root_gen(root_backup,
1807 btrfs_header_generation(info->
extent_root->node));
1808 btrfs_set_backup_extent_root_level(root_backup,
1816 btrfs_set_backup_fs_root(root_backup,
1818 btrfs_set_backup_fs_root_gen(root_backup,
1819 btrfs_header_generation(info->
fs_root->node));
1820 btrfs_set_backup_fs_root_level(root_backup,
1821 btrfs_header_level(info->
fs_root->node));
1824 btrfs_set_backup_dev_root(root_backup, info->
dev_root->node->start);
1825 btrfs_set_backup_dev_root_gen(root_backup,
1826 btrfs_header_generation(info->
dev_root->node));
1827 btrfs_set_backup_dev_root_level(root_backup,
1828 btrfs_header_level(info->
dev_root->node));
1830 btrfs_set_backup_csum_root(root_backup, info->
csum_root->node->start);
1831 btrfs_set_backup_csum_root_gen(root_backup,
1832 btrfs_header_generation(info->
csum_root->node));
1833 btrfs_set_backup_csum_root_level(root_backup,
1834 btrfs_header_level(info->
csum_root->node));
1836 btrfs_set_backup_total_bytes(root_backup,
1838 btrfs_set_backup_bytes_used(root_backup,
1840 btrfs_set_backup_num_devices(root_backup,
1849 sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1862 int *num_backups_tried,
int *backup_index)
1865 int newest = *backup_index;
1867 if (*num_backups_tried == 0) {
1868 u64 gen = btrfs_super_generation(super);
1870 newest = find_newest_super_backup(info, gen);
1874 *backup_index = newest;
1875 *num_backups_tried = 1;
1876 }
else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1881 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1882 BTRFS_NUM_BACKUP_ROOTS;
1883 *backup_index = newest;
1884 *num_backups_tried += 1;
1888 btrfs_set_super_generation(super,
1889 btrfs_backup_tree_root_gen(root_backup));
1890 btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1891 btrfs_set_super_root_level(super,
1892 btrfs_backup_tree_root_level(root_backup));
1893 btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1899 btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1900 btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1954 struct buffer_head *bh;
1966 int num_backups_tried = 0;
1967 int backup_index = 0;
1969 tree_root = fs_info->
tree_root = btrfs_alloc_root(fs_info);
1970 extent_root = fs_info->
extent_root = btrfs_alloc_root(fs_info);
1971 csum_root = fs_info->
csum_root = btrfs_alloc_root(fs_info);
1972 chunk_root = fs_info->
chunk_root = btrfs_alloc_root(fs_info);
1973 dev_root = fs_info->
dev_root = btrfs_alloc_root(fs_info);
1974 quota_root = fs_info->
quota_root = btrfs_alloc_root(fs_info);
1976 if (!tree_root || !extent_root || !csum_root ||
1977 !chunk_root || !dev_root || !quota_root) {
1988 ret = setup_bdi(fs_info, &fs_info->
bdi);
2072 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2073 fs_info->check_integrity_print_mask = 0;
2096 fs_info->
btree_inode->i_mapping->a_ops = &btree_aops;
2097 fs_info->
btree_inode->i_mapping->backing_dev_info = &fs_info->
bdi;
2102 BTRFS_I(fs_info->
btree_inode)->io_tree.track_uptodate = 0;
2105 BTRFS_I(fs_info->
btree_inode)->io_tree.ops = &btree_extent_io_ops;
2150 __setup_root(4096, 4096, 4096, 4096, tree_root,
2168 if (!btrfs_super_root(disk_super))
2172 fs_info->
fs_state |= btrfs_super_flags(disk_super);
2185 generation = btrfs_super_generation(disk_super);
2186 find_oldest_super_backup(fs_info, generation);
2200 features = btrfs_super_incompat_flags(disk_super) &
2204 "unsupported optional features (%Lx).\n",
2205 (
unsigned long long)features);
2210 if (btrfs_super_leafsize(disk_super) !=
2211 btrfs_super_nodesize(disk_super)) {
2213 "blocksizes don't match. node %d leaf %d\n",
2214 btrfs_super_nodesize(disk_super),
2215 btrfs_super_leafsize(disk_super));
2221 "blocksize (%d) was too large\n",
2222 btrfs_super_leafsize(disk_super));
2227 features = btrfs_super_incompat_flags(disk_super);
2242 nodesize = btrfs_super_nodesize(disk_super);
2243 leafsize = btrfs_super_leafsize(disk_super);
2244 sectorsize = btrfs_super_sectorsize(disk_super);
2245 stripesize = btrfs_super_stripesize(disk_super);
2252 (sectorsize != leafsize)) {
2254 "are not allowed for mixed block groups on %s\n",
2259 btrfs_set_super_incompat_flags(disk_super, features);
2261 features = btrfs_super_compat_ro_flags(disk_super) &
2265 "unsupported option features (%Lx).\n",
2266 (
unsigned long long)features);
2272 "genwork", 1,
NULL);
2296 fs_info->
workers.idle_thresh = 16;
2355 goto fail_sb_buffer;
2358 fs_info->
bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2359 fs_info->
bdi.ra_pages =
max(fs_info->
bdi.ra_pages,
2371 sizeof(disk_super->
magic))) {
2373 goto fail_sb_buffer;
2378 "found on %s\n", (
unsigned long)sectorsize, sb->
s_id);
2379 goto fail_sb_buffer;
2387 "array on %s\n", sb->
s_id);
2388 goto fail_sb_buffer;
2391 blocksize = btrfs_level_size(tree_root,
2392 btrfs_super_chunk_root_level(disk_super));
2393 generation = btrfs_super_chunk_root_generation(disk_super);
2395 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2399 btrfs_super_chunk_root(disk_super),
2400 blocksize, generation);
2405 goto fail_tree_roots;
2411 (
unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->
node),
2418 goto fail_tree_roots;
2426 goto fail_tree_roots;
2430 blocksize = btrfs_level_size(tree_root,
2431 btrfs_super_root_level(disk_super));
2432 generation = btrfs_super_generation(disk_super);
2435 btrfs_super_root(disk_super),
2436 blocksize, generation);
2437 if (!tree_root->
node ||
2442 goto recovery_tree_root;
2448 ret = find_and_setup_root(tree_root, fs_info,
2451 goto recovery_tree_root;
2454 ret = find_and_setup_root(tree_root, fs_info,
2457 goto recovery_tree_root;
2460 ret = find_and_setup_root(tree_root, fs_info,
2463 goto recovery_tree_root;
2466 ret = find_and_setup_root(tree_root, fs_info,
2483 goto fail_block_groups;
2490 goto fail_block_groups;
2496 goto fail_block_groups;
2502 goto fail_block_groups;
2510 goto fail_block_groups;
2514 "btrfs-transaction");
2526 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2530 CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2532 fs_info->check_integrity_print_mask);
2535 " integrity check module %s\n", sb->
s_id);
2540 goto fail_trans_kthread;
2543 if (btrfs_super_log_root(disk_super) != 0) {
2544 u64 bytenr = btrfs_super_log_root(disk_super);
2553 btrfs_level_size(tree_root,
2554 btrfs_super_log_root_level(disk_super));
2556 log_tree_root = btrfs_alloc_root(fs_info);
2557 if (!log_tree_root) {
2562 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2572 "Failed to recover log tree");
2574 kfree(log_tree_root);
2575 goto fail_trans_kthread;
2581 goto fail_trans_kthread;
2587 goto fail_trans_kthread;
2592 goto fail_trans_kthread;
2597 "btrfs: failed to recover relocation\n");
2610 if (IS_ERR(fs_info->
fs_root)) {
2611 err = PTR_ERR(fs_info->
fs_root);
2654 free_root_pointers(fs_info, 1);
2686 goto fail_tree_roots;
2688 free_root_pointers(fs_info, 0);
2691 btrfs_set_super_log_root(disk_super, 0);
2696 ret = next_root_backup(fs_info, fs_info->
super_copy,
2697 &num_backups_tried, &backup_index);
2699 goto fail_block_groups;
2700 goto retry_root_backup;
2703 static void btrfs_end_buffer_write_sync(
struct buffer_head *bh,
int uptodate)
2706 set_buffer_uptodate(bh);
2712 "I/O error on %s\n",
2717 clear_buffer_uptodate(bh);
2726 struct buffer_head *bh;
2727 struct buffer_head *latest =
NULL;
2738 for (i = 0; i < 1; i++) {
2739 bytenr = btrfs_sb_offset(i);
2740 if (bytenr + 4096 >= i_size_read(bdev->
bd_inode))
2742 bh =
__bread(bdev, bytenr / 4096, 4096);
2747 if (btrfs_super_bytenr(super) != bytenr ||
2749 sizeof(super->
magic))) {
2754 if (!latest || btrfs_super_generation(super) > transid) {
2757 transid = btrfs_super_generation(super);
2776 static int write_dev_supers(
struct btrfs_device *device,
2778 int do_barriers,
int wait,
int max_mirrors)
2780 struct buffer_head *bh;
2787 if (max_mirrors == 0)
2790 for (i = 0; i < max_mirrors; i++) {
2791 bytenr = btrfs_sb_offset(i);
2800 if (!buffer_uptodate(bh))
2810 btrfs_set_super_bytenr(sb, bytenr);
2830 set_buffer_uptodate(bh);
2832 bh->b_end_io = btrfs_end_buffer_write_sync;
2833 bh->b_private = device;
2844 return errors < i ? 0 : -1;
2851 static void btrfs_end_empty_barrier(
struct bio *bio,
int err)
2855 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2856 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2858 if (bio->bi_private)
2870 static int write_dev_flush(
struct btrfs_device *device,
int wait)
2885 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2889 }
else if (!bio_flagged(bio, BIO_UPTODATE)) {
2911 bio->bi_end_io = btrfs_end_empty_barrier;
2912 bio->bi_bdev = device->
bdev;
2931 int errors_send = 0;
2932 int errors_wait = 0;
2937 list_for_each_entry_rcu(dev, head,
dev_list) {
2945 ret = write_dev_flush(dev, 0);
2951 list_for_each_entry_rcu(dev, head,
dev_list) {
2959 ret = write_dev_flush(dev, 1);
2981 int num_tolerated_disk_barrier_failures =
2984 for (i = 0; i < num_types; i++) {
2990 if (tmp->
flags == types[i]) {
3010 flags = space.
flags;
3023 if (num_tolerated_disk_barrier_failures > 0 &&
3028 num_tolerated_disk_barrier_failures = 0;
3029 else if (num_tolerated_disk_barrier_failures > 1
3033 num_tolerated_disk_barrier_failures = 1;
3039 return num_tolerated_disk_barrier_failures;
3051 int total_errors = 0;
3054 max_errors = btrfs_super_num_devices(root->
fs_info->super_copy) - 1;
3056 backup_super_roots(root->
fs_info);
3058 sb = root->
fs_info->super_for_commit;
3062 head = &root->
fs_info->fs_devices->devices;
3065 ret = barrier_all_devices(root->
fs_info);
3068 &root->
fs_info->fs_devices->device_list_mutex);
3070 "errors while submitting device barriers.");
3075 list_for_each_entry_rcu(dev, head,
dev_list) {
3083 btrfs_set_stack_device_generation(dev_item, 0);
3084 btrfs_set_stack_device_type(dev_item, dev->
type);
3085 btrfs_set_stack_device_id(dev_item, dev->
devid);
3086 btrfs_set_stack_device_total_bytes(dev_item, dev->
total_bytes);
3087 btrfs_set_stack_device_bytes_used(dev_item, dev->
bytes_used);
3088 btrfs_set_stack_device_io_align(dev_item, dev->
io_align);
3089 btrfs_set_stack_device_io_width(dev_item, dev->
io_width);
3090 btrfs_set_stack_device_sector_size(dev_item, dev->
sector_size);
3094 flags = btrfs_super_flags(sb);
3097 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
3101 if (total_errors > max_errors) {
3110 list_for_each_entry_rcu(dev, head,
dev_list) {
3116 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
3121 if (total_errors > max_errors) {
3123 "%d errors while writing supers", total_errors);
3142 (
unsigned long)root->
root_key.objectid);
3145 if (btrfs_root_refs(&root->
root_item) == 0)
3153 static void free_fs_root(
struct btrfs_root *root)
3193 for (i = 0; i <
ret; i++)
3207 (
void **)gang, root_objectid,
3212 root_objectid = gang[ret - 1]->
root_key.objectid + 1;
3213 for (i = 0; i <
ret; i++) {
3216 root_objectid = gang[
i]->
root_key.objectid;
3242 return PTR_ERR(trans);
3249 return PTR_ERR(trans);
3256 "Failed to sync btree inode to disk.");
3325 del_fs_roots(fs_info);
3343 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
3361 struct inode *btree_inode = buf->
pages[0]->mapping->host;
3367 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
3368 parent_transid, atomic);
3381 struct btrfs_root *root = BTRFS_I(buf->
pages[0]->mapping->host)->root;
3382 u64 transid = btrfs_header_generation(buf);
3386 if (transid != root->
fs_info->generation) {
3388 "found %llu running %llu\n",
3389 (
unsigned long long)buf->
start,
3390 (
unsigned long long)transid,
3391 (
unsigned long long)root->
fs_info->generation);
3396 spin_lock(&root->
fs_info->delalloc_lock);
3397 root->
fs_info->dirty_metadata_bytes += buf->
len;
3398 spin_unlock(&root->
fs_info->delalloc_lock);
3409 unsigned long thresh = 32 * 1024 * 1024;
3416 num_dirty = root->
fs_info->dirty_metadata_bytes;
3418 if (num_dirty > thresh) {
3420 root->
fs_info->btree_inode->i_mapping, 1);
3432 unsigned long thresh = 32 * 1024 * 1024;
3437 num_dirty = root->
fs_info->dirty_metadata_bytes;
3439 if (num_dirty > thresh) {
3441 root->
fs_info->btree_inode->i_mapping, 1);
3448 struct btrfs_root *root = BTRFS_I(buf->
pages[0]->mapping->host)->root;
3449 return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3452 static int btrfs_check_super_valid(
struct btrfs_fs_info *fs_info,
3479 static void btrfs_destroy_ordered_operations(
struct btrfs_root *root)
3484 INIT_LIST_HEAD(&splice);
3487 spin_lock(&root->
fs_info->ordered_extent_lock);
3489 list_splice_init(&root->
fs_info->ordered_operations, &splice);
3490 while (!list_empty(&splice)) {
3491 btrfs_inode =
list_entry(splice.next,
struct btrfs_inode,
3492 ordered_operations);
3499 spin_unlock(&root->
fs_info->ordered_extent_lock);
3503 static void btrfs_destroy_ordered_extents(
struct btrfs_root *root)
3507 struct inode *
inode;
3509 INIT_LIST_HEAD(&splice);
3511 spin_lock(&root->
fs_info->ordered_extent_lock);
3513 list_splice_init(&root->
fs_info->ordered_extents, &splice);
3514 while (!list_empty(&splice)) {
3524 spin_unlock(&root->
fs_info->ordered_extent_lock);
3531 spin_lock(&root->
fs_info->ordered_extent_lock);
3534 spin_unlock(&root->
fs_info->ordered_extent_lock);
3547 spin_lock(&delayed_refs->
lock);
3549 spin_unlock(&delayed_refs->
lock);
3558 if (btrfs_delayed_ref_is_head(ref)) {
3561 head = btrfs_delayed_node_to_head(ref);
3564 spin_unlock(&delayed_refs->
lock);
3569 btrfs_put_delayed_ref(ref);
3571 spin_lock(&delayed_refs->
lock);
3577 if (list_empty(&head->
cluster))
3579 list_del_init(&head->
cluster);
3585 spin_unlock(&delayed_refs->
lock);
3586 btrfs_put_delayed_ref(ref);
3589 spin_lock(&delayed_refs->
lock);
3592 spin_unlock(&delayed_refs->
lock);
3602 INIT_LIST_HEAD(&splice);
3606 while (!list_empty(&splice)) {
3611 list_del_init(&snapshot->
list);
3617 static void btrfs_destroy_delalloc_inodes(
struct btrfs_root *root)
3619 struct btrfs_inode *btrfs_inode;
3622 INIT_LIST_HEAD(&splice);
3624 spin_lock(&root->
fs_info->delalloc_lock);
3625 list_splice_init(&root->
fs_info->delalloc_inodes, &splice);
3627 while (!list_empty(&splice)) {
3628 btrfs_inode =
list_entry(splice.next,
struct btrfs_inode,
3636 spin_unlock(&root->
fs_info->delalloc_lock);
3639 static int btrfs_destroy_marked_extents(
struct btrfs_root *root,
3645 struct inode *btree_inode = root->
fs_info->btree_inode;
3650 unsigned long index;
3659 while (start <= end) {
3669 &(&BTRFS_I(page->
mapping->host)->io_tree)->buffer,
3670 offset >> PAGE_CACHE_SHIFT);
3675 if (PageWriteback(page))
3679 if (PageDirty(page)) {
3681 spin_lock_irq(&page->
mapping->tree_lock);
3685 spin_unlock_irq(&page->
mapping->tree_lock);
3696 static int btrfs_destroy_pinned_extent(
struct btrfs_root *root,
3705 unpin = pinned_extents;
3725 if (unpin == &root->
fs_info->freed_extents[0])
3726 unpin = &root->
fs_info->freed_extents[1];
3728 unpin = &root->
fs_info->freed_extents[0];
3739 btrfs_destroy_delayed_refs(cur_trans, root);
3757 btrfs_destroy_pending_snapshots(cur_trans);
3759 btrfs_destroy_marked_extents(root, &cur_trans->
dirty_pages,
3761 btrfs_destroy_pinned_extent(root,
3762 root->
fs_info->pinned_extents);
3777 spin_lock(&root->
fs_info->trans_lock);
3778 list_splice_init(&root->
fs_info->trans_list, &
list);
3779 root->
fs_info->trans_no_join = 1;
3780 spin_unlock(&root->
fs_info->trans_lock);
3782 while (!list_empty(&
list)) {
3787 btrfs_destroy_ordered_operations(root);
3789 btrfs_destroy_ordered_extents(root);
3791 btrfs_destroy_delayed_refs(t, root);
3794 &root->
fs_info->trans_block_rsv,
3801 if (waitqueue_active(&root->
fs_info->transaction_blocked_wait))
3806 if (waitqueue_active(&root->
fs_info->transaction_wait))
3817 btrfs_destroy_pending_snapshots(t);
3819 btrfs_destroy_delalloc_inodes(root);
3821 spin_lock(&root->
fs_info->trans_lock);
3823 spin_unlock(&root->
fs_info->trans_lock);
3825 btrfs_destroy_marked_extents(root, &t->
dirty_pages,
3828 btrfs_destroy_pinned_extent(root,
3829 root->
fs_info->pinned_extents);
3832 list_del_init(&t->
list);
3833 memset(t, 0,
sizeof(*t));
3837 spin_lock(&root->
fs_info->trans_lock);
3838 root->
fs_info->trans_no_join = 0;
3839 spin_unlock(&root->
fs_info->trans_lock);
3846 .readpage_end_io_hook = btree_readpage_end_io_hook,
3847 .readpage_io_failed_hook = btree_io_failed_hook,
3848 .submit_bio_hook = btree_submit_bio_hook,