#include <linux/sched.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

#define MAX_EXTENTS 128
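/*
 * Data relocation happens in two stages: MOVE_DATA_EXTENTS copies the
 * extents into the relocation data inode, then UPDATE_DATA_PTRS rewrites
 * the file extent items to point at the relocated copies.
 */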
#define MOVE_DATA_EXTENTS 0
#define UPDATE_DATA_PTRS 1
INIT_LIST_HEAD(&cache->pending[i]);
INIT_LIST_HEAD(&cache->changed);
INIT_LIST_HEAD(&cache->leaves);
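/*
 * Release everything in the backref cache: detached nodes first, then the
 * cached leaves and the upper-level nodes reachable from them.
 */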
static void backref_cache_cleanup(struct backref_cache *cache)
while (!list_empty(&cache->detached)) {
remove_backref_node(cache, node);
while (!list_empty(&cache->leaves)) {
remove_backref_node(cache, node);
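/*
 * Node and edge allocators use GFP_NOFS so memory reclaim cannot recurse
 * back into the filesystem while the backref cache is being built.
 */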
node = kzalloc(sizeof(*node), GFP_NOFS);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->upper);
INIT_LIST_HEAD(&node->lower);
edge = kzalloc(sizeof(*edge), GFP_NOFS);
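/* Backref nodes are indexed in an rb-tree keyed by block bytenr. */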
if (bytenr < entry->bytenr)
else if (bytenr > entry->bytenr)
rb_link_node(node, parent, p);
if (bytenr < entry->bytenr)
else if (bytenr > entry->bytenr)
fs_info = bnode->root->fs_info;
btrfs_panic(fs_info, errno, "Inconsistency in backref cache found at offset %llu\n", (unsigned long long)bytenr);
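/*
 * walk_up_backref()/walk_down_backref() step along the edges between a
 * block and the blocks referencing it, letting callers visit every cached
 * path from a block towards its tree roots.
 */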
while (!list_empty(&node->upper)) {
edge = edges[idx - 1];
edges[idx - 1] = edge;
static void unlock_node_buffer(struct backref_node *node)
unlock_node_buffer(node);
drop_node_buffer(node);
free_backref_node(tree, node);
while (!list_empty(&node->upper)) {
free_backref_edge(cache, edge);
drop_backref_node(cache, node);
if (list_empty(&upper->lower)) {
drop_backref_node(cache, node);
while (!list_empty(&cache->detached)) {
remove_backref_node(cache, node);
while (!list_empty(&cache->changed)) {
list_del_init(&node->list);
update_backref_node(cache, node, node->new_bytenr);
update_backref_node(cache, node, node->new_bytenr);
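/*
 * If the reloc tree of a fs root was created in the current transaction,
 * backref lookup already finds the reloc tree, so the backref node for the
 * fs root itself is useless for relocation and can be ignored.
 */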
static int should_ignore_root(struct btrfs_root *root)
if (btrfs_root_last_snapshot(&reloc_root->root_item) == root->fs_info->running_transaction->transid - 1)
struct rb_node *rb_node;
if (is_cowonly_root(root_objectid))
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
root = read_fs_root(rc->extent_root->fs_info, root_objectid);
generation != btrfs_root_generation(&root->root_item))
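/*
 * find_inline_backref() returns the [ptr, end) byte range occupied by the
 * inline backrefs of an extent item.
 */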
static noinline_for_stack
unsigned long *ptr, unsigned long *end)
item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
WARN_ON(!(btrfs_extent_flags(leaf, ei) &
if (item_size <= sizeof(*ei) + sizeof(*bi)) {
WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
*ptr = (unsigned long)(bi + 1);
*end = (unsigned long)ei + item_size;
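/*
 * build_backref_tree() is the core of the cache: starting from one tree
 * block it walks the extent tree backrefs, allocating nodes and edges until
 * every path from the block to a tree root is known. Blocks belonging to
 * trees that will not be relocated are collected on the 'useless' list and
 * dropped before returning.
 */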
static noinline_for_stack
int level, u64 bytenr)
struct rb_node *rb_node;
if (!path1 || !path2) {
node = alloc_backref_node(cache);
if (!list_empty(&cur->upper)) {
eb = path1->nodes[0];
if (path1->slots[0] >= btrfs_header_nritems(eb)) {
eb = path1->nodes[0];
btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
ret = find_inline_backref(eb, path1->slots[0],
key.type = btrfs_extent_inline_ref_type(eb, iref);
key.offset = btrfs_extent_inline_ref_offset(eb, iref);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (key.objectid == key.offset) {
root = find_tree_root(rc, eb, ref0);
if (root && !should_ignore_root(root))
list_add(&cur->list, &useless);
if (is_cowonly_root(btrfs_ref_root_v0(eb,
if (key.objectid == key.offset) {
root = find_reloc_root(rc, cur->bytenr);
edge = alloc_backref_edge(cache);
rb_node = tree_search(&cache->rb_root, key.offset);
upper = alloc_backref_node(cache);
free_backref_edge(cache, edge);
if (should_ignore_root(root))
list_add(&cur->list, &useless);
level = cur->level + 1;
if (ret > 0 && path2->slots[level] > 0)
if (!path2->nodes[level]) {
if (should_ignore_root(root))
list_add(&lower->list, &useless);
edge = alloc_backref_edge(cache);
upper = alloc_backref_node(cache);
free_backref_edge(cache, edge);
upper->owner = btrfs_header_owner(eb);
level == cur->level + 1) {
upper->owner = btrfs_header_owner(eb);
ptr += btrfs_extent_inline_ref_size(key.type);
if (!list_empty(&list)) {
while (!list_empty(&list)) {
free_backref_edge(cache, edge);
if (list_empty(&lower->upper))
list_add(&lower->list, &useless);
list_del_init(&upper->lower);
while (!list_empty(&useless)) {
list_del_init(&upper->list);
list_del_init(&upper->lower);
while (!list_empty(&upper->lower)) {
free_backref_edge(cache, edge);
if (list_empty(&lower->upper))
list_add(&lower->list, &useless);
__mark_block_processed(rc, upper);
if (upper->level > 0) {
free_backref_node(cache, upper);
while (!list_empty(&useless)) {
list_del_init(&lower->upper);
INIT_LIST_HEAD(&list);
free_backref_node(cache, upper);
if (list_empty(&list))
free_backref_edge(cache, edge);
return ERR_PTR(err);
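/*
 * After a snapshot is created, clone_backref_node() duplicates the backref
 * node of the source reloc root so the new reloc tree shares the cached
 * backref information.
 */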
struct rb_node *rb_node;
update_backref_cache(trans, cache);
rb_node = tree_search(&cache->rb_root,
new_node = alloc_backref_node(cache);
new_edge = alloc_backref_edge(cache);
while (!list_empty(&new_node->lower)) {
free_backref_edge(cache, new_edge);
free_backref_node(cache, new_node);
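/*
 * Reloc roots live in an rb-tree keyed by the bytenr of their root node;
 * __add_reloc_root() inserts them and __update_reloc_root() re-keys them
 * when the root node is COWed.
 */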
struct rb_node *rb_node;
"for start=%llu while inserting into relocation "
static int __update_reloc_root(struct btrfs_root *root, int del)
struct rb_node *rb_node;
spin_lock(&root->fs_info->trans_lock);
spin_unlock(&root->fs_info->trans_lock);
if (root->root_key.objectid == objectid) {
btrfs_set_root_last_snapshot(&root->root_item,
btrfs_set_root_bytenr(root_item, eb->start);
btrfs_set_root_level(root_item, btrfs_header_level(eb));
btrfs_set_root_generation(root_item, trans->transid);
if (root->root_key.objectid == objectid) {
btrfs_set_root_refs(root_item, 0);
&root_key, root_item);
BUG_ON(IS_ERR(reloc_root));
reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
ret = __add_reloc_root(reloc_root);
if (root->fs_info->reloc_ctl->merge_reloc_tree && btrfs_root_refs(root_item) == 0) {
__update_reloc_root(reloc_root, del);
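/*
 * find_next_inode() returns the first in-memory inode of a root whose inode
 * number is >= objectid, using the root's inode rb-tree.
 */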
struct rb_node *node;
struct rb_node *prev;
if (objectid < btrfs_ino(&entry->vfs_inode))
else if (objectid > btrfs_ino(&entry->vfs_inode))
if (objectid <= btrfs_ino(&entry->vfs_inode)) {
objectid = btrfs_ino(&entry->vfs_inode) + 1;
static int in_block_group(u64 bytenr,
if (bytenr >= block_group->key.objectid && bytenr < block_group->key.objectid + block_group->key.offset)
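/*
 * get_new_location() looks up the file extent in the relocation data inode
 * covering the old extent and returns the disk bytenr of its relocated
 * copy.
 */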
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
bytenr -= BTRFS_I(reloc_inode)->index_cnt;
leaf = path->nodes[0];
BUG_ON(btrfs_file_extent_offset(leaf, fi) || btrfs_file_extent_compression(leaf, fi) || btrfs_file_extent_encryption(leaf, fi) || btrfs_file_extent_other_encoding(leaf, fi));
if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
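/*
 * replace_file_extents() rewrites the file extent items in a leaf to point
 * at the relocated data extents, taking a reference on each new extent and
 * dropping one on the old extent for every pointer it updates.
 */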
static noinline_for_stack
struct inode *inode = NULL;
parent = leaf->start;
nritems = btrfs_header_nritems(leaf);
for (i = 0; i < nritems; i++) {
btrfs_item_key_to_cpu(leaf, &key, i);
if (btrfs_file_extent_type(leaf, fi) ==
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
inode = find_next_inode(root, key.objectid);
} else if (inode && btrfs_ino(inode) < key.objectid) {
inode = find_next_inode(root, key.objectid);
if (inode && btrfs_ino(inode) == key.objectid) {
btrfs_file_extent_num_bytes(leaf, fi);
ret = get_new_location(rc->data_inode, &new_bytenr,
btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
key.offset -= btrfs_file_extent_offset(leaf, fi);
btrfs_header_owner(leaf),
key.objectid, key.offset, 1);
parent, btrfs_header_owner(leaf),
key.objectid, key.offset, 1);
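/*
 * replace_path() swaps a subtree of the fs tree with the corresponding
 * modified subtree of the reloc tree, updating block pointers, generations
 * and extent references on both sides; memcmp_node_keys() checks whether
 * two node slots still carry the same key.
 */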
static noinline_for_stack
return memcmp(&key1, &key2, sizeof(key1));
static noinline_for_stack
last_snapshot = btrfs_root_last_snapshot(&src->root_item);
slot = path->slots[lowest_level];
btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
btrfs_set_lock_blocking(eb);
level = btrfs_header_level(eb);
if (level < lowest_level) {
btrfs_set_lock_blocking(eb);
level = btrfs_header_level(parent);
BUG_ON(level < lowest_level);
if (ret && slot > 0)
if (next_key && slot + 1 < btrfs_header_nritems(parent))
btrfs_node_key_to_cpu(parent, next_key, slot + 1);
old_bytenr = btrfs_node_blockptr(parent, slot);
blocksize = btrfs_level_size(dest, level - 1);
old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
if (level <= max_level) {
new_bytenr = btrfs_node_blockptr(eb, path->slots[level]);
new_ptr_gen = btrfs_node_ptr_generation(eb, path->slots[level]);
if (new_bytenr > 0 && new_bytenr == old_bytenr) {
if (new_bytenr == 0 || old_ptr_gen > last_snapshot || memcmp_node_keys(parent, slot, path, level)) {
if (level <= lowest_level) {
btrfs_set_lock_blocking(eb);
btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]);
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
btrfs_set_node_blockptr(path->nodes[level], path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level], path->slots[level], old_ptr_gen);
path->nodes[level]->start, src->root_key.objectid, level - 1, 0,
0, dest->root_key.objectid, level - 1,
path->nodes[level]->start, src->root_key.objectid, level - 1, 0,
0, dest->root_key.objectid, level - 1,
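/*
 * walk_up_reloc_tree() advances to the next reloc tree block whose
 * generation is newer than the last snapshot; walk_down_reloc_tree()
 * descends to the lowest-level such block so it can be merged back.
 */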
static noinline_for_stack
last_snapshot = btrfs_root_last_snapshot(&root->root_item);
for (i = 0; i < *level; i++) {
for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
nritems = btrfs_header_nritems(eb);
while (path->slots[i] + 1 < nritems) {
if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
static noinline_for_stack
last_snapshot = btrfs_root_last_snapshot(&root->root_item);
for (i = *level; i > 0; i--) {
nritems = btrfs_header_nritems(eb);
while (path->slots[i] < nritems) {
ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
if (ptr_gen > last_snapshot)
if (path->slots[i] >= nritems) {
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
blocksize = btrfs_level_size(root, i - 1);
BUG_ON(btrfs_header_level(eb) != i - 1);
path->slots[i - 1] = 0;
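/*
 * invalidate_extent_cache() drops the cached extent mappings of inodes
 * whose file extents fall inside the merged key range, forcing the new
 * block pointers to be re-read from disk.
 */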
static int invalidate_extent_cache(struct btrfs_root *root,
struct inode *inode = NULL;
inode = find_next_inode(root, objectid);
ino = btrfs_ino(inode);
if (max_key->offset == 0)
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
static int find_next_key(struct btrfs_path *path, int level,
while (level < BTRFS_MAX_LEVEL) {
if (!path->nodes[level])
if (path->slots[level] + 1 < btrfs_header_nritems(path->nodes[level])) {
btrfs_node_key_to_cpu(path->nodes[level], key, path->slots[level] + 1);
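/*
 * merge_reloc_root() grafts every modified subtree of a reloc tree back
 * into its fs tree via replace_path(), recording progress in drop_progress
 * so an interrupted merge can resume.
 */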
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_root_level(root_item);
extent_buffer_get(reloc_root->node);
btrfs_node_key_to_cpu(path->nodes[level], &next_key, path->slots[level]);
min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
memset(&next_key, 0, sizeof(next_key));
ret = walk_down_reloc_tree(reloc_root, path, &level);
if (!find_next_key(path, level, &key) &&
ret = replace_path(trans, root, reloc_root, path, &next_key, level, max_level);
btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]);
ret = walk_up_reloc_tree(reloc_root, path, &level);
path->slots[level]);
invalidate_extent_cache(root, &key, &next_key);
btrfs_set_root_refs(root_item, 0);
invalidate_extent_cache(root, &key, &next_key);
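/*
 * prepare_to_merge() sets the ref count on each reloc root and pairs it
 * with its fs root; merge_reloc_roots() then merges the reloc trees one by
 * one.
 */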
static noinline_for_stack
if (IS_ERR(trans)) {
return PTR_ERR(trans);
root = read_fs_root(reloc_root->fs_info,
btrfs_set_root_refs(&reloc_root->root_item, 1);
list_add(&reloc_root->root_list, &reloc_roots);
static noinline_for_stack
while (!list_empty(&reloc_roots)) {
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
root = read_fs_root(reloc_root->fs_info,
ret = merge_reloc_root(rc, root);
static void free_block_list(struct rb_root *blocks)
struct rb_node *rb_node;
while ((rb_node = rb_first(blocks))) {
root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
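/*
 * select_reloc_root() walks up the backref tree to choose the reloc root to
 * relocate a block through; select_one_root() picks a tree root instead,
 * returning -ENOENT when the block is the root of a reloc tree.
 */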
static noinline_for_stack
next = walk_up_backref(next, edges, &index);
record_reloc_root_in_trans(trans, root);
__mark_block_processed(rc, next);
next = walk_down_backref(edges, &index);
static noinline_for_stack
next = walk_up_backref(next, edges, &index);
next = walk_down_backref(edges, &index);
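/*
 * calcu_metadata_size() estimates the metadata that COWing the paths above
 * a backref node can dirty; the reservation helpers below reserve and
 * release twice that amount.
 */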
static noinline_for_stack
if (next->processed && (reserve || next != node))
if (list_empty(&next->upper))
edges[index++] = edge;
next = walk_down_backref(edges, &index);
num_bytes = calcu_metadata_size(rc, node, 1) * 2;
static void release_metadata_space(struct reloc_control *rc,
u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
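/*
 * do_relocation() relocates one tree block: for each upper node holding a
 * pointer to it, the path from the upper block is COWed and the block
 * pointer plus generation are switched to the new copy, with extent
 * references updated accordingly.
 */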
root = select_reloc_root(trans, rc, upper, edges, &nr);
upper->level, &slot);
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (node->eb->start == bytenr)
drop_node_buffer(upper);
bytenr = btrfs_node_blockptr(upper->eb, slot);
if (node->eb->start == bytenr)
blocksize = btrfs_level_size(root, node->level);
generation = btrfs_node_ptr_generation(upper->eb, slot);
btrfs_set_lock_blocking(eb);
btrfs_set_node_blockptr(upper->eb, slot,
btrfs_set_node_ptr_generation(upper->eb, slot,
node->eb->start, blocksize,
btrfs_header_owner(upper->eb),
drop_node_buffer(upper);
unlock_node_buffer(upper);
drop_node_buffer(node);
btrfs_node_key_to_cpu(node->eb, &key, 0);
return do_relocation(trans, rc, node, &key, path, 0);
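/*
 * finish_pending_nodes() drains the per-level pending lists, linking each
 * deferred node to its upper blocks now that those blocks are in place.
 */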
while (!list_empty(&cache->pending[level])) {
list_move_tail(&node->list, &list);
ret = link_to_upper(trans, rc, node, path);
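/*
 * Processed blocks are remembered as EXTENT_DIRTY ranges in the
 * reloc_control's processed_blocks io_tree so no block is relocated twice.
 */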
u64 bytenr, u32 blocksize)
static void __mark_block_processed(struct reloc_control *rc,
if (node->level == 0 ||
mark_block_processed(rc, node->bytenr, blocksize);
static void update_processed_blocks(struct reloc_control *rc,
__mark_block_processed(rc, next);
if (list_empty(&next->upper))
edges[index++] = edge;
next = walk_down_backref(edges, &index);
static int tree_block_processed(u64 bytenr, u32 blocksize,
block->key.objectid, block->key.offset);
if (block->level == 0)
btrfs_item_key_to_cpu(eb, &block->key, 0);
btrfs_node_key_to_cpu(eb, &block->key, 0);
block->key.objectid, block->key.offset);
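/*
 * relocate_tree_block() handles one queued block: select_one_root() decides
 * which tree to go through; -ENOENT (the block is a reloc tree root) just
 * marks it processed, otherwise metadata space is reserved and the block is
 * relocated either by forcing COW through its owning root or via
 * do_relocation().
 */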
root = select_one_root(trans, node);
if (root == ERR_PTR(-ENOENT)) {
update_processed_blocks(rc, node);
ret = reserve_metadata_space(trans, rc, node);
update_processed_blocks(rc, node);
ret = do_relocation(trans, rc, node, key, path, 1);
release_metadata_space(rc, node);
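/*
 * relocate_tree_blocks() processes the whole rb-tree of queued blocks:
 * readahead, fetch any missing keys, build the backref tree for each block,
 * then relocate them and finish the pending nodes.
 */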
static noinline_for_stack
struct rb_node *rb_node;
reada_tree_block(rc, block);
get_tree_block_key(rc, block);
node = build_backref_tree(rc, &block->key,
err = PTR_ERR(node);
ret = relocate_tree_block(trans, rc, node, &block->key,
free_block_list(blocks);
err = finish_pending_nodes(trans, rc, path, err);
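/*
 * prealloc_file_extent_cluster() reserves data space and preallocates the
 * file range of the relocation inode that will receive the cluster, one
 * boundary-delimited chunk at a time.
 */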
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
1 - cluster->start);
while (nr < cluster->nr) {
if (nr + 1 < cluster->nr)
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
num_bytes = end + 1 - start;
num_bytes, num_bytes,
end + 1, &alloc_hint);
1 - cluster->start);
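/*
 * setup_extent_mapping() maps the cluster's file range in the relocation
 * inode to the old extent blocks, so the existing data can be read in
 * through the page cache before being written out to the new location.
 */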
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
struct btrfs_root *root = BTRFS_I(inode)->root;
em->bdev = root->fs_info->fs_devices->latest_bdev;
lock_extent(&BTRFS_I(inode)->io_tree, start, end);
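/*
 * relocate_file_extent_cluster() pulls every page of the cluster through
 * the page cache with readahead, marks cluster boundaries with
 * EXTENT_BOUNDARY and dirties the pages; writeback then places the data at
 * its new location.
 */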
static int relocate_file_extent_cluster(struct inode *inode,
u64 offset = BTRFS_I(inode)->index_cnt;
unsigned long index;
unsigned long last_index;
ra = kzalloc(sizeof(*ra), GFP_NOFS);
ret = prealloc_file_extent_cluster(inode, cluster);
ret = setup_extent_mapping(inode, cluster->start - offset, cluster->end - offset, cluster->start);
while (index <= last_index) {
last_index + 1 - index);
if (PageReadahead(page)) {
ra, NULL, page, index, last_index + 1 - index);
if (!PageUptodate(page)) {
if (!PageUptodate(page)) {
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
if (nr < cluster->nr && page_start + offset == cluster->boundary[nr]) {
page_start, page_end,
page_start, page_end);
balance_dirty_pages_ratelimited(inode->i_mapping);
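/*
 * relocate_data_extent() accumulates contiguous data extents into a
 * cluster; when the next extent does not extend the current cluster, the
 * cluster is flushed through relocate_file_extent_cluster().
 */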
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
ret = relocate_file_extent_cluster(inode, cluster);
ret = relocate_file_extent_cluster(inode, cluster);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
u64 *ref_objectid, int *path_change)
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
*ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
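/*
 * add_tree_block() recovers the level and generation of a tree block from
 * its extent item and queues a tree_block entry; the block's real first key
 * is filled in later by get_tree_block_key().
 */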
struct rb_node *rb_node;
eb = path->nodes[0];
item_size = btrfs_item_size_nr(eb, path->slots[0]);
if (item_size >= sizeof(*ei) + sizeof(*bi)) {
generation = btrfs_extent_generation(eb, ei);
level = btrfs_tree_block_level(eb, bi);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
ret = get_ref_objectid_v0(rc, path, extent_key,
BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
level = (int)ref_owner;
block->key.objectid = extent_key->offset;
rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
u64 bytenr, u32 blocksize,
if (tree_block_processed(bytenr, blocksize, rc))
if (tree_search(blocks, bytenr))
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
ret = add_tree_block(rc, &key, path, blocks);
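/*
 * delete_block_group_cache() drops the free space cache inode contents for
 * the block group being relocated, since the cache would be stale after the
 * move.
 */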
static int delete_block_group_cache(struct btrfs_fs_info *fs_info, struct inode *inode, u64 ino)
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
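/*
 * find_data_references() locates every file extent item referencing a given
 * data extent and queues the leaves that hold those items, so the
 * UPDATE_DATA_PTRS stage can rewrite them.
 */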
struct rb_node *rb_node;
ref_root = btrfs_extent_data_ref_root(leaf, ref);
ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
ref_count = btrfs_extent_data_ref_count(leaf, ref);
ret = delete_block_group_cache(rc->extent_root->fs_info, NULL, ref_objectid);
root = read_fs_root(rc->extent_root->fs_info, ref_root);
err = PTR_ERR(root);
if (ref_offset > ((u64)-1 << 32))
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (block_use_full_backref(rc, leaf))
rb_node = tree_search(blocks, leaf->start);
while (ref_count > 0) {
while (path->slots[0] >= nritems) {
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (block_use_full_backref(rc, leaf))
rb_node = tree_search(blocks, leaf->start);
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != ref_objectid ||
if (btrfs_file_extent_type(leaf, fi) ==
if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
key.offset -= btrfs_file_extent_offset(leaf, fi);
if (key.offset != ref_offset)
if (!tree_block_processed(leaf->start, leaf->len, rc)) {
btrfs_item_key_to_cpu(leaf, &block->key, 0);
rb_node = tree_insert(blocks, block->bytenr,
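/*
 * add_data_references() walks both the inline and the keyed backrefs of a
 * data extent, queueing the referencing tree blocks and leaves.
 */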
static noinline_for_stack
eb = path->nodes[0];
end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
key.type = btrfs_extent_inline_ref_type(eb, iref);
key.offset = btrfs_extent_inline_ref_offset(eb, iref);
ret = __add_tree_block(rc, key.offset, blocksize,
ret = find_data_references(rc, extent_key,
ptr += btrfs_extent_inline_ref_size(key.type);
eb = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(eb)) {
eb = path->nodes[0];
btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
ret = __add_tree_block(rc, key.offset, blocksize,
ret = find_data_references(rc, extent_key,
free_block_list(blocks);
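/*
 * find_next_extent() advances to the next extent item inside the block
 * group being relocated.
 */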
static noinline_for_stack
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (ret == 0 && start <= key.objectid) {
memcpy(extent_key, &key, sizeof(key));
static int check_extent_flags(u64 flags)
if (!(flags & BTRFS_EXTENT_FLAG_DATA) && !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
static noinline_for_stack
set_reloc_control(rc);
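/*
 * relocate_block_group() is the main loop: iterate the extents of the block
 * group, queue tree blocks and data references, relocate the queued tree
 * blocks, cluster the data extents, and finally merge the reloc trees back
 * into the fs trees.
 */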
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
ret = prepare_to_relocate(rc);
ret = find_next_extent(trans, rc, path, &key);
item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
if (item_size >= sizeof(*ei)) {
flags = btrfs_extent_flags(path->nodes[0], ei);
ret = check_extent_flags(flags);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
int path_change = 0;
ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ret = add_tree_block(rc, &key, path, &blocks);
(flags & BTRFS_EXTENT_FLAG_DATA)) {
ret = add_data_references(rc, &key, path, &blocks);
ret = relocate_tree_blocks(trans, rc, &blocks);
(flags & BTRFS_EXTENT_FLAG_DATA)) {
if (trans && progress && err == -ENOSPC) {
ret = relocate_file_extent_cluster(rc->data_inode,
set_reloc_control(rc);
err = prepare_to_merge(rc, err);
merge_reloc_roots(rc);
unset_reloc_control(rc);
err = PTR_ERR(trans);
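/*
 * The relocation data inode is created as an orphan in the data reloc tree;
 * its index_cnt holds the start of the block group so file offsets map 1:1
 * onto extent addresses.
 */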
leaf = path->nodes[0];
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, 0);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
struct inode *inode = NULL;
return ERR_CAST(root);
return ERR_CAST(trans);
err = __insert_orphan_inode(trans, root, objectid);
BTRFS_I(inode)->index_cnt = group->key.objectid;
inode = ERR_PTR(err);
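/*
 * btrfs_relocate_block_group() ties it together: allocate the
 * reloc_control, create the relocation data inode, drop the stale free
 * space cache, then run relocate_block_group() until both stages complete.
 */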
rc = kzalloc(sizeof(*rc), GFP_NOFS);
struct inode *inode;
rc = alloc_reloc_control();
ret = delete_block_group_cache(fs_info, inode, 0);
ret = PTR_ERR(inode);
if (ret && ret != -ENOENT) {
(unsigned long long)rc->block_group->key.objectid,
ret = relocate_block_group(rc);
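/*
 * Recovery path: reloc trees found at mount time are merged back into their
 * fs roots when those still exist, otherwise they are marked as garbage
 * roots and deleted.
 */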
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
return PTR_ERR(trans);
btrfs_set_root_refs(&root->root_item, 0);
if (path->slots[0] == 0)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (IS_ERR(reloc_root)) {
err = PTR_ERR(reloc_root);
list_add(&reloc_root->root_list, &reloc_roots);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
fs_root = read_fs_root(root->fs_info,
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
ret = mark_garbage_root(reloc_root);
if (list_empty(&reloc_roots))
rc = alloc_reloc_control();
set_reloc_control(rc);
if (IS_ERR(trans)) {
unset_reloc_control(rc);
err = PTR_ERR(trans);
while (!list_empty(&reloc_roots)) {
if (btrfs_root_refs(&reloc_root->root_item) == 0) {
fs_root = read_fs_root(root->fs_info,
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
err = __add_reloc_root(reloc_root);
merge_reloc_roots(rc);
unset_reloc_control(rc);
err = PTR_ERR(trans);
while (!list_empty(&reloc_roots)) {
fs_root = read_fs_root(root->fs_info,
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
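/*
 * btrfs_reloc_clone_csums() copies the checksums of a relocated data range
 * from the old disk location into the ordered extent writing the new copy.
 */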
struct btrfs_root *root = BTRFS_I(inode)->root;
disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
disk_bytenr + len - 1, &list, 0);
while (!list_empty(&list)) {
list_del_init(&sums->list);
sector_sum = sums->sums;
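/*
 * btrfs_reloc_cow_block() is called whenever a block of a root under
 * relocation is COWed: it keeps the backref cache in step with the new
 * block and, for leaves during the UPDATE_DATA_PTRS stage, rewrites the
 * file extent pointers in the new copy.
 */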
rc = root->fs_info->reloc_ctl;
level = btrfs_header_level(buf);
if (btrfs_header_generation(buf) <= btrfs_root_last_snapshot(&root->root_item))
WARN_ON(!first_cow && level == 0);
drop_node_buffer(node);
extent_buffer_get(cow);
list_move_tail(&node->list,
__mark_block_processed(rc, node);
if (first_cow && level > 0)
ret = replace_file_extents(trans, rc, root, cow);
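/*
 * btrfs_reloc_pre_snapshot() reserves the space needed to create a reloc
 * tree for a new snapshot; btrfs_reloc_post_snapshot() creates that reloc
 * tree and clones the matching backref cache node.
 */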
u64 *bytes_to_reserve)
root = pending->root;
rc = root->fs_info->reloc_ctl;
rc = root->fs_info->reloc_ctl;
new_root = pending->snap;
reloc_root = create_reloc_root(trans, root->reloc_root,
if (IS_ERR(reloc_root))
return PTR_ERR(reloc_root);
ret = __add_reloc_root(reloc_root);
ret = clone_backref_node(trans, rc, root, reloc_root);