#include <linux/sched.h>
#include <linux/slab.h>
#undef SCRAMBLE_DELAYED_REFS
u64 owner_offset, int refs_to_drop,
int dump_block_groups);
if (block_group->key.objectid < cache->key.objectid) {
} else if (block_group->key.objectid > cache->key.objectid) {
rb_link_node(&block_group->cache_node, parent, p);
end = cache->key.objectid + cache->key.offset - 1;
start = cache->key.objectid;
if (bytenr < start) {
if (!contains && (!ret || start < ret->key.objectid))
} else if (bytenr > start) {
if (contains && bytenr <= end) {
btrfs_get_block_group(ret);
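/*
 * The helpers below maintain the "excluded" ranges of a block group:
 * exclude_super_stripes() records the superblock mirror locations so that
 * the caching code never hands that space out as free space, and
 * free_excluded_extents() clears those markers again.
 */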
static int add_excluded_extent(struct btrfs_root *root,
u64 end = start + num_bytes - 1;
static void free_excluded_extents(struct btrfs_root *root,
start = cache->key.objectid;
end = start + cache->key.offset - 1;
static int exclude_super_stripes(struct btrfs_root *root,
ret = add_excluded_extent(root, cache->key.objectid,
bytenr = btrfs_sb_offset(i);
cache->key.objectid, bytenr, 0, &logical, &nr, &stripe_len);
ret = add_excluded_extent(root, logical[nr],
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
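/*
 * add_new_free_space() feeds the gaps between allocated/excluded extents of
 * a block group into the free-space cache; caching_thread() is the worker
 * that scans the extent tree in the background to discover those gaps.
 */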
u64 extent_start, extent_end, size, total_added = 0;
while (start < end) {
&extent_start, &extent_end,
if (extent_start <= start) {
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
ret = btrfs_add_free_space(block_group, start,
start = extent_end + 1;
ret = btrfs_add_free_space(block_group, start, size);
fs_info = block_group->fs_info;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (btrfs_fs_closing(fs_info) > 1) {
if (path->slots[0] < nritems) {
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
ret = find_next_key(path, 0, &key);
if (need_resched() ||
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (key.objectid < block_group->key.objectid) {
if (key.objectid >= block_group->key.objectid +
block_group->key.offset)
total_found += add_new_free_space(block_group,
last = key.objectid + key.offset;
if (total_found > (1024 * 1024 * 2)) {
total_found += add_new_free_space(block_group, fs_info, last,
block_group->key.objectid + block_group->key.offset);
spin_lock(&block_group->lock);
spin_unlock(&block_group->lock);
free_excluded_extents(extent_root, block_group);
put_caching_control(caching_ctl);
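/*
 * cache_block_group() first tries to load the on-disk free space cache
 * (the load_cache_only path); if that cache is missing or unusable it
 * queues caching_thread() on a worker so the extent tree scan runs
 * asynchronously while allocation continues.
 */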
caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
INIT_LIST_HEAD(&caching_ctl->list);
caching_ctl->work.func = caching_thread;
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
put_caching_control(ctl);
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
spin_lock(&cache->lock);
if (load_cache_only) {
spin_unlock(&cache->lock);
put_caching_control(caching_ctl);
free_excluded_extents(fs_info->extent_root, cache);
spin_lock(&cache->lock);
if (load_cache_only) {
spin_unlock(&cache->lock);
if (load_cache_only) {
put_caching_control(caching_ctl);
btrfs_get_block_group(cache);
cache = block_group_cache_tree_search(info, bytenr, 0);
cache = block_group_cache_tree_search(info, bytenr, 1);
list_for_each_entry_rcu(found, head, list) {
if (found->flags & flags) {
list_for_each_entry_rcu(found, head, list)
static u64 div_factor_fine(u64 num, int factor)
u64 last = max(search_hint, search_start);
cache = btrfs_lookup_first_block_group(root->fs_info, last);
spin_lock(&cache->lock);
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
if ((full_search || !cache->ro) &&
group_start = cache->key.objectid;
spin_unlock(&cache->lock);
spin_unlock(&cache->lock);
if (!full_search && factor < 10) {
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
if (item_size >= sizeof(*ei)) {
num_refs = btrfs_extent_refs(leaf, ei);
extent_flags = btrfs_extent_flags(leaf, ei);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
BUG_ON(item_size != sizeof(*ei0));
num_refs = btrfs_extent_refs_v0(leaf, ei0);
spin_lock(&delayed_refs->lock);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(&head->node);
extent_flags |= head->extent_op->flags_to_set;
num_refs += head->node.ref_mod;
spin_unlock(&delayed_refs->lock);
*flags = extent_flags;
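/*
 * convert_extent_item_v0() (only built with BTRFS_COMPAT_EXTENT_TREE_V0)
 * rewrites an old-format extent item in place, growing the item so the
 * refcount, flags and inline backrefs of the current disk format fit.
 */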
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
u32 new_size = sizeof(*item);
leaf = path->nodes[0];
BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
refs = btrfs_extent_refs_v0(leaf, ei0);
if (owner == (u64)-1) {
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key,
BUG_ON(key.objectid != found_key.objectid);
owner = btrfs_ref_objectid_v0(leaf, ref0);
new_size += sizeof(*bi);
new_size -= sizeof(*ei0);
new_size + extra_size, 1);
leaf = path->nodes[0];
btrfs_set_extent_refs(leaf, item, refs);
btrfs_set_extent_generation(leaf, item, 0);
btrfs_set_extent_flags(leaf, item,
btrfs_set_tree_block_level(leaf, bi, (int)owner);
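/*
 * Data backrefs are keyed by a crc32c hash of (root objectid, owner inode,
 * file offset). hash_extent_data_ref() computes that hash, and the
 * match/lookup/insert/remove helpers below operate on EXTENT_DATA_REF and
 * SHARED_DATA_REF items in the extent tree.
 */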
high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
return ((u64)high_crc << 31) ^ (u64)low_crc;
return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
btrfs_extent_data_ref_objectid(leaf, ref),
btrfs_extent_data_ref_offset(leaf, ref));
static int match_extent_data_ref(struct extent_buffer *leaf,
u64 root_objectid, u64 owner, u64 offset)
if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
key.offset = parent;
key.offset = hash_extent_data_ref(root_objectid,
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
if (path->slots[0] >= nritems) {
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.objectid != bytenr ||
if (match_extent_data_ref(leaf, ref, root_objectid,
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add)
key.offset = parent;
key.offset = hash_extent_data_ref(root_objectid,
ret = btrfs_insert_empty_item(trans, root, path, &key, size);
if (ret && ret != -EEXIST)
leaf = path->nodes[0];
btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
num_refs = btrfs_shared_data_ref_count(leaf, ref);
num_refs += refs_to_add;
btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
if (match_extent_data_ref(leaf, ref, root_objectid,
ret = btrfs_insert_empty_item(trans, root, path, &key,
if (ret && ret != -EEXIST)
leaf = path->nodes[0];
btrfs_set_extent_data_ref_root(leaf, ref,
btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
btrfs_set_extent_data_ref_offset(leaf, ref, offset);
btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
num_refs = btrfs_extent_data_ref_count(leaf, ref);
num_refs += refs_to_add;
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
num_refs = btrfs_ref_count_v0(leaf, ref0);
BUG_ON(num_refs < refs_to_drop);
num_refs -= refs_to_drop;
if (num_refs == 0) {
ret = btrfs_del_item(trans, root, path);
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
btrfs_set_ref_count_v0(leaf, ref0, num_refs);
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (btrfs_extent_inline_ref_type(leaf, iref) ==
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
num_refs = btrfs_extent_data_ref_count(leaf, ref1);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
num_refs = btrfs_ref_count_v0(leaf, ref0);
key.offset = parent;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (ret == -ENOENT && parent) {
key.offset = parent;
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
static inline int extent_ref_type(u64 parent, u64 owner)
if (!path->nodes[level])
if (path->slots[level] + 1 >=
btrfs_header_nritems(path->nodes[level]))
btrfs_item_key_to_cpu(path->nodes[level], key, path->slots[level] + 1);
btrfs_node_key_to_cpu(path->nodes[level], key, path->slots[level] + 1);
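/*
 * lookup_inline_extent_backref() walks the inline refs stored after the
 * extent item, which are kept sorted by ref type and then by a per-type
 * ordering (hash for data refs, offset for shared refs). With insert set,
 * a miss returns -ENOENT along with a position where a new inline ref of
 * the wanted type can be placed.
 */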
static noinline_for_stack
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int insert)
want = extent_ref_type(parent, owner);
extra_size = btrfs_extent_inline_ref_size(want);
if (ret && !insert) {
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
ret = convert_extent_item_v0(trans, root, path, owner,
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
BUG_ON(item_size < sizeof(*ei));
flags = btrfs_extent_flags(leaf, ei);
ptr = (unsigned long)(ei + 1);
end = (unsigned long)ei + item_size;
type = btrfs_extent_inline_ref_type(leaf, iref);
ptr += btrfs_extent_inline_ref_size(type);
if (match_extent_data_ref(leaf, dref, root_objectid,
if (hash_extent_data_ref_item(leaf, dref) <
hash_extent_data_ref(root_objectid, owner, offset))
ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
if (parent == ref_offset) {
if (ref_offset < parent)
if (root_objectid == ref_offset) {
if (ref_offset < root_objectid)
ptr += btrfs_extent_inline_ref_size(type);
if (err == -ENOENT && insert) {
if (item_size + extra_size >=
if (find_next_key(path, 0, &key) == 0 &&
static noinline_for_stack
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
unsigned long item_offset;
leaf = path->nodes[0];
item_offset = (unsigned long)iref - (unsigned long)ei;
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
refs = btrfs_extent_refs(leaf, ei);
refs += refs_to_add;
btrfs_set_extent_refs(leaf, ei, refs);
__run_delayed_extent_op(extent_op, leaf, ei);
ptr = (unsigned long)ei + item_offset;
end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
if (ptr < end - size)
btrfs_set_extent_inline_ref_type(leaf, iref, type);
btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
btrfs_set_extent_data_ref_offset(leaf, dref, offset);
btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
u64 root_objectid, u64 owner, u64 offset)
ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
bytenr, num_bytes, parent,
root_objectid, owner, offset, 0);
ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
root_objectid, owner, offset);
static noinline_for_stack
leaf = path->nodes[0];
refs = btrfs_extent_refs(leaf, ei);
WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
refs += refs_to_mod;
btrfs_set_extent_refs(leaf, ei, refs);
__run_delayed_extent_op(extent_op, leaf, ei);
type = btrfs_extent_inline_ref_type(leaf, iref);
refs = btrfs_extent_data_ref_count(leaf, dref);
refs = btrfs_shared_data_ref_count(leaf, sref);
BUG_ON(refs_to_mod != -1);
BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
refs += refs_to_mod;
btrfs_set_extent_data_ref_count(leaf, dref, refs);
btrfs_set_shared_data_ref_count(leaf, sref, refs);
size = btrfs_extent_inline_ref_size(type);
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ptr = (unsigned long)iref;
end = (unsigned long)ei + item_size;
if (ptr + size < end)
static noinline_for_stack
u64 root_objectid, u64 owner,
u64 offset, int refs_to_add,
ret = lookup_inline_extent_backref(trans, root, path, &iref,
bytenr, num_bytes, parent,
root_objectid, owner, offset, 1);
update_inline_extent_backref(trans, root, path, iref,
refs_to_add, extent_op);
} else if (ret == -ENOENT) {
setup_inline_extent_backref(trans, root, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
u64 bytenr, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add)
BUG_ON(refs_to_add != 1);
ret = insert_tree_block_ref(trans, root, path, bytenr,
parent, root_objectid);
ret = insert_extent_data_ref(trans, root, path, bytenr,
parent, root_objectid,
owner, offset, refs_to_add);
int refs_to_drop, int is_data)
BUG_ON(!is_data && refs_to_drop != 1);
update_inline_extent_backref(trans, root, path, iref,
-refs_to_drop, NULL);
} else if (is_data) {
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
ret = btrfs_del_item(trans, root, path);
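/*
 * btrfs_discard_extent() maps the logical range onto its physical stripes
 * via btrfs_map_block() and issues a discard per stripe on devices that
 * support it, summing up the bytes actually discarded.
 */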
static int btrfs_issue_discard(struct block_device *bdev,
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u64 *actual_bytes)
u64 discarded_bytes = 0;
bytenr, &num_bytes, &bbio, 0);
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
if (!stripe->dev->can_discard)
ret = btrfs_issue_discard(stripe->dev->bdev,
discarded_bytes += stripe->length;
*actual_bytes = discarded_bytes;
u64 root_objectid, u64 owner, u64 offset, int for_cow)
parent, root_objectid, (int)owner,
parent, root_objectid, owner, offset,
u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid,
u64 owner, u64 offset, int refs_to_add,
ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, num_bytes, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
leaf = path->nodes[0];
refs = btrfs_extent_refs(leaf, item);
btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
__run_delayed_extent_op(extent_op, leaf, item);
ret = insert_extent_backref(trans, root->fs_info->extent_root,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
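/*
 * run_delayed_data_ref() and run_delayed_tree_ref() apply one queued
 * reference change: an add action increments a backref, a drop removes it,
 * and the insert_reserved case creates the extent item for a freshly
 * allocated extent. run_delayed_extent_op() applies deferred flag/key
 * updates to an existing extent item.
 */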
int insert_reserved)
ref = btrfs_delayed_node_to_data_ref(node);
ref_root = ref->root;
ret = alloc_reserved_file_extent(trans, root,
parent, ref_root, flags,
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
ret = __btrfs_free_extent(trans, root, node->bytenr,
u64 flags = btrfs_extent_flags(leaf, ei);
btrfs_set_extent_flags(leaf, ei, flags);
BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
BUG_ON(item_size < sizeof(*ei));
__run_delayed_extent_op(extent_op, leaf, ei);
int insert_reserved)
ref = btrfs_delayed_node_to_tree_ref(node);
ref_root = ref->root;
ret = alloc_reserved_tree_block(trans, root,
ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
ref->level, 0, 1, extent_op);
ret = __btrfs_free_extent(trans, root, node->bytenr,
ref->level, 0, 1, extent_op);
int insert_reserved)
if (btrfs_delayed_ref_is_head(node)) {
head = btrfs_delayed_node_to_head(node);
if (insert_reserved) {
ret = run_delayed_tree_ref(trans, root, node, extent_op,
ret = run_delayed_data_ref(trans, root, node, extent_op,
if (ref->action == action)
int must_insert_reserved = 0;
if (list_empty(cluster))
ref = select_delayed_ref(locked_ref);
if (ref && ref->seq &&
list_del_init(&locked_ref->cluster);
spin_unlock(&delayed_refs->lock);
spin_lock(&delayed_refs->lock);
ref = &locked_ref->node;
if (extent_op && must_insert_reserved) {
spin_unlock(&delayed_refs->lock);
ret = run_delayed_extent_op(trans, root,
spin_lock(&delayed_refs->lock);
list_del_init(&locked_ref->cluster);
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
btrfs_put_delayed_ref(ref);
spin_lock(&delayed_refs->lock);
spin_lock(&delayed_refs->lock);
#ifdef SCRAMBLE_DELAYED_REFS
printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
struct qgroup_update, list);
trans, fs_info, qgroup_update->node,
kfree(qgroup_update);
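/*
 * btrfs_run_delayed_refs() drains up to 'count' delayed reference updates
 * (all of them when count is (unsigned long)-1). It collects clusters of
 * ref heads and runs them through run_clustered_refs(), dropping
 * delayed_refs->lock around the actual extent tree modifications.
 */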
struct btrfs_root *root, unsigned long count)
int run_all = count == (unsigned long)-1;
if (root == root->fs_info->extent_root)
root = root->fs_info->tree_root;
INIT_LIST_HEAD(&cluster);
spin_lock(&delayed_refs->lock);
#ifdef SCRAMBLE_DELAYED_REFS
if (!(run_all || run_most) &&
ret = run_clustered_refs(trans, root, &cluster);
spin_unlock(&delayed_refs->lock);
count -= min_t(unsigned long, ret, count);
if (!list_empty(&trans->new_bgs)) {
spin_unlock(&delayed_refs->lock);
spin_lock(&delayed_refs->lock);
count = (unsigned long)-1;
if (btrfs_delayed_ref_is_head(ref)) {
head = btrfs_delayed_node_to_head(ref);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
spin_unlock(&delayed_refs->lock);
spin_unlock(&delayed_refs->lock);
extent_op->is_data = is_data ? 1 : 0;
num_bytes, extent_op);
spin_lock(&delayed_refs->lock);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(&head->node);
if (ref->bytenr != bytenr)
data_ref = btrfs_delayed_node_to_data_ref(ref);
if (ref->bytenr == bytenr && ref->seq == seq)
spin_unlock(&delayed_refs->lock);
if (path->slots[0] == 0)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
if (item_size != sizeof(*ei) +
if (btrfs_extent_generation(leaf, ei) <=
btrfs_root_last_snapshot(&root->root_item))
if (btrfs_extent_inline_ref_type(leaf, iref) !=
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
btrfs_extent_data_ref_root(leaf, ref) !=
btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
ret = check_committed_ref(trans, root, path, objectid,
if (ret && ret != -ENOENT)
ret2 = check_delayed_ref(trans, root, path, objectid,
} while (ret2 == -EAGAIN);
if (ret2 && ret2 != -ENOENT) {
int full_backref, int inc, int for_cow)
ref_root = btrfs_header_owner(buf);
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
parent = buf->start;
for (i = 0; i < nritems; i++) {
btrfs_item_key_to_cpu(buf, &key, i);
if (btrfs_file_extent_type(buf, fi) ==
bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
ret = process_func(trans, root, bytenr, num_bytes,
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = btrfs_level_size(root, level - 1);
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, level - 1, 0,
return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
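/*
 * cache_save_setup() and btrfs_write_dirty_block_groups() maintain the
 * free space cache inode of each block group and write dirty block group
 * items back into the extent tree at transaction commit time.
 */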
leaf = path->nodes[0];
spin_lock(&root->fs_info->block_group_cache_lock);
btrfs_get_block_group(cache);
spin_unlock(&root->fs_info->block_group_cache_lock);
if (block_group->key.offset < (100 * 1024 * 1024)) {
spin_lock(&block_group->lock);
spin_unlock(&block_group->lock);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
ret = PTR_ERR(inode);
if (IS_ERR(inode)) {
if (block_group->ro)
i_size_read(inode)) {
BTRFS_I(inode)->generation = 0;
if (i_size_read(inode) > 0) {
spin_lock(&block_group->lock);
spin_unlock(&block_group->lock);
spin_unlock(&block_group->lock);
num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
num_pages, num_pages,
spin_lock(&block_group->lock);
spin_unlock(&block_group->lock);
cache = btrfs_lookup_first_block_group(root->fs_info, last);
cache = next_block_group(root, cache);
err = cache_save_setup(cache, trans, path);
last = cache->key.objectid + cache->key.offset;
cache = btrfs_lookup_first_block_group(root->fs_info, last);
cache = next_block_group(root, cache);
last = cache->key.objectid + cache->key.offset;
err = write_one_cache_group(trans, root, path, cache);
cache = btrfs_lookup_first_block_group(root->fs_info, last);
cache = next_block_group(root, cache);
last = cache->key.objectid + cache->key.offset;
if (!block_group || block_group->ro)
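/*
 * A btrfs_space_info aggregates all block groups of one allocation type
 * (data, metadata, system). update_space_info() either folds a new block
 * group's sizes into an existing space_info or allocates a fresh one.
 */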
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found = __find_space_info(info, flags);
spin_lock(&found->lock);
found->disk_used += bytes_used * factor;
spin_unlock(&found->lock);
*space_info = found;
found = kzalloc(sizeof(*found), GFP_NOFS);
*space_info = found;
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
u64 extra_flags = chunk_to_extended(flags) &
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
if (flags & BTRFS_BLOCK_GROUP_DATA &&
target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
root->fs_info->fs_devices->missing_devices;
spin_lock(&root->fs_info->balance_lock);
target = get_restripe_target(root->fs_info, flags);
spin_unlock(&root->fs_info->balance_lock);
return extended_to_chunk(target);
spin_unlock(&root->fs_info->balance_lock);
if (num_devices == 1)
if (num_devices < 4)
flags &= ~BTRFS_BLOCK_GROUP_DUP;
flags &= ~BTRFS_BLOCK_GROUP_RAID1;
flags &= ~BTRFS_BLOCK_GROUP_RAID0;
return extended_to_chunk(flags);
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
if (flags & BTRFS_BLOCK_GROUP_DATA)
flags |= root->fs_info->avail_data_alloc_bits;
flags |= root->fs_info->avail_system_alloc_bits;
else if (flags & BTRFS_BLOCK_GROUP_METADATA)
flags |= root->fs_info->avail_metadata_alloc_bits;
else if (root == root->fs_info->chunk_root)
return get_alloc_profile(root, flags);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0, committed = 0, alloc_chunk = 1;
if (root == root->fs_info->tree_root ||
spin_lock(&data_sinfo->lock);
if (!data_sinfo->full && alloc_chunk) {
spin_unlock(&data_sinfo->lock);
return PTR_ERR(trans);
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
spin_unlock(&data_sinfo->lock);
return PTR_ERR(trans);
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
struct btrfs_root *root = BTRFS_I(inode)->root;
data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
trace_btrfs_space_reservation(root->fs_info, "space_info",
data_sinfo->flags, bytes, 0);
spin_unlock(&data_sinfo->lock);
static void force_metadata_allocation(struct btrfs_fs_info *info)
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
static int should_alloc_chunk(struct btrfs_root *root,
if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
num_allocated += global_rsv->size;
thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
thresh = max_t(u64, 64 * 1024 * 1024, div_factor_fine(thresh, 1));
if (num_bytes - num_allocated < thresh)
if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
num_dev = root->fs_info->fs_devices->rw_devices;
return btrfs_calc_trans_metadata_size(root, num_dev + 1);
info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
spin_lock(&info->lock);
spin_unlock(&info->lock);
thresh = get_system_chunk_thresh(root, type);
left, thresh, type);
dump_space_info(info, 0, 0);
if (left < thresh) {
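/*
 * do_chunk_alloc() decides whether to carve out a new chunk for the given
 * allocation profile, using should_alloc_chunk() as the heuristic and
 * serializing concurrent callers via the space_info force_alloc/chunk_alloc
 * handshake before calling into the chunk allocator.
 */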
struct btrfs_root *extent_root, u64 flags, int force)
int wait_for_alloc = 0;
space_info = __find_space_info(extent_root->fs_info, flags);
ret = update_space_info(extent_root->fs_info, flags,
spin_lock(&space_info->lock);
if (force < space_info->force_alloc)
if (space_info->full) {
spin_unlock(&space_info->lock);
if (!should_alloc_chunk(extent_root, space_info, force)) {
spin_unlock(&space_info->lock);
spin_unlock(&space_info->lock);
if (wait_for_alloc) {
if (btrfs_mixed_space_info(space_info))
force_metadata_allocation(fs_info);
check_system_chunk(trans, extent_root, flags);
if (ret < 0 && ret != -ENOSPC)
spin_lock(&space_info->lock);
space_info->full = 1;
spin_unlock(&space_info->lock);
static int can_overcommit(struct btrfs_root *root,
spin_lock(&root->fs_info->free_chunk_lock);
avail = root->fs_info->free_chunk_space;
spin_unlock(&root->fs_info->free_chunk_lock);
if (used + bytes < space_info->total_bytes + avail)
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
block_rsv = &root->fs_info->delalloc_block_rsv;
delalloc_bytes = root->fs_info->delalloc_bytes;
if (delalloc_bytes == 0) {
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
spin_lock(&space_info->lock);
if (can_overcommit(root, space_info, orig, !trans)) {
spin_unlock(&space_info->lock);
spin_unlock(&space_info->lock);
if (wait_ordered && !trans) {
delalloc_bytes = root->fs_info->delalloc_bytes;
static int may_commit_transaction(struct btrfs_root *root,
u64 bytes, int force)
spin_lock(&space_info->lock);
spin_unlock(&space_info->lock);
spin_unlock(&space_info->lock);
spin_lock(&space_info->lock);
spin_lock(&delayed_rsv->lock);
spin_unlock(&delayed_rsv->lock);
spin_unlock(&space_info->lock);
spin_unlock(&delayed_rsv->lock);
spin_unlock(&space_info->lock);
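/*
 * reserve_metadata_bytes() is the core metadata reservation path: it tries
 * the reservation under space_info->lock, optionally overcommits via
 * can_overcommit(), and otherwise steps through the flush states in
 * flush_space() (flush delayed items, shrink delalloc, allocate a chunk,
 * commit the transaction) until the space appears or every state fails.
 */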
static int flush_space(struct btrfs_root *root,
u64 orig_bytes, int state)
u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
nr = (int)div64_u64(num_bytes, bytes);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
shrink_delalloc(root, num_bytes, orig_bytes,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
ret = may_commit_transaction(root, space_info, orig_bytes, 0);
static int reserve_metadata_bytes(struct btrfs_root *root,
u64 orig_bytes, int flush)
u64 num_bytes = orig_bytes;
bool flushing = false;
spin_lock(&space_info->lock);
while (flush && !flushing && space_info->flush) {
spin_unlock(&space_info->lock);
spin_lock(&space_info->lock);
if (used <= space_info->total_bytes) {
if (used + orig_bytes <= space_info->total_bytes) {
trace_btrfs_space_reservation(root->fs_info,
"space_info", space_info->flags, orig_bytes, 1);
num_bytes = orig_bytes;
if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
trace_btrfs_space_reservation(root->fs_info, "space_info",
space_info->flags, orig_bytes,
space_info->flush = 1;
spin_unlock(&space_info->lock);
ret = flush_space(root, space_info, num_bytes, orig_bytes,
spin_lock(&space_info->lock);
space_info->flush = 0;
spin_unlock(&space_info->lock);
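/*
 * The block_rsv helpers below move reserved metadata space between
 * reserves: block_rsv_use_bytes() consumes from a reserve,
 * block_rsv_add_bytes() refills it, and block_rsv_release_bytes() hands
 * excess back to the global reserve or to the owning space_info.
 */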
block_rsv = &root->fs_info->empty_block_rsv;
spin_lock(&block_rsv->lock);
if (block_rsv->reserved >= num_bytes) {
block_rsv->full = 0;
spin_unlock(&block_rsv->lock);
u64 num_bytes, int update_size)
spin_lock(&block_rsv->lock);
block_rsv->full = 1;
spin_unlock(&block_rsv->lock);
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
spin_lock(&block_rsv->lock);
if (num_bytes == (u64)-1)
num_bytes = block_rsv->size;
block_rsv->full = 1;
spin_unlock(&block_rsv->lock);
if (num_bytes > 0) {
spin_lock(&dest->lock);
bytes_to_add = min(num_bytes, bytes_to_add);
num_bytes -= bytes_to_add;
spin_unlock(&dest->lock);
spin_lock(&space_info->lock);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, num_bytes, 0);
spin_unlock(&space_info->lock);
ret = block_rsv_use_bytes(src, num_bytes);
block_rsv_add_bytes(dst, num_bytes, 1);
memset(rsv, 0, sizeof(*rsv));
unsigned short type)
block_rsv->space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
static inline int __block_rsv_add(struct btrfs_root *root,
u64 num_bytes, int flush)
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
block_rsv_add_bytes(block_rsv, num_bytes, 1);
return __block_rsv_add(root, block_rsv, num_bytes, 1);
return __block_rsv_add(root, block_rsv, num_bytes, 0);
spin_lock(&block_rsv->lock);
if (block_rsv->reserved >= num_bytes)
spin_unlock(&block_rsv->lock);
static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
u64 min_reserved, int flush)
spin_lock(&block_rsv->lock);
num_bytes = min_reserved;
if (block_rsv->reserved >= num_bytes)
spin_unlock(&block_rsv->lock);
ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
block_rsv_add_bytes(block_rsv, num_bytes, 0);
return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
if (global_rsv->full || global_rsv == block_rsv ||
block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
int csum_size = btrfs_super_csum_size(fs_info->super_copy);
sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
spin_lock(&sinfo->lock);
spin_unlock(&sinfo->lock);
sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
spin_lock(&sinfo->lock);
if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
spin_unlock(&sinfo->lock);
num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
num_bytes += div64_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used)
num_bytes = div64_u64(meta_used, 3);
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
num_bytes = calc_global_metadata_size(fs_info);
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes, 1);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes, 0);
block_rsv->full = 1;
spin_unlock(&block_rsv->lock);
spin_unlock(&sinfo->lock);
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
update_global_block_rsv(fs_info);
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
trace_btrfs_space_reservation(root->fs_info, "transaction",
struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
trace_btrfs_space_reservation(root->fs_info, "orphan",
btrfs_ino(inode), num_bytes, 1);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
trace_btrfs_space_reservation(root->fs_info, "orphan",
btrfs_ino(inode), num_bytes, 0);
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
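/*
 * Delalloc metadata reservations are tracked per inode:
 * outstanding_extents counts how many file extent items the pending writes
 * may produce, csum_bytes sizes the checksum items, and
 * drop_outstanding_extent()/calc_csum_metadata_size() work out how much of
 * the reservation can be released as those numbers shrink.
 */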
static unsigned drop_outstanding_extent(struct inode *inode)
unsigned drop_inode_space = 0;
unsigned dropped_extents = 0;
BUG_ON(!BTRFS_I(inode)->outstanding_extents);
BTRFS_I(inode)->outstanding_extents--;
if (BTRFS_I(inode)->outstanding_extents == 0 &&
&BTRFS_I(inode)->runtime_flags))
drop_inode_space = 1;
if (BTRFS_I(inode)->outstanding_extents >=
BTRFS_I(inode)->reserved_extents)
return drop_inode_space;
dropped_extents = BTRFS_I(inode)->reserved_extents -
BTRFS_I(inode)->outstanding_extents;
BTRFS_I(inode)->reserved_extents -= dropped_extents;
return dropped_extents + drop_inode_space;
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
struct btrfs_root *root = BTRFS_I(inode)->root;
int num_csums_per_leaf;
BTRFS_I(inode)->csum_bytes == 0)
old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
BTRFS_I(inode)->csum_bytes += num_bytes;
BTRFS_I(inode)->csum_bytes -= num_bytes;
num_csums_per_leaf = (int)div64_u64(csum_size,
num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
num_csums = num_csums + num_csums_per_leaf - 1;
num_csums = num_csums / num_csums_per_leaf;
old_csums = old_csums + num_csums_per_leaf - 1;
old_csums = old_csums / num_csums_per_leaf;
if (old_csums == num_csums)
return btrfs_calc_trans_metadata_size(root,
num_csums - old_csums);
return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned nr_extents = 0;
int extra_reserve = 0;
if (btrfs_is_free_space_inode(inode))
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
if (BTRFS_I(inode)->outstanding_extents >
BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
&BTRFS_I(inode)->runtime_flags)) {
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
if (root->fs_info->quota_enabled) {
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
if (BTRFS_I(inode)->csum_bytes == csum_bytes)
calc_csum_metadata_size(inode, num_bytes, 0);
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
to_free += btrfs_calc_trans_metadata_size(root, dropped);
trace_btrfs_space_reservation(root->fs_info,
spin_lock(&BTRFS_I(inode)->lock);
if (extra_reserve) {
&BTRFS_I(inode)->runtime_flags);
BTRFS_I(inode)->reserved_extents += nr_extents;
spin_unlock(&BTRFS_I(inode)->lock);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_reserve, 1);
block_rsv_add_bytes(block_rsv, to_reserve, 1);
struct btrfs_root *root = BTRFS_I(inode)->root;
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
to_free += btrfs_calc_trans_metadata_size(root, dropped);
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
if (root->fs_info->quota_enabled) {
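/*
 * update_block_group() adjusts the used-bytes counters of the block groups
 * covering [bytenr, bytenr + num_bytes) when an extent is allocated or
 * freed, keeping the superblock bytes_used and the per-space_info totals
 * in sync.
 */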
u64 bytenr, u64 num_bytes, int alloc)
old_val = btrfs_super_bytes_used(info->super_copy);
btrfs_set_super_bytes_used(info->super_copy, old_val);
cache_block_group(cache, trans, NULL, 1);
byte_in_group = bytenr - cache->key.objectid;
spin_lock(&cache->lock);
old_val = btrfs_block_group_used(&cache->item);
num_bytes = min(total, cache->key.offset - byte_in_group);
btrfs_set_block_group_used(&cache->item, old_val);
cache->space_info->disk_used += num_bytes * factor;
spin_unlock(&cache->lock);
btrfs_set_block_group_used(&cache->item, old_val);
cache->space_info->disk_used -= num_bytes * factor;
spin_unlock(&cache->lock);
bytenr, bytenr + num_bytes - 1,
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
bytenr = cache->key.objectid;
static int pin_down_extent(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int reserved)
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
u64 bytenr, u64 num_bytes, int reserved)
pin_down_extent(root, cache, bytenr, num_bytes, reserved);
u64 bytenr, u64 num_bytes)
cache_block_group(cache, trans, root, 1);
pin_down_extent(root, cache, bytenr, num_bytes, 0);
u64 num_bytes, int reserve)
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
trace_btrfs_space_reservation(cache->fs_info,
"space_info", space_info->flags,
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
if (block_group_cache_done(cache)) {
list_del_init(&caching_ctl->list);
put_caching_control(caching_ctl);
update_global_block_rsv(fs_info);
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
while (start <= end) {
start >= cache->key.objectid + cache->key.offset) {
len = cache->key.objectid + cache->key.offset - start;
len = min(len, end + 1 - start);
btrfs_add_free_space(cache, start, len);
spin_lock(&cache->lock);
spin_unlock(&cache->lock);
ret = btrfs_discard_extent(root, start, end + 1 - start, NULL);
unpin_extent_range(root, start, end);
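/*
 * __btrfs_free_extent() drops refs_to_drop references from an extent item:
 * it removes the matching backref, decrements the refcount, and only when
 * the count hits zero deletes the extent item itself; the freed range is
 * pinned until the transaction commits, when unpin_extent_range() hands it
 * back to the free-space cache.
 */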
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner_objectid,
u64 owner_offset, int refs_to_drop,
int extent_slot = 0;
int found_extent = 0;
BUG_ON(!is_data && refs_to_drop != 1);
ret = lookup_extent_backref(trans, extent_root, path, &iref,
bytenr, num_bytes, parent,
root_objectid, owner_objectid,
extent_slot = path->slots[0];
while (extent_slot >= 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key,
key.offset == num_bytes) {
if (path->slots[0] - extent_slot > 5)
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
if (found_extent && item_size < sizeof(*ei))
if (!found_extent) {
ret = remove_extent_backref(trans, extent_root, path,
", was looking for %llu\n", ret, (unsigned long long)bytenr);
extent_slot = path->slots[0];
} else if (ret == -ENOENT) {
"parent %llu root %llu owner %llu offset %llu\n",
(unsigned long long)bytenr,
(unsigned long long)parent,
(unsigned long long)root_objectid,
(unsigned long long)owner_objectid,
(unsigned long long)owner_offset);
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
if (item_size < sizeof(*ei)) {
BUG_ON(found_extent || extent_slot != path->slots[0]);
ret = convert_extent_item_v0(trans, extent_root, path,
", was looking for %llu\n", ret, (unsigned long long)bytenr);
extent_slot = path->slots[0];
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, extent_slot);
BUG_ON(item_size < sizeof(*ei));
BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
refs = btrfs_extent_refs(leaf, ei);
BUG_ON(refs < refs_to_drop);
refs -= refs_to_drop;
__run_delayed_extent_op(extent_op, leaf, ei);
btrfs_set_extent_refs(leaf, ei, refs);
ret = remove_extent_backref(trans, extent_root, path,
BUG_ON(is_data && refs_to_drop != extent_data_ref_count(root, path, iref));
path->slots[0] = extent_slot;
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
spin_lock(&delayed_refs->lock);
if (ref->bytenr == bytenr)
head->node.in_tree = 0;
if (list_empty(&head->cluster))
list_del_init(&head->cluster);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(&head->node);
spin_unlock(&delayed_refs->lock);
u64 parent, int last_ref)
btrfs_header_level(buf),
if (btrfs_header_generation(buf) == trans->transid) {
ret = check_ref_cleanup(trans, root, buf->start);
pin_down_extent(root, cache, buf->start, buf->len, 1);
btrfs_add_free_space(cache, buf->start, buf->len);
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset, int for_cow)
parent, root_objectid, (int)owner,
parent, root_objectid, owner,
u64 ret = (val + mask) & ~mask;
caching_ctl = get_caching_control(cache);
put_caching_control(caching_ctl);
caching_ctl = get_caching_control(cache);
put_caching_control(caching_ctl);
static int __get_block_group_index(u64 flags)
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
return __get_block_group_index(cache->flags);
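/*
 * find_free_extent() is the allocator core: starting from a hint it loops
 * over block groups of a suitable RAID index, tries the per-fs allocation
 * cluster first, falls back to an unclustered free-space search, and only
 * then moves to the next group or forces a new chunk allocation.
 */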
u64 num_bytes, u64 empty_size,
u64 search_start = 0;
int empty_cluster = 2 * 1024 * 1024;
bool found_uncached_bg = false;
bool failed_cluster_refill = false;
bool failed_alloc = false;
bool use_cluster = true;
bool have_caching_bg = false;
trace_find_free_extent(orig_root, num_bytes, empty_size, data);
space_info = __find_space_info(root->fs_info, data);
if (btrfs_mixed_space_info(space_info))
use_cluster = false;
if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
last_ptr = &root->fs_info->meta_alloc_cluster;
empty_cluster = 64 * 1024;
if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
last_ptr = &root->fs_info->data_alloc_cluster;
spin_lock(&last_ptr->lock);
spin_unlock(&last_ptr->lock);
search_start = max(search_start, first_logical_byte(root, 0));
search_start = max(search_start, hint_byte);
if (search_start == hint_byte) {
if (block_group && block_group_bits(block_group, data) &&
if (list_empty(&block_group->list) ||
index = get_block_group_index(block_group);
goto have_block_group;
} else if (block_group) {
have_caching_bg = false;
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
if (!block_group_bits(block_group, data)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
if ((data & extra) && !(block_group->flags & extra))
cached = block_group_cache_done(block_group);
found_uncached_bg = true;
ret = cache_block_group(block_group, trans,
if (used_block_group != block_group &&
(!used_block_group ||
used_block_group->ro ||
!block_group_bits(used_block_group, data))) {
goto refill_cluster;
if (used_block_group != block_group)
btrfs_get_block_group(used_block_group);
last_ptr, num_bytes, used_block_group->key.objectid);
trace_btrfs_reserve_extent_cluster(root,
block_group, search_start, num_bytes);
if (used_block_group != block_group) {
BUG_ON(used_block_group != block_group);
goto unclustered_alloc;
goto unclustered_alloc;
block_group, last_ptr,
search_start, num_bytes,
empty_cluster + empty_size);
last_ptr, num_bytes,
trace_btrfs_reserve_extent_cluster(root,
block_group, search_start,
&& !failed_cluster_refill) {
failed_cluster_refill = true;
wait_block_group_cache_progress(block_group,
num_bytes + empty_cluster + empty_size);
goto have_block_group;
num_bytes + empty_cluster + empty_size) {
num_bytes, empty_size);
if (!offset && !failed_alloc && !cached &&
wait_block_group_cache_progress(block_group,
num_bytes + empty_size);
failed_alloc = true;
goto have_block_group;
} else if (!offset) {
have_caching_bg = true;
search_start = stripe_align(root, offset);
if (search_start + num_bytes >
used_block_group->key.objectid + used_block_group->key.offset) {
btrfs_add_free_space(used_block_group, offset, num_bytes);
if (offset < search_start)
btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
btrfs_add_free_space(used_block_group, offset, num_bytes);
trace_btrfs_reserve_extent(orig_root, block_group,
search_start, num_bytes);
if (used_block_group != block_group)
failed_cluster_refill = false;
failed_alloc = false;
BUG_ON(index != get_block_group_index(block_group));
if (used_block_group != block_group)
if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
ret = do_chunk_alloc(trans, root, data,
if (ret < 0 && ret != -ENOSPC) {
int dump_block_groups)
spin_lock(&info->lock);
(unsigned long long)info->flags,
(info->full) ? "" : "not ");
"reserved=%llu, may_use=%llu, readonly=%llu\n",
spin_unlock(&info->lock);
if (!dump_block_groups)
spin_lock(&cache->lock);
printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
(unsigned long long)cache->key.objectid,
(unsigned long long)cache->key.offset,
(unsigned long long)btrfs_block_group_used(&cache->item),
(unsigned long long)cache->pinned,
(unsigned long long)cache->reserved,
cache->ro ? "[readonly]" : "");
spin_unlock(&cache->lock);
if (++index < BTRFS_NR_RAID_TYPES)
u64 num_bytes, u64 min_alloc_size,
u64 empty_size, u64 hint_byte,
bool final_tried = false;
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
hint_byte, ins, data);
num_bytes = num_bytes >> 1;
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
if (num_bytes == min_alloc_size)
sinfo = __find_space_info(root->fs_info, data);
"wanted %llu\n", (unsigned long long)data,
(unsigned long long)num_bytes);
dump_space_info(sinfo, num_bytes, 1);
trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
u64 start, u64 len, int pin)
(unsigned long long)start);
ret = btrfs_discard_extent(root, start, len, NULL);
pin_down_extent(root, cache, start, len, 1);
btrfs_add_free_space(cache, start, len);
trace_btrfs_reserved_extent_free(root, start, len);
return __btrfs_free_reserved_extent(root, start, len, 0);
return __btrfs_free_reserved_extent(root, start, len, 1);
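/*
 * alloc_reserved_file_extent() and alloc_reserved_tree_block() insert the
 * extent item (with its first inline backref, plus a tree-block-info for
 * metadata) for space that find_free_extent() already reserved, then bump
 * the block group's used counter via update_block_group().
 */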
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
leaf = path->nodes[0];
btrfs_set_extent_refs(leaf, extent_item, ref_mod);
btrfs_set_extent_generation(leaf, extent_item, trans->transid);
btrfs_set_extent_flags(leaf, extent_item,
btrfs_set_extent_inline_ref_type(leaf, iref, type);
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
btrfs_set_extent_data_ref_offset(leaf, ref, offset);
btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
"%llu\n", (unsigned long long)ins->objectid,
(unsigned long long)ins->offset);
u64 parent, u64 root_objectid,
u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
leaf = path->nodes[0];
btrfs_set_extent_refs(leaf, extent_item, 1);
btrfs_set_extent_generation(leaf, extent_item, trans->transid);
btrfs_set_extent_flags(leaf, extent_item,
flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
btrfs_set_tree_block_key(leaf, block_info, key);
btrfs_set_tree_block_level(leaf, block_info, level);
btrfs_set_extent_inline_ref_type(leaf, iref,
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
btrfs_set_extent_inline_ref_type(leaf, iref,
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
"%llu\n", (unsigned long long)ins->objectid,
(unsigned long long)ins->offset);
u64 root_objectid, u64 owner,
root_objectid, owner, offset,
u64 root_objectid, u64 owner, u64 offset,
u64 num_bytes = ins->offset;
cache_block_group(block_group, trans, NULL, 0);
caching_ctl = get_caching_control(block_group);
BUG_ON(!block_group_cache_done(block_group));
if (start >= caching_ctl->progress) {
ret = add_excluded_extent(root, start, num_bytes);
} else if (start + num_bytes <= caching_ctl->progress) {
ret = add_excluded_extent(root, start, num_bytes);
put_caching_control(caching_ctl);
ret = btrfs_update_reserved_bytes(block_group, ins->offset,
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
u64 bytenr, u32 blocksize,
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_set_lock_blocking(buf);
block_rsv = get_block_rsv(trans, root);
if (block_rsv->size == 0) {
ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
if (ret && block_rsv != global_rsv) {
ret = block_rsv_use_bytes(global_rsv, blocksize);
return ERR_PTR(ret);
return ERR_PTR(ret);
ret = block_rsv_use_bytes(block_rsv, blocksize);
ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
} else if (ret && block_rsv != global_rsv) {
ret = block_rsv_use_bytes(global_rsv, blocksize);
block_rsv_add_bytes(block_rsv, blocksize, 0);
block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
u64 parent, u64 root_objectid,
u64 hint, u64 empty_size)
block_rsv = use_block_rsv(trans, root, blocksize);
if (IS_ERR(block_rsv))
return ERR_CAST(block_rsv);
empty_size, hint, &ins, 0);
unuse_block_rsv(root->fs_info, block_rsv, blocksize);
return ERR_PTR(ret);
ins.offset, parent, root_objectid,
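/*
 * The walk_down/walk_up machinery below implements btrfs_drop_snapshot():
 * DROP_REFERENCE walks a dead root and drops references on every block it
 * exclusively owns, while UPDATE_BACKREF converts implicit backrefs to full
 * backrefs on shared subtrees. wc->refs[] and wc->flags[] cache the extent
 * refcounts per level so shared subtrees can be skipped without descending.
 */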
#define DROP_REFERENCE 1
#define UPDATE_BACKREF 2
nritems = btrfs_header_nritems(eb);
blocksize = btrfs_level_size(root, wc->level - 1);
for (slot = path->slots[wc->level]; slot < nritems; slot++) {
bytenr = btrfs_node_blockptr(eb, slot);
generation = btrfs_node_ptr_generation(eb, slot);
generation <= root->root_key.offset)
if (wc->level == 1 &&
generation <= root->root_key.offset)
btrfs_node_key_to_cpu(eb, &key, slot);
if (wc->level == 1 &&
int level = wc->level;
btrfs_header_owner(eb) != root->root_key.objectid)
if (wc->refs[level] > 1)
btrfs_tree_unlock_rw(eb, path->locks[level]);
if (!(wc->flags[level] & flag)) {
if (path->locks[level] && level > 0) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
int level = wc->level;
generation = btrfs_node_ptr_generation(path->nodes[level],
path->slots[level]);
generation <= root->root_key.offset) {
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
blocksize = btrfs_level_size(root, level - 1);
btrfs_set_lock_blocking(next);
&wc->refs[level - 1],
&wc->flags[level - 1]);
if (wc->refs[level - 1] > 1) {
generation <= root->root_key.offset)
btrfs_node_key_to_cpu(path->nodes[level], &key,
path->slots[level]);
if (reada && level == 1)
reada_walk_down(trans, root, wc, path);
btrfs_set_lock_blocking(next);
BUG_ON(level != btrfs_header_level(next));
wc->refs[level - 1] = 0;
wc->flags[level - 1] = 0;
btrfs_header_owner(path->nodes[level]));
root->root_key.objectid, level - 1, 0, 0);
int level = wc->level;
if (level < wc->shared_level)
if (!path->locks[level]) {
btrfs_set_lock_blocking(eb);
btrfs_tree_unlock_rw(eb, path->locks[level]);
if (wc->refs[level] == 1) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
if (wc->refs[level] == 1) {
if (!path->locks[level] &&
btrfs_header_generation(eb) == trans->transid) {
btrfs_set_lock_blocking(eb);
if (eb == root->node) {
btrfs_header_owner(eb));
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
btrfs_header_owner(path->nodes[level + 1]));
int level = wc->level;
int lookup_info = 1;
while (level >= 0) {
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (path->slots[level] >=
btrfs_header_nritems(path->nodes[level]))
ret = do_walk_down(trans, root, path, wc, &lookup_info);
int level = wc->level;
while (level < max_level && path->nodes[level]) {
if (path->slots[level] + 1 <
btrfs_header_nritems(path->nodes[level])) {
ret = walk_up_proc(trans, root, path, wc);
if (path->locks[level]) {
btrfs_tree_unlock_rw(path->nodes[level], path->locks[level]);
wc = kzalloc(sizeof(*wc), GFP_NOFS);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_header_level(root->node);
btrfs_set_lock_blocking(path->nodes[level]);
level = btrfs_header_level(root->node);
btrfs_set_lock_blocking(path->nodes[level]);
path->nodes[level]->start,
path->nodes[level]->len,
ret = walk_down_tree(trans, root, path, wc);
ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
path->slots[level]);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
} else if (ret > 0) {
wc = kzalloc(sizeof(*wc), GFP_NOFS);
parent_level = btrfs_header_level(parent);
extent_buffer_get(parent);
path->nodes[parent_level] = parent;
path->slots[parent_level] = btrfs_header_nritems(parent);
level = btrfs_header_level(node);
wc->refs[parent_level] = 1;
wret = walk_down_tree(trans, root, path, wc);
wret = walk_up_tree(trans, root, path, wc, parent_level);
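/*
 * The remaining fragments deal with making block groups read-only for
 * balance/relocation (set_block_group_ro() and friends), deciding whether
 * a block group can be relocated at all, and tearing down the block group
 * caches and free-space caches at unmount.
 */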
7142 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7151 stripped = get_restripe_target(root->fs_info, flags);
7153 return extended_to_chunk(stripped);
7160 num_devices = root->fs_info->fs_devices->rw_devices +
7161 root->fs_info->fs_devices->missing_devices;
7163 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7166 if (num_devices == 1) {
7168 stripped = flags & ~stripped;
7171 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7175 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7176 BTRFS_BLOCK_GROUP_RAID10))
7180 if (flags & stripped)
7184 stripped = flags & ~stripped;
7187 if (flags & BTRFS_BLOCK_GROUP_DUP)
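/*
 * set_block_group_ro() marks a block group read-only, but only when the
 * enclosing space_info still has enough unallocated room (plus a small
 * reserve for metadata/system groups) to absorb the bytes that become
 * unusable.
 */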
7200 u64 min_allocable_bytes;
7210 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7212 min_allocable_bytes = 1 * 1024 * 1024;
7214 min_allocable_bytes = 0;
7216 spin_lock(&sinfo->lock);
7217 spin_lock(&cache->lock);
7229 min_allocable_bytes <= sinfo->total_bytes) {
7235 spin_unlock(&cache->lock);
7236 spin_unlock(&sinfo->lock);
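/*
 * btrfs_set_block_group_ro() is the non-forced variant used before
 * relocating a chunk: if the group cannot be made read-only right away it
 * allocates a new chunk based on update_block_group_flags() and retries.
 */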
7252 return PTR_ERR(trans);
7254 alloc_flags = update_block_group_flags(root, cache->flags);
7255 if (alloc_flags != cache->flags) {
7256 ret = do_chunk_alloc(trans, root, alloc_flags,
7262 ret = set_block_group_ro(cache, 0);
7265 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7266 ret = do_chunk_alloc(trans, root, alloc_flags,
7266 ret = do_chunk_alloc(trans, root, alloc_flags,
7270 ret = set_block_group_ro(cache, 0);
7279 u64 alloc_flags = get_alloc_profile(root, type);
7280 return do_chunk_alloc(trans, root, alloc_flags,
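/*
 * btrfs_account_ro_block_groups_free_space() adds up the unused bytes of
 * read-only block groups; RAID1/RAID10/DUP groups are counted twice so the
 * result reflects raw on-disk bytes.
 */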
7288 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7295 spin_lock(&block_group->lock);
7297 if (!block_group->ro) {
7298 spin_unlock(&block_group->lock);
7302 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7303 BTRFS_BLOCK_GROUP_RAID10 |
7304 BTRFS_BLOCK_GROUP_DUP))
7309 free_bytes += (block_group->key.offset -
7310 btrfs_block_group_used(&block_group->item)) *
7313 spin_unlock(&block_group->lock);
7328 spin_lock(&sinfo->lock);
7332 free_bytes += __btrfs_get_ro_block_group_free_space(
7335 spin_unlock(&sinfo->lock);
7348 spin_lock(&sinfo->lock);
7349 spin_lock(&cache->lock);
7354 spin_unlock(&cache->lock);
7355 spin_unlock(&sinfo->lock);
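/*
 * btrfs_can_relocate() estimates whether the data still used in a block
 * group could be moved elsewhere: it checks free space in other block
 * groups of the same type and, failing that, whether enough devices have
 * room to host a replacement chunk.
 */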
7384 min_free = btrfs_block_group_used(&block_group->item);
7391 spin_lock(&space_info->lock);
7393 full = space_info->full;
7405 min_free < space_info->total_bytes)) {
7406 spin_unlock(&space_info->lock);
7409 spin_unlock(&space_info->lock);
7428 target = get_restripe_target(root->fs_info, block_group->flags);
7430 index = __get_block_group_index(extended_to_chunk(target));
7439 index = get_block_group_index(block_group);
7446 } else if (index == 1) {
7448 } else if (index == 2) {
7451 } else if (index == 3) {
7453 do_div(min_free, dev_min);
7470 if (dev_nr >= dev_min)
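/*
 * find_first_block_group() walks the extent tree looking for the first
 * BLOCK_GROUP_ITEM whose objectid is at or past the search key;
 * btrfs_read_block_groups() below uses it to iterate all block group items.
 */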
7482 static int find_first_block_group(struct btrfs_root *root,
7495 slot = path->slots[0];
7496 leaf = path->nodes[0];
7497 if (slot >= btrfs_header_nritems(leaf)) {
7505 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7507 if (found_key.objectid >= key->objectid &&
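/*
 * btrfs_put_block_group_cache() walks every block group and drops the
 * extra reference it may hold on its free space cache inode
 * (block_group->iref), so the inodes can be evicted at unmount.
 */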
7524 struct inode *inode;
7526 block_group = btrfs_lookup_first_block_group(info, last);
7527 while (block_group) {
7528 spin_lock(&block_group->lock);
7529 if (block_group->iref)
7531 spin_unlock(&block_group->lock);
7532 block_group = next_block_group(info->tree_root,
7542 inode = block_group->inode;
7543 block_group->iref = 0;
7545 spin_unlock(&block_group->lock);
7547 last = block_group->key.objectid + block_group->key.offset;
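/*
 * btrfs_free_block_groups() tears down the in-memory block group and
 * space_info structures at unmount, waiting for caching threads to finish
 * and releasing the global block reserve.
 */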
7564 put_caching_control(caching_ctl);
7581 wait_block_group_cache_done(block_group);
7588 free_excluded_extents(info->extent_root, block_group);
7605 release_global_block_rsv(info);
7615 dump_space_info(space_info, 0, 0);
7626 int index = get_block_group_index(cache);
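/*
 * btrfs_read_block_groups() runs at mount: it reads every block group item
 * from the extent tree, builds the in-memory caches, links them into their
 * space_info, and marks un-mirrored block groups read-only when the
 * filesystem otherwise uses mirrored profiles.
 */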
7655 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7657 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7663 ret = find_first_block_group(root, path, &key);
7668 leaf = path->nodes[0];
7669 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7670 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7686 INIT_LIST_HEAD(&cache->list);
7707 sizeof(cache->item));
7708 memcpy(&cache->key, &found_key, sizeof(found_key));
7712 cache->flags = btrfs_block_group_flags(&cache->item);
7722 exclude_super_stripes(root, cache);
7731 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7734 free_excluded_extents(root, cache);
7735 } else if (btrfs_block_group_used(&cache->item) == 0) {
7738 add_new_free_space(cache, root->fs_info,
7742 free_excluded_extents(root, cache);
7745 ret = update_space_info(info, cache->flags, found_key.offset,
7746 btrfs_block_group_used(&cache->item),
7754 __link_block_group(space_info, cache);
7756 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7761 set_block_group_ro(cache, 1);
7764 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7765 if (!(get_alloc_profile(root, space_info->flags) &
7766 (BTRFS_BLOCK_GROUP_RAID10 |
7767 BTRFS_BLOCK_GROUP_RAID1 |
7768 BTRFS_BLOCK_GROUP_DUP)))
7775 set_block_group_ro(cache, 1);
7777 set_block_group_ro(cache, 1);
7780 init_global_block_rsv(info);
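/*
 * Here the block group item and key are copied to the stack under
 * block_group->lock before the on-disk item is written, so the update does
 * not race with concurrent in-memory changes.
 */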
7803 spin_lock(&block_group->lock);
7804 memcpy(&item, &block_group->item, sizeof(item));
7805 memcpy(&key, &block_group->key, sizeof(key));
7806 spin_unlock(&block_group->lock);
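/*
 * btrfs_make_block_group() creates the in-memory cache for a newly
 * allocated chunk, seeds its free space, updates the owning space_info and
 * the global block reserve, and records the new profile in the available
 * allocation bits.
 */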
7824 extent_root = root->fs_info->extent_root;
7828 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7846 INIT_LIST_HEAD(&cache->list);
7852 btrfs_set_block_group_used(&cache->item, bytes_used);
7853 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7855 btrfs_set_block_group_flags(&cache->item, type);
7859 exclude_super_stripes(root, cache);
7861 add_new_free_space(cache, root->fs_info, chunk_offset,
7862 chunk_offset + size);
7864 free_excluded_extents(root, cache);
7866 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7869 update_global_block_rsv(root->fs_info);
7875 __link_block_group(cache->space_info, cache);
7877 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7882 set_avail_alloc_bits(extent_root->fs_info, type);
7887 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7889 u64 extra_flags = chunk_to_extended(flags) &
7892 if (flags & BTRFS_BLOCK_GROUP_DATA)
7894 if (flags & BTRFS_BLOCK_GROUP_METADATA)
7896 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
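/*
 * btrfs_remove_block_group() deletes an empty block group: it drops the
 * free space cache inode, unhooks the cache from the rbtree, lists and
 * allocation clusters, adjusts the space_info counters, and removes both
 * the cache inode item and the block group item from disk.
 */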
7908 struct inode *inode;
7913 root = root->fs_info->extent_root;
7923 free_excluded_extents(root, block_group);
7925 memcpy(&key, &block_group->key, sizeof(key));
7926 index = get_block_group_index(block_group);
7927 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7928 BTRFS_BLOCK_GROUP_RAID1 |
7929 BTRFS_BLOCK_GROUP_RAID10))
7935 cluster = &root->fs_info->data_alloc_cluster;
7944 cluster = &root->fs_info->meta_alloc_cluster;
7956 if (!IS_ERR(inode)) {
7964 spin_lock(&block_group->lock);
7965 if (block_group->iref) {
7966 block_group->iref = 0;
7968 spin_unlock(&block_group->lock);
7971 spin_unlock(&block_group->lock);
7987 ret = btrfs_del_item(trans, tree_root, path);
7993 spin_lock(&root->fs_info->block_group_cache_lock);
7995 &root->fs_info->block_group_cache_tree);
7996 spin_unlock(&root->fs_info->block_group_cache_lock);
8003 list_del_init(&block_group->list);
8004 if (list_empty(&block_group->space_info->block_groups[index]))
8005 clear_avail_alloc_bits(root->fs_info, block_group->flags);
8009 wait_block_group_cache_done(block_group);
8014 block_group->space_info->total_bytes -= block_group->key.offset;
8015 block_group->space_info->bytes_readonly -= block_group->key.offset;
8016 block_group->space_info->disk_total -= block_group->key.offset * factor;
8019 memcpy(&key, &block_group->key, sizeof(key));
8032 ret = btrfs_del_item(trans, root, path);
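/*
 * btrfs_init_space_info() creates the initial space_info entries;
 * depending on the MIXED_GROUPS incompat flag it sets up either a combined
 * data+metadata space_info or separate SYSTEM, METADATA and DATA entries.
 */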
8048 if (!btrfs_super_root(disk_super))
8051 features = btrfs_super_incompat_flags(disk_super);
8056 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8062 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8065 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8070 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
8078 return unpin_extent_range(root, start, end);
8082 u64 num_bytes, u64 *actual_bytes)
8084 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
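/*
 * btrfs_trim_fs() implements FITRIM: it walks every block group overlapping
 * the requested range, makes sure its free space is cached, trims the free
 * extents that meet range->minlen, and reports the total trimmed bytes back
 * in range->len.
 */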
8095 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
8101 if (range->len == total_bytes)
8102 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8107 if (cache->key.objectid >= (range->start + range->len)) {
8114 cache->key.objectid + cache->key.offset);
8116 if (end - start >= range->minlen) {
8117 if (!block_group_cache_done(cache)) {
8118 ret = cache_block_group(cache, NULL, root, 0);
8120 wait_block_group_cache_done(cache);
8128 trimmed += group_trimmed;
8135 cache = next_block_group(fs_info->tree_root, cache);
8138 range->len = trimmed;