18 #include <linux/sched.h>
20 #include <linux/slab.h>
23 #include <linux/random.h>
25 #include <linux/capability.h>
28 #include <asm/div64.h>
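/*
 * btrfs_relocate_sys_chunks() is forward-declared because device add needs it
 * after sprouting a seed filesystem; lock_chunks()/unlock_chunks() serialize
 * chunk allocation and removal on fs_info->chunk_mutex.
 */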
43 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
50 static void lock_chunks(struct btrfs_root *root)
55 static void unlock_chunks(struct btrfs_root *root)
64 while (!list_empty(&fs_devices->devices)) {
68 rcu_string_free(device->name);
78 while (!list_empty(&fs_uuids)) {
82 free_fs_devices(fs_devices);
92 if (dev->devid == devid &&
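/*
 * __find_device() matches a devid (and uuid) against a device list. The
 * pending-bio helpers below feed run_scheduled_bios(), which drains a
 * device's regular and sync bio queues from a worker thread and backs off
 * when the backing device reports write congestion.
 */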
112 struct bio *head, struct bio *tail)
115 struct bio *old_head;
117 old_head = pending_bios->head;
119 if (pending_bios->tail)
120 tail->bi_next = old_head;
145 unsigned long num_run;
146 unsigned long batch_run = 0;
148 unsigned long last_waited = 0;
150 int sync_pending = 0;
162 fs_info = device->dev_root->fs_info;
164 limit = limit * 2 / 3;
185 pending = pending_bios->head;
186 tail = pending_bios->tail;
223 requeue_list(pending_bios, pending, tail);
228 pending = pending->bi_next;
247 } else if (sync_pending) {
264 if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
295 requeue_list(pending_bios, pending, tail);
303 if (batch_run % 64 == 0) {
328 run_scheduled_bios(device);
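/*
 * device_list_add() runs at scan time: it looks up the fs_devices entry for
 * the superblock's fsid (creating one for a new filesystem), then adds or
 * updates the btrfs_device keyed by devid, remembering the device path and
 * the highest generation seen so mount can pick the newest superblock.
 */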
338 u64 found_transid = btrfs_super_generation(disk_super);
340 fs_devices = find_fsid(disk_super->fsid);
342 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
345 INIT_LIST_HEAD(&fs_devices->devices);
347 list_add(&fs_devices->list, &fs_uuids);
354 device = __find_device(&fs_devices->devices, devid,
361 device = kzalloc(sizeof(*device), GFP_NOFS);
368 device->work.func = pending_bios_fn;
373 name = rcu_string_strdup(path, GFP_NOFS);
396 name = rcu_string_strdup(path, GFP_NOFS);
399 rcu_string_free(device->name);
411 *fs_devices_ret = fs_devices;
421 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
425 INIT_LIST_HEAD(&fs_devices->devices);
427 INIT_LIST_HEAD(&fs_devices->list);
438 device = kzalloc(sizeof(*device), GFP_NOFS);
446 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
454 device->work.func = pending_bios_fn;
466 free_fs_devices(fs_devices);
475 u64 latest_devid = 0;
476 u64 latest_transid = 0;
483 if (!latest_transid ||
485 latest_devid = device->devid;
487 latest_bdev = device->bdev;
504 rcu_string_free(device->name);
508 if (fs_devices->seed) {
509 fs_devices = fs_devices->seed;
529 rcu_string_free(device->name);
533 static void free_device(struct rcu_head *head)
547 if (--fs_devices->opened > 0)
568 memcpy(new_device, device, sizeof(*new_device));
600 ret = __btrfs_close_devices(fs_devices);
601 if (!fs_devices->opened) {
602 seed_devices = fs_devices->seed;
607 while (seed_devices) {
608 fs_devices = seed_devices;
609 seed_devices = fs_devices->seed;
610 __btrfs_close_devices(fs_devices);
611 free_fs_devices(fs_devices);
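/*
 * __btrfs_open_devices() opens every member block device, reads the btrfs
 * superblock to check the devid, remembers the device with the highest
 * generation as latest_bdev, and records whether the queue supports discard
 * or is non-rotational.
 */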
624 struct buffer_head *bh;
626 u64 latest_devid = 0;
627 u64 latest_transid = 0;
654 devid = btrfs_stack_device_id(&disk_super->dev_item);
655 if (devid != device->devid)
662 device->generation = btrfs_super_generation(disk_super);
663 if (!latest_transid || device->generation > latest_transid) {
664 latest_devid = devid;
676 q = bdev_get_queue(bdev);
677 if (blk_queue_discard(q)) {
686 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
729 ret = __btrfs_open_devices(fs_devices, flags, holder);
740 struct buffer_head *bh;
764 devid = btrfs_stack_device_id(&disk_super->dev_item);
765 transid = btrfs_super_generation(disk_super);
766 total_devices = btrfs_super_num_devices(disk_super);
767 if (disk_super->label[0])
772 (unsigned long long)devid, (unsigned long long)transid, path);
773 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
774 if (!ret && fs_devices_ret)
775 (*fs_devices_ret)->total_devices = total_devices;
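/*
 * The dev-extent walkers below scan BTRFS_DEV_EXTENT_KEY items for one
 * device: the first fragment accumulates how many bytes of a given range are
 * already covered by extents, and find_free_dev_extent() tracks the largest
 * hole (max_hole_start/max_hole_size) big enough for a new chunk stripe.
 */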
822 slot = path->slots[0];
823 if (slot >= btrfs_header_nritems(l)) {
832 btrfs_item_key_to_cpu(l, &key, slot);
844 extent_end = key.offset + btrfs_dev_extent_length(l,
846 if (key.offset <= start && extent_end > end) {
847 *length = end - start + 1;
849 } else if (key.offset <= start && extent_end > start)
850 *length += extent_end - start;
851 else if (key.offset > start && extent_end <= end)
852 *length += extent_end - key.offset;
854 *length += end - key.offset + 1;
856 } else if (key.offset > end)
910 search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
912 max_hole_start = search_start;
916 if (search_start >= search_end) {
929 key.offset = search_start;
943 slot = path->slots[0];
944 if (slot >= btrfs_header_nritems(l)) {
953 btrfs_item_key_to_cpu(l, &key, slot);
964 if (key.offset > search_start) {
965 hole_size = key.offset - search_start;
967 if (hole_size > max_hole_size) {
968 max_hole_start = search_start;
969 max_hole_size = hole_size;
981 if (hole_size >= num_bytes) {
988 extent_end = key.offset + btrfs_dev_extent_length(l,
990 if (extent_end > search_start)
991 search_start = extent_end;
1002 if (search_end > search_start)
1003 hole_size = search_end - search_start;
1005 if (hole_size > max_hole_size) {
1006 max_hole_start = search_start;
1007 max_hole_size = hole_size;
1011 if (hole_size < num_bytes)
1019 *start = max_hole_start;
1021 *len = max_hole_size;
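/*
 * btrfs_free_dev_extent() deletes a dev extent item and returns its length
 * to fs_info->free_chunk_space; btrfs_alloc_dev_extent() inserts a new item
 * recording which chunk tree/objectid/offset owns the stripe. find_next_chunk()
 * and btrfs_add_device() fill in the chunk and device items in the chunk tree.
 */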
1051 leaf = path->nodes[0];
1052 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1055 BUG_ON(found_key.offset > start || found_key.offset +
1056 btrfs_dev_extent_length(leaf, extent) < start);
1060 } else if (ret == 0) {
1061 leaf = path->nodes[0];
1070 u64 len = btrfs_dev_extent_length(leaf, extent);
1072 spin_lock(&root->fs_info->free_chunk_lock);
1073 root->fs_info->free_chunk_space += len;
1074 spin_unlock(&root->fs_info->free_chunk_lock);
1076 ret = btrfs_del_item(trans, root, path);
1079 "Failed to remove dev extent item");
1106 ret = btrfs_insert_empty_item(trans, root, path, &key,
1111 leaf = path->nodes[0];
1114 btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
1115 btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
1116 btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1119 (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
1122 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1156 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1158 if (found_key.objectid != objectid)
1163 *offset = found_key.offset +
1164 btrfs_chunk_length(path->nodes[0], chunk);
1180 root = root->fs_info->chunk_root;
1201 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1203 *objectid = found_key.offset + 1;
1226 root = root->fs_info->chunk_root;
1236 ret = btrfs_insert_empty_item(trans, root, path, &key,
1241 leaf = path->nodes[0];
1244 btrfs_set_device_id(leaf, dev_item, device->devid);
1245 btrfs_set_device_generation(leaf, dev_item, 0);
1246 btrfs_set_device_type(leaf, dev_item, device->type);
1247 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1248 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1249 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1250 btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1251 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1252 btrfs_set_device_group(leaf, dev_item, 0);
1253 btrfs_set_device_seek_speed(leaf, dev_item, 0);
1254 btrfs_set_device_bandwidth(leaf, dev_item, 0);
1255 btrfs_set_device_start_offset(leaf, dev_item, 0);
1257 ptr = (unsigned long)btrfs_device_uuid(dev_item);
1259 ptr = (unsigned long)btrfs_device_fsid(dev_item);
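/*
 * btrfs_rm_dev_item() deletes a device's item from the chunk tree inside its
 * own transaction. btrfs_rm_device() enforces the minimum device count for
 * the RAID profiles in use (4 for RAID10, 2 for RAID1), accepts the literal
 * path "missing" to drop an absent disk, and afterwards fixes up num_devices
 * in the super block and the seed device chain.
 */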
1269 static int btrfs_rm_dev_item(struct btrfs_root *root,
1277 root = root->fs_info->chunk_root;
1284 if (IS_ERR(trans)) {
1286 return PTR_ERR(trans);
1302 ret = btrfs_del_item(trans, root, path);
1307 unlock_chunks(root);
1317 struct buffer_head *bh = NULL;
1325 bool clear_super = false;
1329 all_avail = root->fs_info->avail_data_alloc_bits |
1330 root->fs_info->avail_system_alloc_bits |
1331 root->fs_info->avail_metadata_alloc_bits;
1334 root->fs_info->fs_devices->num_devices <= 4) {
1342 root->fs_info->fs_devices->num_devices <= 2) {
1344 "devices on raid1\n");
1349 if (strcmp(device_path, "missing") == 0) {
1354 devices = &root->fs_info->fs_devices->devices;
1377 ret = PTR_ERR(bdev);
1389 devid = btrfs_stack_device_id(&disk_super->dev_item);
1390 dev_uuid = disk_super->dev_item.uuid;
1409 unlock_chunks(root);
1410 root->fs_info->fs_devices->rw_devices--;
1418 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1422 spin_lock(&root->fs_info->free_chunk_lock);
1425 spin_unlock(&root->fs_info->free_chunk_lock);
1444 root->fs_info->fs_devices->missing_devices--;
1450 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1451 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1459 num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
1460 btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
1464 fs_devices = root->fs_info->fs_devices;
1465 while (fs_devices) {
1466 if (fs_devices->seed == cur_devices)
1468 fs_devices = fs_devices->seed;
1470 fs_devices->seed = cur_devices->seed;
1473 __btrfs_close_devices(cur_devices);
1474 unlock_chunks(root);
1475 free_fs_devices(cur_devices);
1478 root->fs_info->num_tolerated_disk_barrier_failures =
1490 set_buffer_dirty(bh);
1508 &root->fs_info->fs_devices->alloc_list);
1509 unlock_chunks(root);
1510 root->fs_info->fs_devices->rw_devices++;
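/*
 * btrfs_prepare_sprout() converts a mounted seed filesystem into a writable
 * sprout: the current devices are cloned onto a new "seed" fs_devices that is
 * chained behind the live one, and BTRFS_SUPER_FLAG_SEEDING is cleared from
 * the superblock flags so the new copy is read-write.
 */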
1518 static int btrfs_prepare_sprout(struct btrfs_root *root)
1527 BUG_ON(!mutex_is_locked(&uuid_mutex));
1531 seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1535 old_devices = clone_fs_devices(fs_devices);
1536 if (IS_ERR(old_devices)) {
1537 kfree(seed_devices);
1538 return PTR_ERR(old_devices);
1541 list_add(&old_devices->list, &fs_uuids);
1543 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1544 seed_devices->opened = 1;
1545 INIT_LIST_HEAD(&seed_devices->devices);
1550 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
1563 fs_devices->seed = seed_devices;
1568 super_flags = btrfs_super_flags(disk_super) &
1569 ~BTRFS_SUPER_FLAG_SEEDING;
1570 btrfs_set_super_flags(disk_super, super_flags);
1595 root = root->fs_info->chunk_root;
1605 leaf = path->nodes[0];
1607 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1613 leaf = path->nodes[0];
1614 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1619 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1626 devid = btrfs_device_id(leaf, dev_item);
1628 (unsigned long)btrfs_device_uuid(dev_item),
1631 (unsigned long)btrfs_device_fsid(dev_item),
1637 btrfs_set_device_generation(leaf, dev_item,
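/*
 * btrfs_init_new_device() implements device add: it opens the new bdev,
 * rejects devices already in the filesystem, allocates a btrfs_device with
 * the next free devid, bumps the device and byte counters in the superblock,
 * and for a seeding filesystem sprouts first and then relocates the system
 * chunks (hence the "btrfs balance" hint on failure).
 */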
1661 int seeding_dev = 0;
1670 return PTR_ERR(bdev);
1672 if (root->fs_info->fs_devices->seeding) {
1680 devices = &root->fs_info->fs_devices->devices;
1686 if (device->bdev == bdev) {
1692 device = kzalloc(sizeof(*device), GFP_NOFS);
1699 name = rcu_string_strdup(device_path, GFP_NOFS);
1707 ret = find_next_devid(root, &device->devid);
1709 rcu_string_free(device->name);
1715 if (IS_ERR(trans)) {
1716 rcu_string_free(device->name);
1718 ret = PTR_ERR(trans);
1724 q = bdev_get_queue(bdev);
1725 if (blk_queue_discard(q))
1728 device->work.func = pending_bios_fn;
1738 device->bdev = bdev;
1745 ret = btrfs_prepare_sprout(root);
1754 &root->fs_info->fs_devices->alloc_list);
1755 root->fs_info->fs_devices->num_devices++;
1756 root->fs_info->fs_devices->open_devices++;
1757 root->fs_info->fs_devices->rw_devices++;
1758 root->fs_info->fs_devices->total_devices++;
1760 root->fs_info->fs_devices->num_can_discard++;
1763 spin_lock(&root->fs_info->free_chunk_lock);
1765 spin_unlock(&root->fs_info->free_chunk_lock);
1767 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1768 root->fs_info->fs_devices->rotating = 1;
1770 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1771 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1774 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1775 btrfs_set_super_num_devices(root->fs_info->super_copy,
1780 ret = init_first_rw_device(trans, root, device);
1785 ret = btrfs_finish_sprout(trans, root);
1804 unlock_chunks(root);
1805 root->fs_info->num_tolerated_disk_barrier_failures =
1816 ret = btrfs_relocate_sys_chunks(root);
1819 "Failed to relocate sys chunks after "
1820 "device initialization. This can be fixed "
1821 "using the \"btrfs balance\" command.");
1823 if (IS_ERR(trans)) {
1824 if (PTR_ERR(trans) == -ENOENT)
1826 return PTR_ERR(trans);
1834 unlock_chunks(root);
1836 rcu_string_free(device->name);
1857 root = device->dev_root->fs_info->chunk_root;
1876 leaf = path->nodes[0];
1879 btrfs_set_device_id(leaf, dev_item, device->devid);
1880 btrfs_set_device_type(leaf, dev_item, device->type);
1881 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1882 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1883 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1885 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1897 device->dev_root->fs_info->super_copy;
1898 u64 old_total = btrfs_super_total_bytes(super_copy);
1906 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1913 return btrfs_update_device(trans, device);
1921 ret = __btrfs_grow_device(trans, device, new_size);
1935 root = root->fs_info->chunk_root;
1949 "Failed lookup while freeing chunk.");
1954 ret = btrfs_del_item(trans, root, path);
1957 "Failed to delete chunk item.");
1963 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
1977 array_size = btrfs_super_sys_array_size(super_copy);
1982 while (cur < array_size) {
1984 btrfs_disk_key_to_cpu(&key, disk_key);
1986 len = sizeof(*disk_key);
1990 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1991 len += btrfs_chunk_item_size(num_stripes);
1996 if (key.objectid == chunk_objectid &&
1997 key.offset == chunk_offset) {
1998 memmove(ptr, ptr + len, array_size - (cur + len));
2000 btrfs_set_super_sys_array_size(super_copy, array_size);
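/*
 * btrfs_relocate_chunk() moves the data out of a chunk (via
 * btrfs_relocate_block_group()), then frees each stripe's dev extent,
 * deletes the chunk item (plus its sys_chunk_array copy for SYSTEM chunks)
 * and drops the chunk's extent_map. btrfs_relocate_sys_chunks() applies this
 * to every SYSTEM chunk, retrying once if relocation provoked new allocations.
 */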
2009 static int btrfs_relocate_chunk(struct btrfs_root *root,
2010 u64 chunk_tree, u64 chunk_objectid,
2021 root = root->fs_info->chunk_root;
2022 extent_root = root->fs_info->extent_root;
2023 em_tree = &root->fs_info->mapping_tree.map_tree;
2048 em->start + em->len < chunk_offset);
2052 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2057 ret = btrfs_update_device(trans, map->stripes[i].dev);
2061 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2066 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2069 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2088 unlock_chunks(root);
2093 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2103 bool retried = false;
2129 leaf = path->nodes[0];
2130 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2134 chunk_type = btrfs_chunk_type(leaf, chunk);
2138 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2147 if (found_key.offset == 0)
2149 key.offset = found_key.offset - 1;
2152 if (failed && !retried) {
2156 } else if (failed && retried) {
2165 static int insert_balance_item(struct btrfs_root *root,
2181 if (IS_ERR(trans)) {
2183 return PTR_ERR(trans);
2190 ret = btrfs_insert_empty_item(trans, root, path, &key,
2195 leaf = path->nodes[0];
2200 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2201 btrfs_set_balance_data(leaf, item, &disk_bargs);
2202 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2203 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2204 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2205 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2207 btrfs_set_balance_flags(leaf, item, bctl->flags);
2218 static int del_balance_item(struct btrfs_root *root)
2230 if (IS_ERR(trans)) {
2232 return PTR_ERR(trans);
2247 ret = btrfs_del_item(trans, root, path);
2282 bctl->data.usage = 90;
2287 bctl->sys.usage = 90;
2292 bctl->meta.usage = 90;
2312 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2329 static int chunk_profiles_filter(u64 chunk_type,
2332 chunk_type = chunk_to_extended(chunk_type) &
2341 static u64 div_factor_fine(u64 num, int factor)
2353 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2357 u64 chunk_used, user_thresh;
2361 chunk_used = btrfs_block_group_used(&cache->item);
2363 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2364 if (chunk_used < user_thresh)
2376 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2380 stripe = btrfs_stripe_nr(chunk, i);
2381 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2395 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2409 factor = num_stripes / factor;
2412 stripe = btrfs_stripe_nr(chunk, i);
2413 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2416 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2417 stripe_length = btrfs_chunk_length(leaf, chunk);
2418 do_div(stripe_length, factor);
2420 if (stripe_offset < bargs->pend &&
2421 stripe_offset + stripe_length > bargs->pstart)
2434 if (chunk_offset < bargs->vend &&
2435 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2442 static int chunk_soft_convert_filter(u64 chunk_type,
2448 chunk_type = chunk_to_extended(chunk_type) &
2451 if (bargs->target == chunk_type)
2457 static int should_balance_chunk(struct btrfs_root *root,
2463 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2472 bargs = &bctl->data;
2473 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2476 bargs = &bctl->meta;
2480 chunk_profiles_filter(chunk_type, bargs)) {
2486 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2492 chunk_devid_filter(leaf, chunk, bargs)) {
2498 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2504 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2510 chunk_soft_convert_filter(chunk_type, bargs)) {
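/*
 * should_balance_chunk() applies the per-type balance filters (profile,
 * usage, devid, drange, vrange, soft convert) from the balance_control.
 * __btrfs_balance() below walks the chunk tree backwards, first in a counting
 * pass that only updates bctl->stat, then relocating every chunk the filters
 * accept; ENOSPC from relocation is counted rather than treated as fatal.
 */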
2543 int enospc_errors = 0;
2544 bool counting = true;
2551 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2611 leaf = path->nodes[0];
2612 slot = path->slots[0];
2613 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2615 if (found_key.objectid != key.objectid)
2619 if (found_key.offset == 0)
2626 bctl->stat.considered++;
2630 ret = should_balance_chunk(chunk_root, leaf, chunk,
2638 bctl->stat.expected++;
2643 ret = btrfs_relocate_chunk(chunk_root,
2647 if (ret && ret != -ENOSPC)
2653 bctl->stat.completed++;
2657 key.offset = found_key.offset - 1;
2667 if (enospc_errors) {
2682 static int alloc_profile_is_valid(u64 flags, int extended)
2687 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2698 return (flags & (flags - 1)) == 0;
2701 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2713 unset_balance_control(fs_info);
2714 ret = del_balance_item(fs_info->tree_root);
2732 if (btrfs_fs_closing(fs_info) ||
2739 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2748 if (mixed && (bctl->flags & allowed)) {
2753 "metadata balance options must be the same\n");
2762 else if (fs_info->fs_devices->num_devices < 4)
2769 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2770 (bctl->data.target & ~allowed))) {
2772 "data profile %llu\n",
2773 (unsigned long long)bctl->data.target);
2778 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2779 (bctl->meta.target & ~allowed))) {
2781 "metadata profile %llu\n",
2782 (unsigned long long)bctl->meta.target);
2787 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2788 (bctl->sys.target & ~allowed))) {
2790 "system profile %llu\n",
2791 (unsigned long long)bctl->sys.target);
2809 !(bctl->sys.target & allowed)) ||
2812 !(bctl->meta.target & allowed))) {
2818 "integrity, use force if you want this\n");
2828 num_tolerated_disk_barrier_failures =
2830 if (num_tolerated_disk_barrier_failures > 0 &&
2834 num_tolerated_disk_barrier_failures = 0;
2835 else if (num_tolerated_disk_barrier_failures > 1 &&
2838 num_tolerated_disk_barrier_failures = 1;
2844 ret = insert_balance_item(fs_info->tree_root, bctl);
2845 if (ret && ret != -EEXIST)
2850 set_balance_control(bctl);
2854 update_balance_args(bctl);
2861 ret = __btrfs_balance(fs_info);
2867 memset(bargs, 0, sizeof(*bargs));
2872 balance_need_close(fs_info)) {
2873 __cancel_balance(fs_info);
2886 __cancel_balance(fs_info);
2892 static int balance_kthread(void *data)
2927 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
2929 return PTR_ERR(tsk);
2960 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2966 leaf = path->nodes[0];
2970 bctl->flags = btrfs_balance_flags(leaf, item);
2973 btrfs_balance_data(leaf, item, &disk_bargs);
2974 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2975 btrfs_balance_meta(leaf, item, &disk_bargs);
2976 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2977 btrfs_balance_sys(leaf, item, &disk_bargs);
2978 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2983 set_balance_control(bctl);
3046 __cancel_balance(fs_info);
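/*
 * btrfs_shrink_device() trims the device to new_size: it reduces the
 * in-memory size and free_chunk_space first, relocates every chunk that has
 * a dev extent past the new end, retries once, and only then commits the new
 * total_bytes to the device item and superblock; on ENOSPC the old size and
 * accounting are restored.
 */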
3075 bool retried = false;
3079 u64 old_total = btrfs_super_total_bytes(super_copy);
3097 spin_lock(&root->fs_info->free_chunk_lock);
3098 root->fs_info->free_chunk_space -= diff;
3099 spin_unlock(&root->fs_info->free_chunk_lock);
3101 unlock_chunks(root);
3123 slot = path->slots[0];
3124 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3132 length = btrfs_dev_extent_length(l, dev_extent);
3134 if (key.offset + length <= new_size) {
3139 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3140 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3141 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3144 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3146 if (ret && ret != -ENOSPC)
3150 } while (key.offset-- > 0);
3152 if (failed && !retried) {
3156 } else if (failed && retried) {
3163 spin_lock(&root->fs_info->free_chunk_lock);
3164 root->fs_info->free_chunk_space += diff;
3165 spin_unlock(&root->fs_info->free_chunk_lock);
3166 unlock_chunks(root);
3172 if (IS_ERR(trans)) {
3173 ret = PTR_ERR(trans);
3181 ret = btrfs_update_device(trans, device);
3183 unlock_chunks(root);
3188 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3189 unlock_chunks(root);
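/*
 * __btrfs_alloc_chunk() picks the stripes for a new chunk: per-profile limits
 * set max_stripe_size/max_chunk_size (1G data, 256M-1G metadata, 32M system),
 * each rw device contributes its largest free hole to devices_info[], the
 * array is sorted with btrfs_cmp_device_info(), and the stripe count and size
 * are clamped by devs_increment, devs_max and ncopies.
 */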
3196 static int btrfs_add_system_chunk(struct btrfs_root *root,
3205 array_size = btrfs_super_sys_array_size(super_copy);
3210 btrfs_cpu_key_to_disk(&disk_key, key);
3211 memcpy(ptr, &disk_key, sizeof(disk_key));
3212 ptr += sizeof(disk_key);
3213 memcpy(ptr, chunk, item_size);
3214 item_size += sizeof(disk_key);
3215 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3222 static int btrfs_cmp_device_info(const void *a, const void *b)
3241 u64 *num_bytes_out, u64 *stripe_size_out,
3260 u64 max_stripe_size;
3268 BUG_ON(!alloc_profile_is_valid(type, 0));
3305 if (type & BTRFS_BLOCK_GROUP_DATA) {
3306 max_stripe_size = 1024 * 1024 * 1024;
3307 max_chunk_size = 10 * max_stripe_size;
3308 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3311 max_stripe_size = 1024 * 1024 * 1024;
3313 max_stripe_size = 256 * 1024 * 1024;
3314 max_chunk_size = max_stripe_size;
3315 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3316 max_stripe_size = 32 * 1024 * 1024;
3317 max_chunk_size = 2 * max_stripe_size;
3328 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3351 "btrfs: read-only device in alloc_list\n");
3365 if (total_avail == 0)
3369 max_stripe_size * dev_stripes,
3370 &dev_offset, &max_avail);
3371 if (ret && ret != -ENOSPC)
3375 max_avail = max_stripe_size * dev_stripes;
3381 devices_info[ndevs].max_avail = max_avail;
3383 devices_info[ndevs].dev = device;
3391 btrfs_cmp_device_info, NULL);
3394 ndevs -= ndevs % devs_increment;
3396 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3401 if (devs_max && ndevs > devs_max)
3407 stripe_size = devices_info[ndevs-1].max_avail;
3408 num_stripes = ndevs * dev_stripes;
3410 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3411 stripe_size = max_chunk_size * ncopies;
3412 do_div(stripe_size, ndevs);
3415 do_div(stripe_size, dev_stripes);
3428 for (i = 0; i < ndevs; ++i) {
3429 for (j = 0; j < dev_stripes; ++j) {
3430 int s = i * dev_stripes + j;
3444 num_bytes = stripe_size * (num_stripes / ncopies);
3449 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3462 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3481 dev_offset = map->stripes[i].physical;
3486 start, dev_offset, stripe_size);
3493 kfree(devices_info);
3498 kfree(devices_info);
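/*
 * __finish_chunk_alloc() persists the chunk chosen above: it updates each
 * member device's bytes_used, builds the on-disk btrfs_chunk plus stripe
 * array, inserts the item into the chunk tree, and for SYSTEM chunks also
 * appends it to the superblock's sys_chunk_array via btrfs_add_system_chunk().
 */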
3505 u64 chunk_size, u64 stripe_size)
3513 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3517 chunk = kzalloc(item_size, GFP_NOFS);
3522 while (index < map->num_stripes) {
3525 ret = btrfs_update_device(trans, device);
3531 spin_lock(&extent_root->fs_info->free_chunk_lock);
3532 extent_root->fs_info->free_chunk_space -= (stripe_size *
3534 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3538 while (index < map->num_stripes) {
3542 btrfs_set_stack_stripe_devid(stripe, device->devid);
3543 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3549 btrfs_set_stack_chunk_length(chunk, chunk_size);
3550 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3551 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3552 btrfs_set_stack_chunk_type(chunk, map->type);
3553 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3554 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3555 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3556 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3557 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3565 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3570 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3601 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3602 &stripe_size, chunk_offset, type);
3606 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3607 chunk_size, stripe_size);
3618 u64 sys_chunk_offset;
3622 u64 sys_stripe_size;
3635 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3639 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3640 &stripe_size, chunk_offset, alloc_profile);
3644 sys_chunk_offset = chunk_offset + chunk_size;
3646 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3650 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3651 &sys_chunk_size, &sys_stripe_size,
3652 sys_chunk_offset, alloc_profile);
3670 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3671 chunk_size, stripe_size);
3677 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3678 sys_chunk_offset, sys_chunk_size,
3709 if (!map->stripes[i].dev->writeable) {
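/*
 * __btrfs_map_block() turns a logical range into physical stripes: it looks
 * up the chunk's map_lookup, computes stripe number and offset from
 * stripe_len, and chooses stripe indexes per RAID profile (find_live_mirror()
 * picks a mirror with a live bdev for RAID1/RAID10 reads). REQ_DISCARD
 * requests may cover several stripes and get per-stripe lengths in the bbio.
 */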
3767 static int find_live_mirror(struct map_lookup *map, int first, int num,
3771 if (map->stripes[optimal].dev->bdev)
3773 for (i = first; i < first + num; i++) {
3774 if (map->stripes[i].dev->bdev)
3793 u64 stripe_end_offset;
3810 (unsigned long long)logical,
3811 (unsigned long long)*length);
3817 offset = logical - em->start;
3830 BUG_ON(offset < stripe_offset);
3833 stripe_offset = offset - stripe_offset;
3850 stripe_nr_orig = stripe_nr;
3851 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3854 stripe_end_offset = stripe_nr_end * map->stripe_len -
3857 if (rw & REQ_DISCARD)
3859 stripe_nr_end - stripe_nr_orig);
3864 else if (mirror_num)
3865 stripe_index = mirror_num - 1;
3867 stripe_index = find_live_mirror(map, 0,
3870 mirror_num = stripe_index + 1;
3876 } else if (mirror_num) {
3877 stripe_index = mirror_num - 1;
3885 stripe_index = do_div(stripe_nr, factor);
3890 else if (rw & REQ_DISCARD)
3892 (stripe_nr_end - stripe_nr_orig),
3894 else if (mirror_num)
3895 stripe_index += mirror_num - 1;
3897 int old_stripe_index = stripe_index;
3898 stripe_index = find_live_mirror(map, stripe_index,
3901 mirror_num = stripe_index - old_stripe_index + 1;
3910 mirror_num = stripe_index + 1;
3921 if (rw & REQ_DISCARD) {
3923 int sub_stripes = 0;
3924 u64 stripes_per_dev = 0;
3925 u32 remaining_stripes = 0;
3926 u32 last_stripe = 0;
3936 stripes_per_dev = div_u64_rem(stripe_nr_end -
3939 &remaining_stripes);
3940 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3946 map->stripes[stripe_index].physical +
3952 bbio->stripes[i].length = stripes_per_dev *
3955 if (i / sub_stripes < remaining_stripes)
3967 if (i < sub_stripes)
3971 if (stripe_index >= last_stripe &&
3972 stripe_index <= (last_stripe +
3977 if (i == sub_stripes - 1)
3992 map->stripes[stripe_index].physical +
3996 map->stripes[stripe_index].dev;
4001 if (rw & REQ_WRITE) {
4019 u64 logical, u64 *length,
4020 struct btrfs_bio **bbio_ret, int mirror_num)
4022 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
4027 u64 chunk_start, u64 physical, u64 devid,
4056 if (devid && map->stripes[i].dev->devid != devid)
4058 if (map->stripes[i].physical > physical ||
4059 map->stripes[i].physical + length <= physical)
4062 stripe_nr = physical - map->stripes[i].physical;
4071 bytenr = chunk_start + stripe_nr * map->stripe_len;
4073 for (j = 0; j < nr; j++) {
4074 if (buf[j] == bytenr)
4091 static void *merge_stripe_index_into_bio_private(void *bi_private,
4092 unsigned int stripe_index)
4102 BUG_ON(stripe_index > 3);
4103 return (void *)(((uintptr_t)bi_private) | stripe_index);
4106 static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4111 static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4113 return (unsigned int)((uintptr_t)bi_private) & 3;
4116 static void btrfs_end_bio(struct bio *bio, int err)
4118 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4119 int is_orig_bio = 0;
4124 unsigned int stripe_index =
4125 extract_stripe_index_from_bio_private(
4130 dev = bbio->stripes[stripe_index].dev;
4132 if (bio->bi_rw & WRITE)
4133 btrfs_dev_stat_inc(dev,
4136 btrfs_dev_stat_inc(dev,
4139 btrfs_dev_stat_inc(dev,
4154 bio->bi_private = bbio->private;
4155 bio->bi_end_io = bbio->end_io;
4168 set_bit(BIO_UPTODATE, &bio->bi_flags);
4174 } else if (!is_orig_bio) {
4195 int rw, struct bio *bio)
4197 int should_queue = 1;
4201 if (!(rw & REQ_WRITE)) {
4216 bio->bi_next = NULL;
4225 if (pending_bios->tail)
4226 pending_bios->tail->bi_next = bio;
4228 pending_bios->tail = bio;
4229 if (!pending_bios->head)
4230 pending_bios->head = bio;
4234 spin_unlock(&device->io_lock);
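/*
 * btrfs_map_bio() maps the bio's logical address, clones it once per stripe,
 * encodes the stripe index in the low bits of bi_private (see the helpers
 * around line 4091), and either submits each clone directly or defers it to
 * the per-device worker through schedule_bio().
 */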
4242 int mirror_num, int async_submit)
4246 struct bio *first_bio = bio;
4247 u64 logical = (u64)bio->bi_sector << 9;
4255 length = bio->bi_size;
4256 map_tree = &root->fs_info->mapping_tree;
4257 map_length = length;
4265 if (map_length < length) {
4267 "len %llu\n", (unsigned long long)logical,
4268 (unsigned long long)length,
4269 (unsigned long long)map_length);
4274 bbio->private = first_bio->bi_private;
4275 bbio->end_io = first_bio->bi_end_io;
4278 while (dev_nr < total_devs) {
4279 if (dev_nr < total_devs - 1) {
4280 bio = bio_clone(first_bio, GFP_NOFS);
4285 bio->bi_private = bbio;
4286 bio->bi_private = merge_stripe_index_into_bio_private(
4287 bio->bi_private, (unsigned int)dev_nr);
4288 bio->bi_end_io = btrfs_end_bio;
4297 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4298 "(%s id %llu), size=%u\n", rw,
4300 name->str, dev->devid, bio->bi_size);
4303 bio->bi_bdev = dev->bdev;
4305 schedule_bio(root, dev, rw, bio);
4309 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4310 bio->bi_sector = logical >> 9;
4324 cur_devices = root->fs_info->fs_devices;
4325 while (cur_devices) {
4328 device = __find_device(&cur_devices->devices,
4333 cur_devices = cur_devices->seed;
4339 u64 devid, u8 *dev_uuid)
4344 device = kzalloc(sizeof(*device), GFP_NOFS);
4351 device->work.func = pending_bios_fn;
4378 length = btrfs_chunk_length(leaf, chunk);
4385 if (em && em->start <= logical && em->start + em->len > logical) {
4395 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4403 em->start = logical;
4409 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4410 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4411 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4412 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4413 map->type = btrfs_chunk_type(leaf, chunk);
4414 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4417 btrfs_stripe_offset_nr(leaf, chunk, i);
4418 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4420 btrfs_stripe_dev_uuid_nr(chunk, i),
4431 add_missing_dev(root, devid, uuid);
4438 map->stripes[i].dev->in_fs_metadata = 1;
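/*
 * read_one_chunk() rebuilds the in-memory map_lookup/extent_map for a chunk
 * item read from disk. fill_device_from_item(), open_seed_devices() and
 * read_one_dev() below attach dev items to scanned devices, cloning and
 * opening a seed fs_devices read-only when the fsid differs, and creating a
 * "missing" placeholder device when the disk is absent.
 */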
4450 static void fill_device_from_item(struct extent_buffer *leaf,
4456 device->devid = btrfs_device_id(leaf, dev_item);
4459 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4460 device->type = btrfs_device_type(leaf, dev_item);
4461 device->io_align = btrfs_device_io_align(leaf, dev_item);
4462 device->io_width = btrfs_device_io_width(leaf, dev_item);
4463 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4465 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4469 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4474 BUG_ON(!mutex_is_locked(&uuid_mutex));
4476 fs_devices = root->fs_info->fs_devices->seed;
4477 while (fs_devices) {
4482 fs_devices = fs_devices->seed;
4485 fs_devices = find_fsid(fsid);
4491 fs_devices = clone_fs_devices(fs_devices);
4492 if (IS_ERR(fs_devices)) {
4493 ret = PTR_ERR(fs_devices);
4497 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4500 free_fs_devices(fs_devices);
4505 __btrfs_close_devices(fs_devices);
4506 free_fs_devices(fs_devices);
4511 fs_devices->seed = root->fs_info->fs_devices->seed;
4512 root->fs_info->fs_devices->seed = fs_devices;
4517 static int read_one_dev(struct btrfs_root *root,
4527 devid = btrfs_device_id(leaf, dev_item);
4529 (unsigned long)btrfs_device_uuid(dev_item),
4532 (unsigned long)btrfs_device_fsid(dev_item),
4536 ret = open_seed_devices(root, fs_uuid);
4542 if (!device || !device->bdev) {
4548 (unsigned long long)devid);
4549 device = add_missing_dev(root, devid, dev_uuid);
4552 } else if (!device->missing) {
4559 root->fs_info->fs_devices->missing_devices++;
4567 btrfs_device_generation(leaf, dev_item))
4571 fill_device_from_item(leaf, dev_item, device);
4576 spin_lock(&root->fs_info->free_chunk_lock);
4579 spin_unlock(&root->fs_info->free_chunk_lock);
4592 unsigned long sb_ptr;
4605 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4619 SetPageUptodate(sb->pages[0]);
4622 array_size = btrfs_super_sys_array_size(super_copy);
4628 while (cur < array_size) {
4630 btrfs_disk_key_to_cpu(&key, disk_key);
4632 len = sizeof(*disk_key); ptr += len;
4638 ret = read_one_chunk(root, &key, sb, chunk);
4641 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4642 len = btrfs_chunk_item_size(num_stripes);
4664 root = root->fs_info->chunk_root;
4685 leaf = path->nodes[0];
4686 slot = path->slots[0];
4687 if (slot >= btrfs_header_nritems(leaf)) {
4695 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4703 ret = read_one_dev(root, leaf, dev_item);
4710 ret = read_one_chunk(root, &found_key, leaf, chunk);
4723 unlock_chunks(root);
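/*
 * The remaining fragments are the per-device I/O error statistics:
 * __btrfs_reset_dev_stats() zeroes the counters, btrfs_init_dev_stats() loads
 * persisted values from the device stats item (resetting them when the item
 * is missing or short), update_dev_stat_item() writes them back, and
 * btrfs_get_dev_stats() copies them out for the ioctl, optionally resetting.
 */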
4730 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4735 btrfs_dev_stat_reset(dev, i);
4767 __btrfs_reset_dev_stats(device);
4772 slot = path->slots[0];
4773 eb = path->nodes[0];
4774 btrfs_item_key_to_cpu(eb, &found_key, slot);
4775 item_size = btrfs_item_size_nr(eb, slot);
4781 if (item_size >= (1 + i) * sizeof(__le64))
4782 btrfs_dev_stat_set(device, i,
4783 btrfs_dev_stats_value(eb, ptr, i));
4785 btrfs_dev_stat_reset(device, i);
4789 btrfs_dev_stat_print_on_load(device);
4796 return ret < 0 ? ret : 0;
4824 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4826 ret = btrfs_del_item(trans, dev_root, path);
4838 ret = btrfs_insert_empty_item(trans, dev_root, path,
4839 &key, sizeof(*ptr));
4847 eb = path->nodes[0];
4850 btrfs_set_dev_stats_value(eb, ptr, i,
4851 btrfs_dev_stat_read(device, i));
4875 ret = update_dev_stat_item(trans, dev_root, device);
4886 btrfs_dev_stat_inc(dev, index);
4895 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4900 btrfs_dev_stat_read(dev,
4902 btrfs_dev_stat_read(dev,
4906 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4911 if (btrfs_dev_stat_read(dev, i) != 0)
4913 if (i == BTRFS_DEV_STAT_VALUES_MAX)
4938 "btrfs: get dev_stats failed, device not found\n");
4942 "btrfs: get dev_stats failed, not yet valid\n");
4948 btrfs_dev_stat_read_and_reset(dev, i);
4950 btrfs_dev_stat_reset(dev, i);
4955 stats->values[i] = btrfs_dev_stat_read(dev, i);
4957 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)