#include <linux/sched.h>
#include <linux/slab.h>

#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
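/*
 * Rough arithmetic (assuming 4KiB pages, which PAGE_CACHE_SIZE does not
 * guarantee): BITS_PER_BITMAP = 4096 * 8 = 32768, so one bitmap entry
 * can describe 32768 * 4KiB = 128MiB of a block group at a 4KiB
 * sectorsize, while MAX_CACHE_BYTES_PER_GIG caps cache metadata at
 * 32KiB per GiB of block group space.
 */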
/* __lookup_free_space_inode(): read the cache inode's location key out
 * of the on-disk free-space header item. */
    leaf = path->nodes[0];
    btrfs_free_space_key(leaf, header, &disk_key);
    btrfs_disk_key_to_cpu(&location, &disk_key);
/* lookup_free_space_inode(): the cached inode pointer and the iref
 * flag are only touched under block_group->lock. */
    struct inode *inode = NULL;

    spin_lock(&block_group->lock);
    if (block_group->inode)
    spin_unlock(&block_group->lock);

    inode = __lookup_free_space_inode(root, path,
                                      block_group->key.objectid);

    spin_lock(&block_group->lock);
    if (!((BTRFS_I(inode)->flags & flags) == flags)) {
    if (!block_group->iref) {
    block_group->iref = 1;
    spin_unlock(&block_group->lock);
/* __create_free_space_inode(): initialize the cache file's inode item
 * in place on the leaf: a plain 0600 regular file, nlink 1, with the
 * caller's flags (the memset_extent_buffer line is reconstructed from
 * context). */
    leaf = path->nodes[0];
    btrfs_item_key(leaf, &disk_key, path->slots[0]);
    memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
                         sizeof(*inode_item));
    btrfs_set_inode_generation(leaf, inode_item, trans->transid);
    btrfs_set_inode_size(leaf, inode_item, 0);
    btrfs_set_inode_nbytes(leaf, inode_item, 0);
    btrfs_set_inode_uid(leaf, inode_item, 0);
    btrfs_set_inode_gid(leaf, inode_item, 0);
    btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
    btrfs_set_inode_flags(leaf, inode_item, flags);
    btrfs_set_inode_nlink(leaf, inode_item, 1);
    btrfs_set_inode_transid(leaf, inode_item, trans->transid);
    btrfs_set_inode_block_group(leaf, inode_item, offset);
    ret = btrfs_insert_empty_item(trans, root, path, &key,
                                  sizeof(struct btrfs_free_space_header));

    leaf = path->nodes[0];
    btrfs_set_free_space_key(leaf, header, &disk_key);

/* create_free_space_inode() allocates an objectid and defers to the
 * helper above: */
    return __create_free_space_inode(root, trans, path, ino,
                                     block_group->key.objectid);
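/*
 * For reference, the header item written above has this on-disk shape
 * (from ctree.h of the same era, reproduced here as a reminder):
 *
 *     struct btrfs_free_space_header {
 *         struct btrfs_disk_key location;
 *         __le64 generation;
 *         __le64 num_entries;
 *         __le64 num_bitmaps;
 *     } __attribute__ ((__packed__));
 */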
/* btrfs_truncate_free_space_cache(): check that the transaction's
 * block reservation can pay for the truncate before zeroing i_size. */
    needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                   btrfs_calc_trans_metadata_size(root, 1);

    if (trans->block_rsv->reserved < needed_bytes) {

    oldsize = i_size_read(inode);
    btrfs_i_size_write(inode, 0);
static int readahead_cache(struct inode *inode)
    unsigned long last_index;

    ra = kzalloc(sizeof(*ra), GFP_NOFS);
/*
 * The io_ctl helpers manage the array of page-cache pages that the
 * space cache is serialized through: one page is mapped at a time, and
 * the whole set is prepared or dropped around a read or write pass.
 */
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
                       struct btrfs_root *root)
    memset(io_ctl, 0, sizeof(struct io_ctl));

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
    io_ctl_unmap_page(io_ctl);

    for (i = 0; i < io_ctl->num_pages; i++) {
    if (io_ctl->pages[i]) {
    ClearPageChecked(io_ctl->pages[i]);

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                                int uptodate)
    for (i = 0; i < io_ctl->num_pages; i++) {
    io_ctl_drop_pages(io_ctl);
    if (uptodate && !PageUptodate(page)) {
    if (!PageUptodate(page)) {
    io_ctl_drop_pages(io_ctl);

    for (i = 0; i < io_ctl->num_pages; i++) {
/* io_ctl_set_generation(): the generation stamp lives at the front of
 * the first page; cur/size are advanced past the reserved area before
 * and after storing it. */
static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
    io_ctl_map_page(io_ctl, 1);

    io_ctl->cur += sizeof(u64);
    io_ctl->size -= sizeof(u64) * 2;

    io_ctl->cur += sizeof(u64);
/* io_ctl_check_generation(): on load, the stored generation must match
 * the inode's, otherwise the cache is stale and gets rebuilt. */
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
    io_ctl->cur += sizeof(u64);
    io_ctl->size -= sizeof(u64) * 2;

    printk_ratelimited(KERN_ERR "btrfs: space cache generation "
                       "(%Lu) does not match inode (%Lu)\n", *gen,
                       generation);
    io_ctl_unmap_page(io_ctl);

    io_ctl->cur += sizeof(u64);
/* Each page gets a crc32c of its contents stored in the crc array on
 * the first page; index selects the slot. */
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
    io_ctl_unmap_page(io_ctl);
    io_ctl_unmap_page(io_ctl);

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
    io_ctl_map_page(io_ctl, 0);
    io_ctl_map_page(io_ctl, 0);
    io_ctl_unmap_page(io_ctl);
/* io_ctl_add_entry()/io_ctl_add_bitmap(): entries are packed into the
 * current page; when it fills, its crc is finalized and the next page
 * is mapped. Every bitmap is written as a full page of its own. */
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
                            void *bitmap)
    io_ctl_set_crc(io_ctl, io_ctl->index - 1);
    io_ctl_map_page(io_ctl, 1);

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
    if (io_ctl->cur != io_ctl->orig) {
    io_ctl_set_crc(io_ctl, io_ctl->index - 1);
    io_ctl_map_page(io_ctl, 0);

    io_ctl_set_crc(io_ctl, io_ctl->index - 1);
    io_ctl_map_page(io_ctl, 0);
static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
    if (io_ctl->cur != io_ctl->orig)
    io_ctl_set_crc(io_ctl, io_ctl->index - 1);
    io_ctl_unmap_page(io_ctl);
    io_ctl_map_page(io_ctl, 1);
    io_ctl_set_crc(io_ctl, io_ctl->index - 1);

static int io_ctl_read_entry(struct io_ctl *io_ctl,
    ret = io_ctl_check_crc(io_ctl, io_ctl->index);
    io_ctl_unmap_page(io_ctl);

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
    ret = io_ctl_check_crc(io_ctl, io_ctl->index);
    io_ctl_unmap_page(io_ctl);
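/*
 * What add_entry/read_entry move around is the packed on-disk record
 * (also from ctree.h of the same era, shown for reference):
 *
 *     struct btrfs_free_space_entry {
 *         __le64 offset;
 *         __le64 bytes;
 *         u8 type;    (BTRFS_FREE_SPACE_EXTENT or BTRFS_FREE_SPACE_BITMAP)
 *     } __attribute__ ((__packed__));
 */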
/* merge_space_tree(): after loading, adjacent extent entries are
 * unlinked, folded into prev, and relinked as one entry. */
    unlink_free_space(ctl, prev);
    unlink_free_space(ctl, e);
    link_free_space(ctl, prev);
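/*
 * Example with made-up offsets: loading [0, 64KiB) and then
 * [64KiB, 128KiB) leaves a single [0, 128KiB) entry in the tree, so
 * later searches see one 128KiB extent instead of two fragments.
 */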
/*
 * __load_free_space_cache(): read the entry/bitmap counts from the
 * header, verify crc and generation, replay extent entries straight
 * into the ctl, and queue bitmap entries on a local list so their
 * pages can be read afterwards in one pass.
 */
    struct io_ctl io_ctl;

    INIT_LIST_HEAD(&bitmaps);

    if (!i_size_read(inode))

    leaf = path->nodes[0];
    num_entries = btrfs_free_space_entries(leaf, header);
    num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
    generation = btrfs_free_space_generation(leaf, header);

    if (BTRFS_I(inode)->generation != generation) {
    printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
           " not match free space cache generation (%llu)\n",
           (unsigned long long)BTRFS_I(inode)->generation,
           (unsigned long long)generation);

    ret = io_ctl_init(&io_ctl, inode, root);
    ret = readahead_cache(inode);
    ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
    ret = io_ctl_check_crc(&io_ctl, 0);
    ret = io_ctl_check_generation(&io_ctl, generation);

    while (num_entries) {
    ret = io_ctl_read_entry(&io_ctl, e, &type);
    ret = link_free_space(ctl, e);
    printk(KERN_ERR "Duplicate entries in "
           "free space cache, dumping\n");
    ret = link_free_space(ctl, e);
    ctl->op->recalc_thresholds(ctl);
    printk(KERN_ERR "Duplicate entries in "
           "free space cache, dumping\n");
    io_ctl_unmap_page(&io_ctl);

    list_del_init(&e->list);
    ret = io_ctl_read_bitmap(&io_ctl, e);

    io_ctl_drop_pages(&io_ctl);
    merge_space_tree(ctl);
    io_ctl_free(&io_ctl);
    io_ctl_drop_pages(&io_ctl);
/*
 * load_free_space_cache(): after loading, the total is cross-checked
 * against the block group item; a mismatched cache is dropped and
 * rebuilt rather than trusted.
 */
    u64 used = btrfs_block_group_used(&block_group->item);

    spin_lock(&block_group->lock);
    spin_unlock(&block_group->lock);
    spin_unlock(&block_group->lock);

    spin_lock(&block_group->lock);
    spin_unlock(&block_group->lock);
    spin_unlock(&block_group->lock);

    ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
                                  path, block_group->key.objectid);

    matched = (ctl->free_space == (block_group->key.offset - used -
               block_group->bytes_super));

    printk(KERN_ERR "block group %llu has the wrong amount of free "
           "space\n", block_group->key.objectid);

    spin_lock(&block_group->lock);
    spin_unlock(&block_group->lock);

    printk(KERN_ERR "btrfs: failed to load free space cache "
           "for block group %llu\n", block_group->key.objectid);
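/*
 * Example with made-up numbers: a 1GiB block group (key.offset) with
 * 600MiB used and 4MiB of superblock copies must load exactly
 * 1024 - 600 - 4 = 420MiB of free space; anything else and the cache
 * is thrown away and rebuilt by the caching thread.
 */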
/*
 * __btrfs_write_out_cache(): stamp the generation, stream out every
 * extent and bitmap entry (cluster entries included), then append
 * entries for extents that are still pinned, since they return to the
 * free space tree at transaction commit. Queued bitmap pages follow,
 * the tail is zeroed, and the header counts are updated last.
 */
    struct io_ctl io_ctl;

    INIT_LIST_HEAD(&bitmap_list);

    if (!i_size_read(inode))

    ret = io_ctl_init(&io_ctl, inode, root);

    if (block_group && !list_empty(&block_group->cluster_list))

    io_ctl_prepare_pages(&io_ctl, inode, 0);

    if (!node && cluster) {

    io_ctl_set_generation(&io_ctl, trans->transid);

    ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
                           e->bitmap);

    if (!node && cluster) {

    unpin = root->fs_info->pinned_extents;

    start = block_group->key.objectid;

    while (block_group && (start < block_group->key.objectid +
                           block_group->key.offset)) {
    ret = find_first_extent_bit(unpin, start,
                                &extent_start, &extent_end,
                                EXTENT_DIRTY);

    if (extent_start >= block_group->key.objectid +
        block_group->key.offset)

    extent_start = max(extent_start, start);
    extent_end = min(block_group->key.objectid +
                     block_group->key.offset, extent_end + 1);
    len = extent_end - extent_start;

    ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);

    ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
    list_del_init(&entry->list);

    io_ctl_zero_remaining_pages(&io_ctl);

    ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
                            0, i_size_read(inode), &cached_state);
    io_ctl_drop_pages(&io_ctl);
    unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                         i_size_read(inode) - 1, &cached_state, GFP_NOFS);

    leaf = path->nodes[0];
    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
    if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
        found_key.offset != offset) {

    BTRFS_I(inode)->generation = trans->transid;
    btrfs_set_free_space_entries(leaf, header, entries);
    btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
    btrfs_set_free_space_generation(leaf, header, trans->transid);

    io_ctl_free(&io_ctl);

/* error path: forget the generation and release everything */
    BTRFS_I(inode)->generation = 0;
    list_del_init(&entry->list);
    io_ctl_drop_pages(&io_ctl);
    unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                         i_size_read(inode) - 1, &cached_state, GFP_NOFS);
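/*
 * Clamping example (made-up numbers): for block group [1GiB, 2GiB) and
 * a pinned range [1.5GiB, 2.5GiB), extent_start stays at 1.5GiB and
 * extent_end is clamped to 2GiB, so a 512MiB entry is written and the
 * loop stops at the block group boundary.
 */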
/* btrfs_write_out_cache(): wrapper that skips block groups whose cache
 * is not marked for writeout and flags failures so the cache is
 * ignored on the next mount. */
    struct inode *inode;

    root = root->fs_info->tree_root;

    spin_lock(&block_group->lock);
    spin_unlock(&block_group->lock);
    spin_unlock(&block_group->lock);

    ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                  path, block_group->key.objectid);

    spin_lock(&block_group->lock);
    spin_unlock(&block_group->lock);

    printk(KERN_ERR "btrfs: failed to write free space cache "
           "for block group %llu\n", block_group->key.objectid);
/* Bitmap geometry helpers: offsets are converted to bit indexes in
 * units of ctl->unit, and offset_to_bitmap() rounds an offset down to
 * the start of the bitmap that covers it. */
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
    BUG_ON(offset < bitmap_start);
    offset -= bitmap_start;
    return (unsigned long)(div_u64(offset, unit));

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
    return (unsigned long)(div_u64(bytes, unit));

    u64 bytes_per_bitmap;

    bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
    bitmap_start = offset - ctl->start;
    bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
    bitmap_start *= bytes_per_bitmap;
    bitmap_start += ctl->start;

    return bitmap_start;
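/*
 * Worked example (assumed values): with ctl->start = 1GiB, ctl->unit =
 * 4096 and 4KiB pages, bytes_per_bitmap = 32768 * 4096 = 128MiB. An
 * offset of 1GiB + 200MiB rounds down to the bitmap starting at
 * 1GiB + 128MiB, and offset_to_bit() puts it at bit
 * 72MiB / 4096 = 18432 inside that bitmap.
 */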
/*
 * tree_insert_offset()/tree_search_offset(): extent and bitmap entries
 * share one rbtree keyed by offset; a bitmap that starts at the same
 * offset as an extent sorts after it. bitmap_only restricts the lookup
 * to bitmaps, and fuzzy allows returning the entry that merely covers
 * the offset instead of an exact match.
 */
static int tree_insert_offset(struct rb_root *root, u64 offset,
    } else if (offset > info->offset) {

    rb_link_node(node, parent, p);

static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
    } else if (offset > entry->offset)

    if (entry->offset != offset)

    if (entry->offset > offset) {

/* unlink_free_space() wraps the locked variant: */
    __unlink_free_space(ctl, info);
/* recalculate_thresholds(): bound how many bytes of cache may be spent
 * on bitmaps versus extent entries, scaled by block group size. */
    int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

    if (size < 1024 * 1024 * 1024)
        max_bytes = MAX_CACHE_BYTES_PER_GIG;
    else
        max_bytes = MAX_CACHE_BYTES_PER_GIG *
            div64_u64(size, 1024 * 1024 * 1024);

    if (bitmap_bytes >= max_bytes) {

    extent_bytes = max_bytes - bitmap_bytes;
    extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
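/*
 * Example (made-up size): a 10GiB block group gets max_bytes =
 * 32KiB * 10 = 320KiB. Bitmap bytes are charged against that first,
 * and the bytes left for extent entries are further capped at
 * max_bytes / 2 = 160KiB.
 */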
/* bitmap_clear_bits()/bitmap_set_bits(): translate the byte range into
 * a (start, count) bit range before touching info->bitmap. */
    start = offset_to_bit(info->offset, ctl->unit, offset);
    count = bytes_to_bits(bytes, ctl->unit);

    __bitmap_clear_bits(ctl, info, offset, bytes);

    start = offset_to_bit(info->offset, ctl->unit, offset);
    count = bytes_to_bits(bytes, ctl->unit);
/* search_bitmap(): find the first run of set bits at least *bytes
 * long; the run's start and true length are passed back through the
 * pointers. */
    unsigned long found_bits = 0;
    unsigned long bits, i;
    unsigned long next_zero;

    i = offset_to_bit(bitmap_info->offset, ctl->unit,
                      max_t(u64, *offset, bitmap_info->offset));
    bits = bytes_to_bits(*bytes, ctl->unit);

    if ((next_zero - i) >= bits) {
        found_bits = next_zero - i;

    *bytes = (u64)(found_bits) * ctl->unit;
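/*
 * The elided run scan works roughly like this sketch (assuming the
 * kernel's find_next_bit/find_next_zero_bit helpers):
 *
 *     for (i = find_next_bit(map, BITS_PER_BITMAP, i);
 *          i < BITS_PER_BITMAP;
 *          i = find_next_bit(map, BITS_PER_BITMAP, next_zero)) {
 *         next_zero = find_next_zero_bit(map, BITS_PER_BITMAP, i);
 *         if (next_zero - i >= bits)
 *             break;    (long enough: this run is the answer)
 *     }
 */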
/* find_free_space(): walk entries starting from the bitmap that covers
 * *offset; bitmap entries are searched bit by bit, extent entries
 * satisfy the request directly. */
    entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);

    if (entry->bytes < *bytes)

    ret = search_bitmap(ctl, entry, offset, bytes);

    *bytes = entry->bytes;

/* add_new_bitmap(): a fresh bitmap is aligned to the range it covers
 * and counted into the thresholds. */
    info->offset = offset_to_bitmap(ctl, offset);
    INIT_LIST_HEAD(&info->list);
    link_free_space(ctl, info);
    ctl->op->recalc_thresholds(ctl);
/* free_bitmap(): unlink the entry and let the thresholds shrink. */
    unlink_free_space(ctl, bitmap_info);
    ctl->op->recalc_thresholds(ctl);
/*
 * remove_from_bitmap(): clear the requested range one bitmap at a
 * time. The range must actually be set in the bitmap (BUG_ON below);
 * when it crosses a bitmap boundary the loop hops to the next bitmap,
 * and bitmaps that empty out are freed.
 */
    u64 search_start, search_bytes;

    search_bytes = ctl->unit;
    search_bytes = min(search_bytes, end - search_start + 1);
    ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
    BUG_ON(ret < 0 || search_start != *offset);

    search_bytes = min(search_bytes, *bytes);
    search_bytes = min(search_bytes, end - search_start + 1);
    bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
    *offset += search_bytes;
    *bytes -= search_bytes;

    if (!bitmap_info->bytes)
        free_bitmap(ctl, bitmap_info);

    if (!bitmap_info->bitmap)

    search_bytes = ctl->unit;
    ret = search_bitmap(ctl, bitmap_info, &search_start,
                        &search_bytes);
    if (ret < 0 || search_start != *offset)

    } else if (!bitmap_info->bytes)
        free_bitmap(ctl, bitmap_info);
/* add_bytes_to_bitmap(): set as much of the range as fits inside this
 * bitmap's coverage and report how many bytes were consumed. */
    u64 bytes_to_set = 0;

    bytes_to_set = min(end - offset, bytes);

    bitmap_set_bits(ctl, info, offset, bytes_to_set);

    return bytes_to_set;
/* use_bitmap(): policy hook deciding whether a new entry should go
 * into a bitmap; block groups too small for a bitmap's coverage
 * (compared against block_group->key.offset below) never bother. */
        block_group->key.offset)

/* the block-group flavour of the ops table: */
    .recalc_thresholds = recalculate_thresholds,

/* insert_into_bitmap(): */
    bytes = info->bytes;

    if (!ctl->op->use_bitmap(ctl, info))

    if (ctl->op == &free_space_op)
/*
 * If the block group has a cluster, try the bitmap the cluster was
 * built on first; otherwise fall back to the regular tree, looping
 * until every byte is set and allocating new bitmaps as needed.
 */
    if (block_group && !list_empty(&block_group->cluster_list)) {

    spin_lock(&cluster->lock);
    spin_unlock(&cluster->lock);
    goto no_cluster_bitmap;

    spin_unlock(&cluster->lock);
    goto no_cluster_bitmap;

    if (entry->offset == offset_to_bitmap(ctl, offset)) {
    bytes_added = add_bytes_to_bitmap(ctl, entry,
                                      offset, bytes);
    bytes -= bytes_added;
    offset += bytes_added;

    spin_unlock(&cluster->lock);

    bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                     1, 0);

    bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
    bytes -= bytes_added;
    offset += bytes_added;

    if (info && info->bitmap) {
    add_new_bitmap(ctl, info, offset);
/* try_merge_free_space(): look up the entries immediately to the right
 * and left of the new range and absorb them when they are plain
 * extents (bitmaps never merge); an already-locked caller uses the
 * __ variant of unlink. */
    bool merged = false;

    right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
    left_info = tree_search_offset(ctl, offset - 1, 0, 0);

    if (right_info && !right_info->bitmap) {
    unlink_free_space(ctl, right_info);
    __unlink_free_space(ctl, right_info);

    if (left_info && !left_info->bitmap &&
        left_info->offset + left_info->bytes == offset) {
    unlink_free_space(ctl, left_info);
    __unlink_free_space(ctl, left_info);
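/*
 * Merge example (made-up offsets): freeing [8MiB, 9MiB) while extents
 * [7MiB, 8MiB) and [9MiB, 10MiB) already exist collapses all three
 * into a single [7MiB, 10MiB) entry before it is linked back in.
 */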
/* __btrfs_add_free_space(): merge first, then prefer a bitmap when the
 * policy asks for one, otherwise link a plain extent entry. */
    if (try_merge_free_space(ctl, info, true))

    ret = insert_into_bitmap(ctl, info);

    ret = link_free_space(ctl, info);
/*
 * btrfs_remove_free_space(): find whichever extent or bitmap entries
 * cover the range, carve the range out, and re-add any tail that
 * survives. A range can span an extent entry and the bitmap after it,
 * hence the retry loop.
 */
    info = tree_search_offset(ctl, offset, 0, 0);
    info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                              1, 0);

    unlink_free_space(ctl, info);

    if (offset == info->offset) {
    info->bytes -= to_free;
    ret = link_free_space(ctl, info);

    ret = link_free_space(ctl, info);

    if (old_end < offset + bytes) {
    bytes -= old_end - offset;
    } else if (old_end == offset + bytes) {

    ret = btrfs_add_free_space(block_group, offset + bytes,
                               old_end - (offset + bytes));

    ret = remove_from_bitmap(ctl, info, &offset, &bytes);
/* btrfs_dump_free_space(): debugging aid, prints every entry of at
 * least `bytes` and whether the block group has a cluster. */
    if (info->bytes >= bytes && !block_group->ro)
    printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
           (unsigned long long)info->offset,
           (unsigned long long)info->bytes,
           (info->bitmap) ? "yes" : "no");
    printk(KERN_CRIT "block group has cluster?: %s\n",
           list_empty(&block_group->cluster_list) ? "no" : "yes");
/* btrfs_init_free_space_ctl(): the ctl spans the whole block group and
 * uses the block-group ops table. */
    ctl->start = block_group->key.objectid;
    ctl->op = &free_space_op;
/* __btrfs_return_cluster_to_free_space(): drain the cluster's private
 * rbtree back into the block group's tree, re-merging as entries
 * return. */
__btrfs_return_cluster_to_free_space(
                struct btrfs_block_group_cache *block_group,
                struct btrfs_free_cluster *cluster)
    spin_lock(&cluster->lock);
    try_merge_free_space(ctl, entry, false);
    spin_unlock(&cluster->lock);
/* Cache teardown walks every entry, freeing extents and bitmaps, and
 * returns any clusters first; need_resched() breaks up the lock hold
 * times. */
    unlink_free_space(ctl, info);
    free_bitmap(ctl, info);
    if (need_resched()) {

    __btrfs_return_cluster_to_free_space(block_group, cluster);
    if (need_resched()) {
/* btrfs_find_space_for_alloc(): search for bytes + empty_size; bitmap
 * hits have their bits cleared, extent hits are unlinked, consumed
 * from the front, and relinked if anything is left. */
    u64 bytes_search = bytes + empty_size;

    entry = find_free_space(ctl, &offset, &bytes_search);

    bitmap_clear_bits(ctl, entry, offset, bytes);
    free_bitmap(ctl, entry);

    unlink_free_space(ctl, entry);
    link_free_space(ctl, entry);
    spin_lock(&cluster->lock);
    spin_unlock(&cluster->lock);
    spin_unlock(&cluster->lock);
    spin_unlock(&cluster->lock);

    ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
/* btrfs_alloc_from_bitmap(): honour min_start (set by the cluster)
 * while searching the bitmap; on success the bits are cleared right
 * away. */
    u64 bytes, u64 min_start)
    search_start = min_start;
    search_bytes = bytes;

    err = search_bitmap(ctl, entry, &search_start, &search_bytes);

    __bitmap_clear_bits(ctl, entry, ret, bytes);
/* btrfs_alloc_from_cluster(): walk the cluster's entries under
 * cluster->lock; entries that empty out are dropped and the bitmap
 * thresholds recalculated. */
    spin_lock(&cluster->lock);

    if (entry->bytes < bytes ||
        (!entry->bitmap && entry->offset < min_start)) {

    ret = btrfs_alloc_from_bitmap(block_group,
                                  cluster, entry, bytes,

    if (entry->bytes == 0)

    spin_unlock(&cluster->lock);

    if (entry->bytes == 0) {
    ctl->op->recalc_thresholds(ctl);
/*
 * btrfs_bitmap_cluster(): build a cluster out of a single bitmap
 * entry. The bitmap must contribute want_bits in total, containing at
 * least one run of min_bits; the largest run found becomes
 * cluster->max_size, which must reach cont1_bytes.
 */
    u64 cont1_bytes, u64 min_bytes)
    unsigned long next_zero;
    unsigned long want_bits;
    unsigned long min_bits;
    unsigned long found_bits;
    unsigned long start = 0;
    unsigned long total_found = 0;

    want_bits = bytes_to_bits(bytes, block_group->sectorsize);
    min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

    if (next_zero - i >= min_bits) {
        found_bits = next_zero - i;

    total_found += found_bits;

    if (total_found < want_bits || cluster->max_size < cont1_bytes) {

    ret = tree_insert_offset(&cluster->root, entry->offset,
                             &entry->offset_index, 1);

    trace_btrfs_setup_cluster(block_group, cluster,
                              total_found * block_group->sectorsize, 1);
/*
 * setup_cluster_no_bitmap(): grow a window of extent entries starting
 * at offset. Bitmaps met along the way are parked on the caller's list
 * for the bitmap pass. The finished window must hold `bytes` in total
 * with one extent of at least cont1_bytes, and then every entry in it
 * moves to the cluster's rbtree.
 */
    u64 cont1_bytes, u64 min_bytes)

    entry = tree_search_offset(ctl, offset, 0, 1);

    while (entry->bitmap || entry->bytes < min_bytes) {
    if (entry->bitmap && list_empty(&entry->list))

    window_start = entry->offset;
    window_free = entry->bytes;
    max_extent = entry->bytes;

    if (list_empty(&entry->list))

    if (entry->bytes < min_bytes)

    window_free += entry->bytes;
    if (entry->bytes > max_extent)
        max_extent = entry->bytes;

    if (window_free < bytes || max_extent < cont1_bytes)

    ret = tree_insert_offset(&cluster->root, entry->offset,
                             &entry->offset_index, 1);
    total_size += entry->bytes;
    } while (node && entry != last);

    trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
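/*
 * Window example (made-up sizes, cont1_bytes = 256KiB): extents of
 * 128KiB, 512KiB and 64KiB give window_free = 704KiB with max_extent =
 * 512KiB, so a 700KiB cluster request succeeds; with cont1_bytes =
 * 1MiB it would fail the max_extent check and fall back to bitmaps.
 */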
/* setup_cluster_bitmap(): second pass over the parked bitmaps, plus
 * the bitmap covering the wanted offset itself. */
    u64 cont1_bytes, u64 min_bytes)
    u64 bitmap_offset = offset_to_bitmap(ctl, offset);

    if (entry->offset != bitmap_offset) {
    entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
    if (entry && list_empty(&entry->list))
        list_add(&entry->list, bitmaps);

    if (entry->bytes < bytes)

    ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
                               bytes, cont1_bytes, min_bytes);
/* btrfs_find_space_cluster(): the contiguity requirement depends on
 * mount options and allocation type: ssd_spread demands one fully
 * contiguous run, metadata wants at least `bytes` contiguous, and the
 * default accepts a quarter of the total request. */
    cont1_bytes = min_bytes = bytes + empty_size;

    cont1_bytes = bytes;

    cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
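/*
 * Sizing example (made-up numbers): a 1MiB request with 256KiB of
 * empty_size on a default data mount needs a window of 1.25MiB
 * containing one extent of at least max(1MiB, 1.25MiB / 4) = 1MiB;
 * with -o ssd_spread the whole 1.25MiB must be one contiguous run.
 */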
    spin_lock(&cluster->lock);

    trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
                             min_bytes);

    INIT_LIST_HEAD(&bitmaps);
    ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
                                  bytes + empty_size,
                                  cont1_bytes, min_bytes);
    ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
                               offset, bytes + empty_size,
                               cont1_bytes, min_bytes);

    list_del_init(&entry->list);

    trace_btrfs_failed_cluster_setup(block_group);

    spin_unlock(&cluster->lock);
/*
 * do_trimming(): temporarily bump the block group's reservation so the
 * range cannot be handed out again while the discard is in flight,
 * issue the discard, then return the reserved range to the free space
 * cache and drop the reservation.
 */
static int do_trimming(struct btrfs_block_group_cache *block_group,
                       u64 *total_trimmed, u64 start, u64 bytes,
                       u64 reserved_start, u64 reserved_bytes)

    spin_lock(&space_info->lock);
    spin_lock(&block_group->lock);
    if (!block_group->ro) {
    block_group->reserved += reserved_bytes;
    spin_unlock(&block_group->lock);
    spin_unlock(&space_info->lock);

    ret = btrfs_error_discard_extent(fs_info->extent_root,
                                     start, bytes, &trimmed);
    if (!ret)
        *total_trimmed += trimmed;

    btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

    spin_lock(&space_info->lock);
    spin_lock(&block_group->lock);
    if (block_group->ro)
    block_group->reserved -= reserved_bytes;
    spin_unlock(&space_info->lock);
    spin_unlock(&block_group->lock);
/* trim_no_bitmap(): walk the extent entries overlapping [start, end);
 * each candidate is unlinked and trimmed whole, and the loop bails on
 * fatal signals. */
    while (start < end) {

    entry = tree_search_offset(ctl, start, 0, 1);

    if (entry->offset >= end) {

    extent_start = entry->offset;
    extent_bytes = entry->bytes;
    start = max(start, extent_start);
    bytes = min(extent_start + extent_bytes, end) - start;
    if (bytes < minlen) {

    unlink_free_space(ctl, entry);

    ret = do_trimming(block_group, total_trimmed, start, bytes,
                      extent_start, extent_bytes);

    if (fatal_signal_pending(current)) {
/* trim_bitmaps(): same idea, one bitmap at a time; each run is cleared
 * from the bitmap before it is discarded so it cannot be allocated in
 * the meantime. */
    u64 offset = offset_to_bitmap(ctl, start);

    while (offset < end) {
    bool next_bitmap = false;

    entry = tree_search_offset(ctl, offset, 1, 0);

    ret2 = search_bitmap(ctl, entry, &start, &bytes);
    if (ret2 || start >= end) {

    bytes = min(bytes, end - start);
    if (bytes < minlen) {

    bitmap_clear_bits(ctl, entry, start, bytes);
    if (entry->bytes == 0)
        free_bitmap(ctl, entry);

    ret = do_trimming(block_group, total_trimmed, start, bytes,
                      start, bytes);

    if (fatal_signal_pending(current)) {
/* btrfs_trim_block_group() runs both passes in turn: */
    ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
    ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
    unlink_free_space(ctl, entry);
    link_free_space(ctl, entry);

    ret = search_bitmap(ctl, entry, &offset, &count);

    bitmap_clear_bits(ctl, entry, offset, 1);
    if (entry->bytes == 0)
        free_bitmap(ctl, entry);
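/*
 * This is btrfs_find_ino_for_alloc(): the free-inode-number cache
 * reuses all of the machinery above with a ctl whose unit is 1, so
 * clearing a single "byte" here hands out exactly one objectid.
 */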
/* lookup_free_ino_inode(): the inode cache object sits at offset 0. */
    struct inode *inode = NULL;

    inode = __lookup_free_space_inode(root, path, 0);

    if (!btrfs_fs_closing(root->fs_info))

/* load_free_ino_cache(): only trust the cache when the root generation
 * matches and the filesystem is not shutting down. */
    struct inode *inode;
    u64 root_gen = btrfs_root_generation(&root->root_item);

    if (btrfs_fs_closing(fs_info))

    if (root_gen != BTRFS_I(inode)->generation)

    printk(KERN_ERR "btrfs: failed to load free ino cache for "
           "root %llu\n", root->root_key.objectid);
/* btrfs_write_out_ino_cache(): write-out mirrors the block group path
 * above. */
    struct inode *inode;

    printk(KERN_ERR "btrfs: failed to write free ino cache "
           "for root %llu\n", root->root_key.objectid);