#include <linux/slab.h>

static struct kmem_cache *btrfs_ordered_extent_cache;
/* tree_insert(): walk the per-inode rb-tree keyed by file_offset, then
 * link the new node at the leaf slot the walk found */
if (file_offset < entry->file_offset)
	p = &(*p)->rb_left;
else if (file_offset >= entry_end(entry))
	p = &(*p)->rb_right;

rb_link_node(node, parent, p);
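/*
 * For context, a minimal self-contained sketch of the <linux/rbtree.h>
 * insertion idiom tree_insert() follows. "demo_entry", "demo_entry_end"
 * and "demo_insert" are hypothetical names for illustration, not btrfs
 * code; the walk/link/rebalance shape is the stock kernel pattern.
 */
struct demo_entry {
	struct rb_node rb_node;
	u64 file_offset;
	u64 len;
};

static u64 demo_entry_end(struct demo_entry *entry)
{
	return entry->file_offset + entry->len;
}

static struct rb_node *demo_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct demo_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct demo_entry, rb_node);
		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= demo_entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;	/* overlapping entry already present */
	}
	rb_link_node(node, parent, p);	/* attach at the empty leaf slot */
	rb_insert_color(node, root);	/* rebalance to keep red-black rules */
	return NULL;
}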
/* ordered_data_tree_panic(): inserting over an existing extent is fatal */
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
		    "%llu\n", (unsigned long long)offset);
}
/* __tree_search(): descend the tree; on a miss, step the closest node
 * forward (rb_next) and then back (rb_prev) so *prev_ret lands on the
 * first entry beyond file_offset */
if (file_offset < entry->file_offset)
	n = n->rb_left;
else if (file_offset >= entry_end(entry))
	n = n->rb_right;

while (prev && file_offset >= entry_end(prev_entry)) {
	if (file_offset < entry_end(prev_entry))
		break;
while (prev && file_offset < entry_end(prev_entry)) {
/* offset_in_entry(): true when file_offset lies inside the entry's range */
if (file_offset < entry->file_offset ||
    entry->file_offset + entry->len <= file_offset)
	return 0;
/* range_overlaps(): disjoint when either range ends at or before the
 * other's start */
if (file_offset + len <= entry->file_offset ||
    entry->file_offset + entry->len <= file_offset)
	return 0;
/* tree_search(): fast path via the cached tree->last node */
if (offset_in_entry(entry, file_offset))
	return tree->last;
ret = __tree_search(root, file_offset, &prev);
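/*
 * The cache above matters because completion processes an ordered extent
 * page by page: consecutive lookups usually hit the same entry, so
 * remembering the last result in tree->last skips the full rb-tree walk.
 * On a miss, __tree_search() also reports the closest following node
 * through "prev", which serves as the fallback result.
 */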
/* __btrfs_add_ordered_extent(): common worker behind the three public
 * add_ordered_extent variants below */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)

tree = &BTRFS_I(inode)->ordered_tree;
entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
INIT_LIST_HEAD(&entry->list);

trace_btrfs_ordered_extent_add(inode, entry);

spin_lock_irq(&tree->lock);
node = tree_insert(&tree->tree, file_offset,
		   &entry->rb_node);
if (node)
	ordered_data_tree_panic(inode, -EEXIST, file_offset);
spin_unlock_irq(&tree->lock);

spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
	      &BTRFS_I(inode)->root->fs_info->ordered_extents);
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
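/*
 * Two separate locks show up above: the per-inode tree->lock (taken
 * irq-safe, since completion handlers also take it) guards the rb-tree,
 * while the filesystem-wide fs_info->ordered_extent_lock guards the
 * global list of in-flight ordered extents. tree_insert() returning an
 * existing node means two ordered extents were created for the same
 * range, which is treated as an unrecoverable logic error, hence the
 * -EEXIST panic instead of an error return.
 */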
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
				  disk_len, type, 0, BTRFS_COMPRESS_NONE);
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
				  disk_len, type, 1, BTRFS_COMPRESS_NONE);
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
				  disk_len, type, 0, compress_type);
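/*
 * All three wrappers funnel into __btrfs_add_ordered_extent() and differ
 * only in the trailing arguments: the _dio variant marks the extent as
 * direct I/O (dio = 1), and only the _compress variant forwards a real
 * compress_type; the others pass BTRFS_COMPRESS_NONE.
 */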
/* btrfs_add_ordered_sum(): attach a checksum record to an ordered extent */
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
list_add_tail(&sum->list, &entry->list);
spin_unlock_irq(&tree->lock);
/* btrfs_dec_test_first_ordered_pending(): retire io_size bytes starting
 * at *file_offset, clipped to the extent actually covering that offset;
 * returns nonzero when this call finishes the extent */
u64 *file_offset, u64 io_size, int uptodate)

tree = &BTRFS_I(inode)->ordered_tree;
node = tree_search(tree, *file_offset);
if (!offset_in_entry(entry, *file_offset)) {
*file_offset = dec_end;
if (dec_start > dec_end) {
/* continuation arguments of the "bad ordering" printk diagnostics */
(unsigned long long)dec_start,
(unsigned long long)dec_end);
to_dec = dec_end - dec_start;
(unsigned long long)to_dec);
/* on success with a caller-supplied slot, the body stores the entry in
 * *cached and takes an extra reference for the caller */
if (!ret && cached && entry) {
spin_unlock_irqrestore(&tree->lock, flags);
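/*
 * Unlike the plain spin_lock_irq() users elsewhere in this file, the
 * dec-and-test helpers use the irqsave/irqrestore variants: they can be
 * reached from I/O completion paths where interrupts may already be
 * disabled, so the previous interrupt state has to be preserved.
 */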
/* btrfs_dec_test_ordered_pending(): same accounting for a fixed range,
 * with an optional cached entry to skip the lookup */
u64 file_offset, u64 io_size, int uptodate)

tree = &BTRFS_I(inode)->ordered_tree;
if (cached && *cached) {
	/* fast path: reuse the entry cached by a previous call */
node = tree_search(tree, file_offset);
if (!offset_in_entry(entry, file_offset)) {
/* continuation argument of the "bad ordered accounting" diagnostic */
(unsigned long long)io_size);
if (!ret && cached && entry) {
spin_unlock_irqrestore(&tree->lock, flags);
/* btrfs_put_ordered_extent(): drop a reference; the last put frees the
 * attached checksum records and then the entry itself */
trace_btrfs_ordered_extent_put(entry->inode, entry);

while (!list_empty(&entry->list)) {
	cur = entry->list.next;
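/*
 * The loop above follows the stock list-drain idiom. A sketch of its
 * elided body, assuming the usual <linux/list.h> helpers: detach each
 * checksum record before freeing it so the list head stays walkable.
 */
sum = list_entry(cur, struct btrfs_ordered_sum, list);
list_del(&sum->list);
kfree(sum);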
/* btrfs_remove_ordered_extent(): once the I/O is fully accounted, unhook
 * the entry from the per-inode tree and the filesystem-wide lists */
struct btrfs_root *root = BTRFS_I(inode)->root;

tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
spin_unlock_irq(&tree->lock);

spin_lock(&root->fs_info->ordered_extent_lock);
trace_btrfs_ordered_extent_remove(inode, entry);
list_del_init(&BTRFS_I(inode)->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
/* btrfs_wait_ordered_extents(): splice the global list onto a private
 * head, then wait on each extent with the lock dropped */
INIT_LIST_HEAD(&splice);

spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_init(&root->fs_info->ordered_extents, &splice);
while (!list_empty(&splice)) {
	spin_unlock(&root->fs_info->ordered_extent_lock);
	/* wait on the extent without holding the lock, then re-take it */
	spin_lock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_extent_lock);
/* btrfs_run_ordered_operations(): write back inodes queued for ordered
 * operations before the transaction commits */
INIT_LIST_HEAD(&splice);

spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_init(&root->fs_info->ordered_operations, &splice);
while (!list_empty(&splice)) {
	inode = igrab(inode);
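/*
 * igrab() either takes a reference that keeps the inode alive after the
 * spinlock below is dropped, or returns NULL if the inode is already
 * being evicted; every path that got a reference must balance it with
 * iput() once the writeout has been issued.
 */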
if (!wait && inode) {
	list_add_tail(&BTRFS_I(inode)->ordered_operations,
		      &root->fs_info->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);

spin_lock(&root->fs_info->ordered_extent_lock);
if (wait && !list_empty(&root->fs_info->ordered_operations))
	goto again;
spin_unlock(&root->fs_info->ordered_extent_lock);
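/*
 * With wait set, the function loops: new inodes can be queued on
 * ordered_operations while the lock was dropped for writeout, so it
 * re-splices and repeats until the list stays empty.
 */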
/* btrfs_start_ordered_extent(): kick off writeback for the extent's
 * range and optionally wait for it to complete */
trace_btrfs_ordered_extent_start(inode, entry);
/* btrfs_wait_ordered_range(): wait for every ordered extent overlapping
 * [start, start + len) */
if (start + len < start) {
	/* u64 wraparound: the caller means "up to the end of the file" */
orig_end = start + len - 1;
/* continuation of a test_bit() check on the inode's runtime_flags */
&BTRFS_I(inode)->runtime_flags))
if (end == 0 || end == start)
	break;
/* btrfs_lookup_ordered_extent(): return a referenced ordered extent
 * that contains file_offset, or NULL */
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
node = tree_search(tree, file_offset);
if (!offset_in_entry(entry, file_offset))
	entry = NULL;
spin_unlock_irq(&tree->lock);
/* btrfs_lookup_ordered_range(): like the above, but any ordered extent
 * overlapping [file_offset, file_offset + len) qualifies */
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
node = tree_search(tree, file_offset);
if (!node)
	node = tree_search(tree, file_offset + len);
if (range_overlaps(entry, file_offset, len))
	break;
spin_unlock_irq(&tree->lock);
/* btrfs_lookup_first_ordered_extent(): take whatever entry the search
 * lands on, with no containment check */
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
node = tree_search(tree, file_offset);
spin_unlock_irq(&tree->lock);
/* btrfs_ordered_update_i_size(): move disk_i_size forward as ordered
 * extents complete, but never past an offset still covered by pending
 * ordered I/O */
u64 i_size = i_size_read(inode);

if (ordered)
	offset = entry_end(ordered);
else
	offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
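/*
 * ALIGN() rounds the candidate offset up to the filesystem sector size,
 * since disk_i_size only moves in sector-granular steps; e.g. with a
 * 4096-byte sectorsize, an offset of 4100 becomes 8192.
 */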
spin_lock_irq(&tree->lock);
disk_i_size = BTRFS_I(inode)->disk_i_size;

/* a truncate pulled i_size back: clamp disk_i_size to it and stop */
if (disk_i_size > i_size) {
	BTRFS_I(inode)->disk_i_size = i_size;

/* nothing to do if the on-disk size already covers this extent */
if (disk_i_size == i_size || offset <= disk_i_size) {

prev = tree_search(tree, offset);
BUG_ON(offset_in_entry(test, offset));
/* advance, but never past the in-memory i_size */
new_i_size = min_t(u64, offset, i_size);
BTRFS_I(inode)->disk_i_size = new_i_size;
spin_unlock_irq(&tree->lock);
/* btrfs_find_ordered_sum(): look up the checksum of one sector, keyed by
 * its disk byte number, in the sums attached to an ordered extent */
unsigned long num_sectors;

spin_lock_irq(&tree->lock);
if (disk_bytenr >= ordered_sum->bytenr) {
	sector_sums = ordered_sum->sums;
	for (i = 0; i < num_sectors; i++) {
		if (sector_sums[i].bytenr == disk_bytenr) {
			*sum = sector_sums[i].sum;
spin_unlock_irq(&tree->lock);
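/*
 * A hedged, self-contained sketch of the scan shape above (names ending
 * in _demo are hypothetical, not btrfs structures): each record covers a
 * run of sectors starting at "bytenr", and the lookup is a plain linear
 * scan for the matching sector.
 */
struct sector_sum_demo {
	u64 bytenr;
	u32 sum;
};

static int find_sum_demo(struct sector_sum_demo *sums, unsigned long nr,
			 u64 disk_bytenr, u32 *sum_out)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		if (sums[i].bytenr == disk_bytenr) {
			*sum_out = sums[i].sum;
			return 1;	/* found */
		}
	}
	return 0;	/* caller falls back to the csum tree */
}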
/* btrfs_add_ordered_operation(): remember that this inode needs writeout
 * before the running transaction commits */
last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

/* if the transaction is already committing, just issue the I/O now */
if (trans && root->fs_info->running_transaction->blocked) {

spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
	list_add_tail(&BTRFS_I(inode)->ordered_operations,
		      &root->fs_info->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
/* slab cache lifecycle for struct btrfs_ordered_extent */
if (!btrfs_ordered_extent_cache)
	return -ENOMEM;
if (btrfs_ordered_extent_cache)
	kmem_cache_destroy(btrfs_ordered_extent_cache);
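/*
 * A minimal sketch of the init/exit pair those checks belong to, assuming
 * the usual kernel slab-cache pattern; the cache name and flags here are
 * illustrative assumptions, not taken from this excerpt.
 */
int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent),
					0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;
	return 0;
}

void ordered_data_exit(void)
{
	if (btrfs_ordered_extent_cache)
		kmem_cache_destroy(btrfs_ordered_extent_cache);
}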