ordered-data.c (Linux kernel 3.7.1)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or it returns the node it did find
 * in the tree
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                    u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
                    "%llu\n", (unsigned long long)offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}
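
/*
 * Illustrative sketch, not part of the original file: both helpers above
 * treat the end offset as exclusive.  For an entry at file_offset 4096 with
 * len 4096 (bytes 4096..8191), offset 8192 falls outside, and a range that
 * merely touches at 8192 does not overlap.  example_entry_boundaries() is a
 * hypothetical name used only to demonstrate these boundary cases.
 */
static void __maybe_unused example_entry_boundaries(void)
{
        struct btrfs_ordered_extent entry = {
                .file_offset = 4096,
                .len = 4096,
        };

        BUG_ON(!offset_in_entry(&entry, 4096));         /* first byte: inside */
        BUG_ON(!offset_in_entry(&entry, 8191));         /* last byte: inside */
        BUG_ON(offset_in_entry(&entry, 8192));          /* one past end: outside */
        BUG_ON(range_overlaps(&entry, 8192, 4096));     /* touching, no overlap */
        BUG_ON(!range_overlaps(&entry, 0, 4097));       /* one-byte overlap */
}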

/*
 * look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &BTRFS_I(inode)->root->fs_info->ordered_extents);
        spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);

        return 0;
}

int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}
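
/*
 * Illustrative sketch, not part of the original file: a write path that has
 * just reserved an on-disk extent would record the pending write roughly
 * like this before submitting the bio.  example_record_write() is a
 * hypothetical caller; type 0 (no type flag set) is what the plain COW
 * write path passes, and disk_len equals len for uncompressed data.
 */
static int __maybe_unused example_record_write(struct inode *inode,
                                               u64 file_offset, u64 disk_start,
                                               u64 num_bytes)
{
        /* on success, one reference now belongs to the per-inode tree */
        return btrfs_add_ordered_extent(inode, file_offset, disk_start,
                                        num_bytes, num_bytes, 0);
}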

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
                           struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
                       (unsigned long long)dec_start,
                       (unsigned long long)dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}
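
/*
 * Illustrative sketch, not part of the original file: an endio-style loop
 * that walks forward across however many ordered extents a finished write
 * touched.  Each call advances 'cur' past the range it accounted; a nonzero
 * return means that extent is now fully written and 'ordered' holds an
 * extra reference.  example_account_finished_io() is a hypothetical name.
 */
static void __maybe_unused example_account_finished_io(struct inode *inode,
                                                       u64 start, u64 len,
                                                       int uptodate)
{
        struct btrfs_ordered_extent *ordered = NULL;
        u64 cur = start;
        u64 end = start + len;

        while (cur < end) {
                u64 prev = cur;

                if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
                                                         &cur, end - cur,
                                                         uptodate)) {
                        /* a real caller would finish the extent here */
                        btrfs_put_ordered_extent(ordered);   /* drop cached ref */
                        ordered = NULL;
                }
                if (cur == prev)
                        break;  /* no ordered extent covers this offset */
        }
}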

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
                       (unsigned long long)entry->bytes_left,
                       (unsigned long long)io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0)
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
        else
                ret = 1;
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (atomic_dec_and_test(&entry->refs)) {
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->fs_info->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);

        trace_btrfs_ordered_extent_remove(inode, entry);

        /*
         * we have no more ordered extents for this inode and
         * no dirty pages.  We can safely remove it from the
         * list of ordered extents
         */
        if (RB_EMPTY_ROOT(&tree->tree) &&
            !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
                list_del_init(&BTRFS_I(inode)->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
        wake_up(&entry->wait);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
        struct list_head splice;
        struct list_head *cur;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                cur = splice.next;
                ordered = list_entry(cur, struct btrfs_ordered_extent,
                                     root_extent_list);
                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        btrfs_start_ordered_extent(inode, ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        if (delay_iput)
                                btrfs_add_delayed_iput(inode);
                        else
                                iput(inode);
                } else {
                        btrfs_put_ordered_extent(ordered);
                }

                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
}

/*
 * this is used during transaction commit to write all the inodes
 * added to the ordered operation list.  These files must be fully on
 * disk before the transaction commits.
 *
 * we have two modes here, one is to just start the IO via filemap_flush
 * and the other is to wait for all the io.  When we wait, we have an
 * extra check to make sure the ordered operation list really is empty
 * before we return
 */
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
again:
        list_splice_init(&root->fs_info->ordered_operations, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                inode = &btrfs_inode->vfs_inode;

                list_del_init(&btrfs_inode->ordered_operations);

                /*
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);

                if (!wait && inode) {
                        list_add_tail(&BTRFS_I(inode)->ordered_operations,
                                      &root->fs_info->ordered_operations);
                }
                spin_unlock(&root->fs_info->ordered_extent_lock);

                if (inode) {
                        if (wait)
                                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                        else
                                filemap_flush(inode->i_mapping);
                        btrfs_add_delayed_iput(inode);
                }

                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
        if (wait && !list_empty(&root->fs_info->ordered_operations))
                goto again;

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                struct btrfs_ordered_extent *entry,
                                int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;
        int found;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        /*
         * So with compression we will find and lock a dirty page and clear the
         * first one as dirty, setup an async extent, and immediately return
         * with the entire range locked but with nobody actually marked with
         * writeback.  So we can't just filemap_write_and_wait_range() and
         * expect it to work since it will just kick off a thread to do the
         * actual work.  So we need to call filemap_fdatawrite_range _again_
         * since it will wait on the page lock, which won't be unlocked until
         * after the pages have been marked as writeback and so we're good to go
         * from there.  We have to do this otherwise we'll miss the ordered
         * extents and that results in badness.  Please Josef, do not think you
         * know better and pull this out at some point in the future, it is
         * right and you are wrong.
         */
        if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
                     &BTRFS_I(inode)->runtime_flags))
                filemap_fdatawrite_range(inode->i_mapping, start, orig_end);

        filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        found = 0;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len < start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                found++;
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                btrfs_put_ordered_extent(ordered);
                if (end == 0 || end == start)
                        break;
                end--;
        }
}
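
/*
 * Illustrative sketch, not part of the original file: fsync-style callers
 * flush a whole file by passing the maximal range, exactly as
 * btrfs_run_ordered_operations() above does.  example_flush_whole_file()
 * is a hypothetical name for the pattern.
 */
static void __maybe_unused example_flush_whole_file(struct inode *inode)
{
        /* start writeback and wait until every ordered extent completes */
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
}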

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}
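
/*
 * Illustrative sketch, not part of the original file: the lookup above takes
 * a reference, so every successful lookup must be paired with a
 * btrfs_put_ordered_extent().  example_check_in_flight() is a hypothetical
 * caller that asks whether ordered IO is still pending at an offset.
 */
static int __maybe_unused example_check_in_flight(struct inode *inode,
                                                  u64 file_offset)
{
        struct btrfs_ordered_extent *ordered;
        int in_flight = 0;

        ordered = btrfs_lookup_ordered_extent(inode, file_offset);
        if (ordered) {
                in_flight = !test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags);
                btrfs_put_ordered_extent(ordered);      /* drop the lookup ref */
        }
        return in_flight;
}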

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
                                                        u64 file_offset,
                                                        u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                atomic_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;

        if (ordered)
                offset = entry_end(ordered);
        else
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);

        spin_lock_irq(&tree->lock);
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /* truncate file */
        if (disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = i_size;
                ret = 0;
                goto out;
        }

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size == i_size || offset <= disk_i_size) {
                goto out;
        }

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);
                /*
                 * we insert file extents without involving ordered struct,
                 * so there should be no ordered struct covering this offset
                 */
                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                /* We treat this entry as if it doesn't exist */
                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (test->file_offset >= disk_i_size) {
                        /*
                         * we don't update disk_i_size now, so record this
                         * undealt i_size.  Or we will not know the real
                         * i_size.
                         */
                        if (test->outstanding_isize < offset)
                                test->outstanding_isize = offset;
                        if (ordered &&
                            ordered->outstanding_isize >
                            test->outstanding_isize)
                                test->outstanding_isize =
                                        ordered->outstanding_isize;
                        goto out;
                }
        }
        new_i_size = min_t(u64, offset, i_size);

        /*
         * Some ordered extents may have completed before the current one, and
         * we hold the real i_size in ->outstanding_isize.
         */
        if (ordered && ordered->outstanding_isize > new_i_size)
                new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        /*
         * We need to do this because we can't remove ordered extents until
         * after the i_disk_size has been updated and then the inode has been
         * updated to reflect the change, so we need to tell anybody who finds
         * this ordered extent that we've already done all the real work, we
         * just haven't completed all the other work.
         */
        if (ordered)
                set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
        spin_unlock_irq(&tree->lock);
        return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_sector_sum *sector_sums;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int ret = 1;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 1;

        spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr) {
                        num_sectors = ordered_sum->len / sectorsize;
                        sector_sums = ordered_sum->sums;
                        for (i = 0; i < num_sectors; i++) {
                                if (sector_sums[i].bytenr == disk_bytenr) {
                                        *sum = sector_sums[i].sum;
                                        ret = 0;
                                        goto out;
                                }
                        }
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return ret;
}
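
/*
 * Illustrative sketch, not part of the original file: a reader that needs
 * the checksum for one sector can try the in-memory ordered tree first and
 * fall back to the csum btree when btrfs_find_ordered_sum() returns nonzero.
 * example_try_ordered_csum() is a hypothetical name, and the btree fallback
 * is elided here because it lives outside this file.
 */
static int __maybe_unused example_try_ordered_csum(struct inode *inode,
                                                   u64 offset, u64 disk_bytenr,
                                                   u32 *sum)
{
        if (btrfs_find_ordered_sum(inode, offset, disk_bytenr, sum) == 0)
                return 0;       /* found while the extent was still in flight */
        return -ENOENT;         /* caller would consult the csum tree here */
}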

/*
 * add a given inode to the list of inodes that must be fully on
 * disk before a transaction commit finishes.
 *
 * This basically gives us the ext3 style data=ordered mode, and it is mostly
 * used to make sure renamed files are fully on disk.
 *
 * It is a noop if the inode is already fully on disk.
 *
 * If trans is not null, we'll do a friendly check for a transaction that
 * is already flushing things and force the IO down ourselves.
 */
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, struct inode *inode)
{
        u64 last_mod;

        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);

        /*
         * if this file hasn't been changed since the last transaction
         * commit, we can safely return without doing anything
         */
        if (last_mod < root->fs_info->last_trans_committed)
                return;

        /*
         * the transaction is already committing.  Just start the IO and
         * don't bother with all of this list nonsense
         */
        if (trans && root->fs_info->running_transaction->blocked) {
                btrfs_wait_ordered_range(inode, 0, (u64)-1);
                return;
        }

        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
                              &root->fs_info->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
}

int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;
        return 0;
}

void ordered_data_exit(void)
{
        if (btrfs_ordered_extent_cache)
                kmem_cache_destroy(btrfs_ordered_extent_cache);
}