/*
 * fs/btrfs/extent_map.c (Linux 3.7.1)
 *
 * Extent map tree: an rb-tree cache of the logical->physical extent
 * layout for btrfs inodes, plus helpers to insert, look up, merge and
 * remove cached extent mappings.
 */
1 #include <linux/err.h>
2 #include <linux/slab.h>
3 #include <linux/module.h>
4 #include <linux/spinlock.h>
5 #include <linux/hardirq.h>
6 #include "ctree.h"
7 #include "extent_map.h"
8 
9 
10 static struct kmem_cache *extent_map_cache;
11 
13 {
14  extent_map_cache = kmem_cache_create("btrfs_extent_map",
15  sizeof(struct extent_map), 0,
17  if (!extent_map_cache)
18  return -ENOMEM;
19  return 0;
20 }
21 
22 void extent_map_exit(void)
23 {
24  if (extent_map_cache)
25  kmem_cache_destroy(extent_map_cache);
26 }
27 
36 {
37  tree->map = RB_ROOT;
38  INIT_LIST_HEAD(&tree->modified_extents);
39  rwlock_init(&tree->lock);
40 }
41 
50 {
51  struct extent_map *em;
52  em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
53  if (!em)
54  return NULL;
55  em->in_tree = 0;
56  em->flags = 0;
58  em->generation = 0;
59  atomic_set(&em->refs, 1);
60  INIT_LIST_HEAD(&em->list);
61  return em;
62 }
63 
72 {
73  if (!em)
74  return;
75  WARN_ON(atomic_read(&em->refs) == 0);
76  if (atomic_dec_and_test(&em->refs)) {
77  WARN_ON(em->in_tree);
78  WARN_ON(!list_empty(&em->list));
79  kmem_cache_free(extent_map_cache, em);
80  }
81 }
82 
83 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
84  struct rb_node *node)
85 {
86  struct rb_node **p = &root->rb_node;
87  struct rb_node *parent = NULL;
88  struct extent_map *entry;
89 
90  while (*p) {
91  parent = *p;
92  entry = rb_entry(parent, struct extent_map, rb_node);
93 
94  WARN_ON(!entry->in_tree);
95 
97  p = &(*p)->rb_left;
98  else if (offset >= extent_map_end(entry))
99  p = &(*p)->rb_right;
100  else
101  return parent;
102  }
103 
104  entry = rb_entry(node, struct extent_map, rb_node);
105  entry->in_tree = 1;
106  rb_link_node(node, parent, p);
107  rb_insert_color(node, root);
108  return NULL;
109 }
110 
111 /*
112  * search through the tree for an extent_map with a given offset. If
113  * it can't be found, try to find some neighboring extents
114  */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	/*
	 * Standard rb-tree descent: stop and return the node whose
	 * [start, extent_map_end) range contains @offset.  On the way
	 * down remember the last node visited (prev/prev_entry) as the
	 * starting point for the neighbor searches below.
	 */
	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	/*
	 * No exact hit.  Walk forward from the last-visited node until we
	 * find the first extent ending beyond @offset (or run off the
	 * tree).  Note: prev_entry computed from a NULL prev is never
	 * dereferenced because the loop tests prev first.
	 */
	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	/*
	 * Symmetrically, walk backward until we find the last extent
	 * starting at or before @offset, for the "next" neighbor.
	 */
	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	/* NULL means "no exact match"; neighbors are in *prev_ret/*next_ret */
	return NULL;
}
160 
161 /* check to see if two extent_map structs are adjacent and safe to merge */
162 static int mergable_maps(struct extent_map *prev, struct extent_map *next)
163 {
164  if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
165  return 0;
166 
167  /*
168  * don't merge compressed extents, we need to know their
169  * actual size
170  */
172  return 0;
173 
174  if (extent_map_end(prev) == next->start &&
175  prev->flags == next->flags &&
176  prev->bdev == next->bdev &&
177  ((next->block_start == EXTENT_MAP_HOLE &&
178  prev->block_start == EXTENT_MAP_HOLE) ||
179  (next->block_start == EXTENT_MAP_INLINE &&
180  prev->block_start == EXTENT_MAP_INLINE) ||
181  (next->block_start == EXTENT_MAP_DELALLOC &&
182  prev->block_start == EXTENT_MAP_DELALLOC) ||
183  (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
184  next->block_start == extent_map_block_end(prev)))) {
185  return 1;
186  }
187  return 0;
188 }
189 
190 static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
191 {
192  struct extent_map *merge = NULL;
193  struct rb_node *rb;
194 
195  if (em->start != 0) {
196  rb = rb_prev(&em->rb_node);
197  if (rb)
198  merge = rb_entry(rb, struct extent_map, rb_node);
199  if (rb && mergable_maps(merge, em)) {
200  em->start = merge->start;
201  em->len += merge->len;
202  em->block_len += merge->block_len;
203  em->block_start = merge->block_start;
204  merge->in_tree = 0;
205  if (merge->generation > em->generation) {
206  em->mod_start = em->start;
207  em->mod_len = em->len;
208  em->generation = merge->generation;
209  list_move(&em->list, &tree->modified_extents);
210  }
211 
212  list_del_init(&merge->list);
213  rb_erase(&merge->rb_node, &tree->map);
214  free_extent_map(merge);
215  }
216  }
217 
218  rb = rb_next(&em->rb_node);
219  if (rb)
220  merge = rb_entry(rb, struct extent_map, rb_node);
221  if (rb && mergable_maps(em, merge)) {
222  em->len += merge->len;
223  em->block_len += merge->len;
224  rb_erase(&merge->rb_node, &tree->map);
225  merge->in_tree = 0;
226  if (merge->generation > em->generation) {
227  em->mod_len = em->len;
228  em->generation = merge->generation;
229  list_move(&em->list, &tree->modified_extents);
230  }
231  list_del_init(&merge->list);
232  free_extent_map(merge);
233  }
234 }
235 
248 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
249  u64 gen)
250 {
251  int ret = 0;
252  struct extent_map *em;
253  bool prealloc = false;
254 
255  write_lock(&tree->lock);
256  em = lookup_extent_mapping(tree, start, len);
257 
258  WARN_ON(!em || em->start != start);
259 
260  if (!em)
261  goto out;
262 
263  list_move(&em->list, &tree->modified_extents);
264  em->generation = gen;
266  em->mod_start = em->start;
267  em->mod_len = em->len;
268 
269  if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
270  prealloc = true;
272  }
273 
274  try_merge_map(tree, em);
275 
276  if (prealloc) {
277  em->mod_start = em->start;
278  em->mod_len = em->len;
279  }
280 
281  free_extent_map(em);
282 out:
283  write_unlock(&tree->lock);
284  return ret;
285 
286 }
287 
299  struct extent_map *em)
300 {
301  int ret = 0;
302  struct rb_node *rb;
303  struct extent_map *exist;
304 
305  exist = lookup_extent_mapping(tree, em->start, em->len);
306  if (exist) {
307  free_extent_map(exist);
308  ret = -EEXIST;
309  goto out;
310  }
311  rb = tree_insert(&tree->map, em->start, &em->rb_node);
312  if (rb) {
313  ret = -EEXIST;
314  goto out;
315  }
316  atomic_inc(&em->refs);
317 
318  em->mod_start = em->start;
319  em->mod_len = em->len;
320 
321  try_merge_map(tree, em);
322 out:
323  return ret;
324 }
325 
326 /* simple helper to do math around the end of an extent, handling wrap */
327 static u64 range_end(u64 start, u64 len)
328 {
329  if (start + len < start)
330  return (u64)-1;
331  return start + len;
332 }
333 
335  u64 start, u64 len, int strict)
336 {
337  struct extent_map *em;
338  struct rb_node *rb_node;
339  struct rb_node *prev = NULL;
340  struct rb_node *next = NULL;
341  u64 end = range_end(start, len);
342 
343  rb_node = __tree_search(&tree->map, start, &prev, &next);
344  if (!rb_node) {
345  if (prev)
346  rb_node = prev;
347  else if (next)
348  rb_node = next;
349  else
350  return NULL;
351  }
352 
353  em = rb_entry(rb_node, struct extent_map, rb_node);
354 
355  if (strict && !(end > em->start && start < extent_map_end(em)))
356  return NULL;
357 
358  atomic_inc(&em->refs);
359  return em;
360 }
361 
374  u64 start, u64 len)
375 {
376  return __lookup_extent_mapping(tree, start, len, 1);
377 }
378 
391  u64 start, u64 len)
392 {
393  return __lookup_extent_mapping(tree, start, len, 0);
394 }
395 
404 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
405 {
406  int ret = 0;
407 
409  rb_erase(&em->rb_node, &tree->map);
410  if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
411  list_del_init(&em->list);
412  em->in_tree = 0;
413  return ret;
414 }