Linux Kernel 3.7.1
qgroup.c
1 /*
2  * Copyright (C) 2011 STRATO. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/sched.h>
20 #include <linux/pagemap.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/rbtree.h>
24 #include <linux/slab.h>
25 #include <linux/workqueue.h>
26 
27 #include "ctree.h"
28 #include "transaction.h"
29 #include "disk-io.h"
30 #include "locking.h"
31 #include "ulist.h"
32 #include "ioctl.h"
33 #include "backref.h"
34 
35 /* TODO XXX FIXME
36  * - subvol delete -> delete when ref goes to 0? delete limits also?
37  * - reorganize keys
38  * - compressed
39  * - sync
40  * - rescan
41  * - copy also limits on subvol creation
42  * - limit
43  * - caches for ulists
44  * - performance benchmarks
45  * - check all ioctl parameters
46  */
47 
48 /*
49  * one struct for each qgroup, organized in fs_info->qgroup_tree.
50  */
51 struct btrfs_qgroup {
52  u64 qgroupid;
53 
54  /*
55  * state
56  */
57  u64 rfer; /* referenced */
58  u64 rfer_cmpr; /* referenced compressed */
59  u64 excl; /* exclusive */
60  u64 excl_cmpr; /* exclusive compressed */
61 
62  /*
63  * limits
64  */
65  u64 lim_flags; /* which limits are set */
66  u64 max_rfer; /* maximum referenced bytes allowed */
67  u64 max_excl; /* maximum exclusive bytes allowed */
68  u64 rsv_rfer; /* reserved referenced bytes */
69  u64 rsv_excl; /* reserved exclusive bytes */
70 
71  /*
72  * reservation tracking
73  */
74  u64 reserved; /* bytes reserved but not yet accounted */
75 
76  /*
77  * lists
78  */
79  struct list_head groups; /* groups this group is member of */
80  struct list_head members; /* groups that are members of this group */
81  struct list_head dirty; /* dirty groups */
82  struct rb_node node; /* tree of qgroups */
83 
84  /*
85  * temp variables for accounting operations
86  */
87  u64 tag; /* sequence tag set while walking the qgroups */
88  u64 refcnt; /* refcount seen during an accounting pass */
89 };
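The rfer/excl pair is the heart of the accounting model: rfer counts every byte the group can reach, excl only bytes shared with no other qgroup. A minimal sketch of how these counters feed a limit check, mirroring the tests in btrfs_qgroup_reserve() further down; the helper name is illustrative, not part of qgroup.c:

static bool qgroup_would_exceed(const struct btrfs_qgroup *qg, u64 bytes)
{
	/* referenced limit: everything reachable from the group */
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
	    qg->reserved + qg->rfer + bytes > qg->max_rfer)
		return true;
	/* exclusive limit: only unshared bytes */
	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
	    qg->reserved + qg->excl + bytes > qg->max_excl)
		return true;
	return false;
}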
90 
91 /*
92  * glue structure to represent the relations between qgroups.
93  */
94 struct btrfs_qgroup_list {
95  struct list_head next_group; /* link in the member's groups list */
96  struct list_head next_member; /* link in the parent's members list */
97  struct btrfs_qgroup *group; /* the parent qgroup */
98  struct btrfs_qgroup *member; /* the member qgroup */
99 };
100 
101 /* must be called with qgroup_lock held */
102 static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
103  u64 qgroupid)
104 {
105  struct rb_node *n = fs_info->qgroup_tree.rb_node;
106  struct btrfs_qgroup *qgroup;
107 
108  while (n) {
109  qgroup = rb_entry(n, struct btrfs_qgroup, node);
110  if (qgroup->qgroupid < qgroupid)
111  n = n->rb_left;
112  else if (qgroup->qgroupid > qgroupid)
113  n = n->rb_right;
114  else
115  return qgroup;
116  }
117  return NULL;
118 }
119 
120 /* must be called with qgroup_lock held */
121 static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
122  u64 qgroupid)
123 {
124  struct rb_node **p = &fs_info->qgroup_tree.rb_node;
125  struct rb_node *parent = NULL;
126  struct btrfs_qgroup *qgroup;
127 
128  while (*p) {
129  parent = *p;
130  qgroup = rb_entry(parent, struct btrfs_qgroup, node);
131 
132  if (qgroup->qgroupid < qgroupid)
133  p = &(*p)->rb_left;
134  else if (qgroup->qgroupid > qgroupid)
135  p = &(*p)->rb_right;
136  else
137  return qgroup;
138  }
139 
140  qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
141  if (!qgroup)
142  return ERR_PTR(-ENOMEM);
143 
144  qgroup->qgroupid = qgroupid;
145  INIT_LIST_HEAD(&qgroup->groups);
146  INIT_LIST_HEAD(&qgroup->members);
147  INIT_LIST_HEAD(&qgroup->dirty);
148 
149  rb_link_node(&qgroup->node, parent, p);
150  rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
151 
152  return qgroup;
153 }
154 
155 /* must be called with qgroup_lock held */
156 static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
157 {
158  struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
159  struct btrfs_qgroup_list *list;
160 
161  if (!qgroup)
162  return -ENOENT;
163 
164  rb_erase(&qgroup->node, &fs_info->qgroup_tree);
165  list_del(&qgroup->dirty);
166 
167  while (!list_empty(&qgroup->groups)) {
168  list = list_first_entry(&qgroup->groups,
169  struct btrfs_qgroup_list, next_group);
170  list_del(&list->next_group);
171  list_del(&list->next_member);
172  kfree(list);
173  }
174 
175  while (!list_empty(&qgroup->members)) {
176  list = list_first_entry(&qgroup->members,
177  struct btrfs_qgroup_list, next_member);
178  list_del(&list->next_group);
179  list_del(&list->next_member);
180  kfree(list);
181  }
182  kfree(qgroup);
183 
184  return 0;
185 }
186 
187 /* must be called with qgroup_lock held */
188 static int add_relation_rb(struct btrfs_fs_info *fs_info,
189  u64 memberid, u64 parentid)
190 {
191  struct btrfs_qgroup *member;
192  struct btrfs_qgroup *parent;
193  struct btrfs_qgroup_list *list;
194 
195  member = find_qgroup_rb(fs_info, memberid);
196  parent = find_qgroup_rb(fs_info, parentid);
197  if (!member || !parent)
198  return -ENOENT;
199 
200  list = kzalloc(sizeof(*list), GFP_ATOMIC);
201  if (!list)
202  return -ENOMEM;
203 
204  list->group = parent;
205  list->member = member;
206  list_add_tail(&list->next_group, &member->groups);
207  list_add_tail(&list->next_member, &parent->members);
208 
209  return 0;
210 }
211 
212 /* must be called with qgroup_lock held */
213 static int del_relation_rb(struct btrfs_fs_info *fs_info,
214  u64 memberid, u64 parentid)
215 {
216  struct btrfs_qgroup *member;
217  struct btrfs_qgroup *parent;
218  struct btrfs_qgroup_list *list;
219 
220  member = find_qgroup_rb(fs_info, memberid);
221  parent = find_qgroup_rb(fs_info, parentid);
222  if (!member || !parent)
223  return -ENOENT;
224 
225  list_for_each_entry(list, &member->groups, next_group) {
226  if (list->group == parent) {
227  list_del(&list->next_group);
228  list_del(&list->next_member);
229  kfree(list);
230  return 0;
231  }
232  }
233  return -ENOENT;
234 }
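Each btrfs_qgroup_list entry is linked into two lists at once, which is why add_relation_rb() and del_relation_rb() always touch next_group and next_member together. An illustrative walk over all parents of a qgroup (hypothetical debug helper; qgroup_lock held, as for the functions above):

static void qgroup_print_parents(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *entry;

	list_for_each_entry(entry, &qgroup->groups, next_group)
		printk(KERN_DEBUG "qgroup %llu is member of %llu\n",
		       (unsigned long long)qgroup->qgroupid,
		       (unsigned long long)entry->group->qgroupid);
}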
235 
236 /*
237  * The full config is read in one go, only called from open_ctree().
238  * It doesn't use any locking, as at this point we're still single-threaded
239  */
240 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
241 {
242  struct btrfs_key key;
243  struct btrfs_key found_key;
244  struct btrfs_root *quota_root = fs_info->quota_root;
245  struct btrfs_path *path = NULL;
246  struct extent_buffer *l;
247  int slot;
248  int ret = 0;
249  u64 flags = 0;
250 
251  if (!fs_info->quota_enabled)
252  return 0;
253 
254  path = btrfs_alloc_path();
255  if (!path) {
256  ret = -ENOMEM;
257  goto out;
258  }
259 
260  /* default this to quota off, in case no status key is found */
261  fs_info->qgroup_flags = 0;
262 
263  /*
264  * pass 1: read status, all qgroup infos and limits
265  */
266  key.objectid = 0;
267  key.type = 0;
268  key.offset = 0;
269  ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
270  if (ret)
271  goto out;
272 
273  while (1) {
274  struct btrfs_qgroup *qgroup;
275 
276  slot = path->slots[0];
277  l = path->nodes[0];
278  btrfs_item_key_to_cpu(l, &found_key, slot);
279 
280  if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
281  struct btrfs_qgroup_status_item *ptr;
282 
283  ptr = btrfs_item_ptr(l, slot,
284  struct btrfs_qgroup_status_item);
285 
286  if (btrfs_qgroup_status_version(l, ptr) !=
287  BTRFS_QGROUP_STATUS_VERSION) {
288  printk(KERN_ERR
289  "btrfs: old qgroup version, quota disabled\n");
290  goto out;
291  }
292  if (btrfs_qgroup_status_generation(l, ptr) !=
293  fs_info->generation) {
294  flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
295  printk(KERN_ERR
296  "btrfs: qgroup generation mismatch, "
297  "marked as inconsistent\n");
298  }
299  fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
300  ptr);
301  /* FIXME read scan element */
302  goto next1;
303  }
304 
305  if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
306  found_key.type != BTRFS_QGROUP_LIMIT_KEY)
307  goto next1;
308 
309  qgroup = find_qgroup_rb(fs_info, found_key.offset);
310  if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
311  (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
312  printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
313  flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
314  }
315  if (!qgroup) {
316  qgroup = add_qgroup_rb(fs_info, found_key.offset);
317  if (IS_ERR(qgroup)) {
318  ret = PTR_ERR(qgroup);
319  goto out;
320  }
321  }
322  switch (found_key.type) {
323  case BTRFS_QGROUP_INFO_KEY: {
324  struct btrfs_qgroup_info_item *ptr;
325 
326  ptr = btrfs_item_ptr(l, slot,
327  struct btrfs_qgroup_info_item);
328  qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
329  qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
330  qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
331  qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
332  /* generation currently unused */
333  break;
334  }
335  case BTRFS_QGROUP_LIMIT_KEY: {
336  struct btrfs_qgroup_limit_item *ptr;
337 
338  ptr = btrfs_item_ptr(l, slot,
339  struct btrfs_qgroup_limit_item);
340  qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
341  qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
342  qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
343  qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
344  qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
345  break;
346  }
347  }
348 next1:
349  ret = btrfs_next_item(quota_root, path);
350  if (ret < 0)
351  goto out;
352  if (ret)
353  break;
354  }
355  btrfs_release_path(path);
356 
357  /*
358  * pass 2: read all qgroup relations
359  */
360  key.objectid = 0;
361  key.type = BTRFS_QGROUP_RELATION_KEY;
362  key.offset = 0;
363  ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
364  if (ret)
365  goto out;
366  while (1) {
367  slot = path->slots[0];
368  l = path->nodes[0];
369  btrfs_item_key_to_cpu(l, &found_key, slot);
370 
371  if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
372  goto next2;
373 
374  if (found_key.objectid > found_key.offset) {
375  /* parent <- member, not needed to build config */
376  /* FIXME should we omit the key completely? */
377  goto next2;
378  }
379 
380  ret = add_relation_rb(fs_info, found_key.objectid,
381  found_key.offset);
382  if (ret)
383  goto out;
384 next2:
385  ret = btrfs_next_item(quota_root, path);
386  if (ret < 0)
387  goto out;
388  if (ret)
389  break;
390  }
391 out:
392  fs_info->qgroup_flags |= flags;
393  if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
394  fs_info->quota_enabled = 0;
395  fs_info->pending_quota_state = 0;
396  }
397  btrfs_free_path(path);
398 
399  return ret < 0 ? ret : 0;
400 }
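For reference, a sketch of the quota-tree key layout the two passes iterate over: status, info and limit items live under objectid 0 with the qgroupid in the offset, while a relation item encodes both endpoints and is stored once per direction (identifiers illustrative):

struct btrfs_key key;

/* pass 1: status (offset 0), info and limit items */
key.objectid = 0;
key.type = BTRFS_QGROUP_INFO_KEY;	/* or _STATUS_/_LIMIT_KEY */
key.offset = qgroupid;

/* pass 2: relation items */
key.objectid = src_qgroupid;
key.type = BTRFS_QGROUP_RELATION_KEY;
key.offset = dst_qgroupid;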
401 
402 /*
403  * This is only called from close_ctree() or open_ctree(), both in single-
404  * threaded paths. Clean up the in-memory structures. No locking needed.
405  */
406 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
407 {
408  struct rb_node *n;
409  struct btrfs_qgroup *qgroup;
410  struct btrfs_qgroup_list *list;
411 
412  while ((n = rb_first(&fs_info->qgroup_tree))) {
413  qgroup = rb_entry(n, struct btrfs_qgroup, node);
414  rb_erase(n, &fs_info->qgroup_tree);
415 
416  WARN_ON(!list_empty(&qgroup->dirty));
417 
418  while (!list_empty(&qgroup->groups)) {
419  list = list_first_entry(&qgroup->groups,
420  struct btrfs_qgroup_list,
421  next_group);
422  list_del(&list->next_group);
423  list_del(&list->next_member);
424  kfree(list);
425  }
426 
427  while (!list_empty(&qgroup->members)) {
428  list = list_first_entry(&qgroup->members,
429  struct btrfs_qgroup_list,
430  next_member);
431  list_del(&list->next_group);
432  list_del(&list->next_member);
433  kfree(list);
434  }
435  kfree(qgroup);
436  }
437 }
438 
439 static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
440  struct btrfs_root *quota_root,
441  u64 src, u64 dst)
442 {
443  int ret;
444  struct btrfs_path *path;
445  struct btrfs_key key;
446 
447  path = btrfs_alloc_path();
448  if (!path)
449  return -ENOMEM;
450 
451  key.objectid = src;
452  key.type = BTRFS_QGROUP_RELATION_KEY;
453  key.offset = dst;
454 
455  ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
456 
457  btrfs_mark_buffer_dirty(path->nodes[0]);
458 
459  btrfs_free_path(path);
460  return ret;
461 }
462 
463 static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
464  struct btrfs_root *quota_root,
465  u64 src, u64 dst)
466 {
467  int ret;
468  struct btrfs_path *path;
469  struct btrfs_key key;
470 
471  path = btrfs_alloc_path();
472  if (!path)
473  return -ENOMEM;
474 
475  key.objectid = src;
476  key.type = BTRFS_QGROUP_RELATION_KEY;
477  key.offset = dst;
478 
479  ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
480  if (ret < 0)
481  goto out;
482 
483  if (ret > 0) {
484  ret = -ENOENT;
485  goto out;
486  }
487 
488  ret = btrfs_del_item(trans, quota_root, path);
489 out:
490  btrfs_free_path(path);
491  return ret;
492 }
493 
494 static int add_qgroup_item(struct btrfs_trans_handle *trans,
495  struct btrfs_root *quota_root, u64 qgroupid)
496 {
497  int ret;
498  struct btrfs_path *path;
499  struct btrfs_qgroup_info_item *qgroup_info;
500  struct btrfs_qgroup_limit_item *qgroup_limit;
501  struct extent_buffer *leaf;
502  struct btrfs_key key;
503 
504  path = btrfs_alloc_path();
505  if (!path)
506  return -ENOMEM;
507 
508  key.objectid = 0;
509  key.type = BTRFS_QGROUP_INFO_KEY;
510  key.offset = qgroupid;
511 
512  ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
513  sizeof(*qgroup_info));
514  if (ret)
515  goto out;
516 
517  leaf = path->nodes[0];
518  qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
519  struct btrfs_qgroup_info_item);
520  btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
521  btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
522  btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
523  btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
524  btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
525 
526  btrfs_mark_buffer_dirty(leaf);
527 
528  btrfs_release_path(path);
529 
530  key.type = BTRFS_QGROUP_LIMIT_KEY;
531  ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
532  sizeof(*qgroup_limit));
533  if (ret)
534  goto out;
535 
536  leaf = path->nodes[0];
537  qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
538  struct btrfs_qgroup_limit_item);
539  btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
540  btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
541  btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
542  btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
543  btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
544 
545  btrfs_mark_buffer_dirty(leaf);
546 
547  ret = 0;
548 out:
549  btrfs_free_path(path);
550  return ret;
551 }
552 
553 static int del_qgroup_item(struct btrfs_trans_handle *trans,
554  struct btrfs_root *quota_root, u64 qgroupid)
555 {
556  int ret;
557  struct btrfs_path *path;
558  struct btrfs_key key;
559 
560  path = btrfs_alloc_path();
561  if (!path)
562  return -ENOMEM;
563 
564  key.objectid = 0;
565  key.type = BTRFS_QGROUP_INFO_KEY;
566  key.offset = qgroupid;
567  ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
568  if (ret < 0)
569  goto out;
570 
571  if (ret > 0) {
572  ret = -ENOENT;
573  goto out;
574  }
575 
576  ret = btrfs_del_item(trans, quota_root, path);
577  if (ret)
578  goto out;
579 
580  btrfs_release_path(path);
581 
582  key.type = BTRFS_QGROUP_LIMIT_KEY;
583  ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
584  if (ret < 0)
585  goto out;
586 
587  if (ret > 0) {
588  ret = -ENOENT;
589  goto out;
590  }
591 
592  ret = btrfs_del_item(trans, quota_root, path);
593 
594 out:
595  btrfs_free_path(path);
596  return ret;
597 }
598 
599 static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
600  struct btrfs_root *root, u64 qgroupid,
601  u64 flags, u64 max_rfer, u64 max_excl,
602  u64 rsv_rfer, u64 rsv_excl)
603 {
604  struct btrfs_path *path;
605  struct btrfs_key key;
606  struct extent_buffer *l;
607  struct btrfs_qgroup_limit_item *qgroup_limit;
608  int ret;
609  int slot;
610 
611  key.objectid = 0;
612  key.type = BTRFS_QGROUP_LIMIT_KEY;
613  key.offset = qgroupid;
614 
615  path = btrfs_alloc_path();
616  BUG_ON(!path);
617  ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
618  if (ret > 0)
619  ret = -ENOENT;
620 
621  if (ret)
622  goto out;
623 
624  l = path->nodes[0];
625  slot = path->slots[0];
626  qgroup_limit = btrfs_item_ptr(l, path->slots[0],
627  struct btrfs_qgroup_limit_item);
628  btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
629  btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
630  btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
631  btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
632  btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
633 
634  btrfs_mark_buffer_dirty(l);
635 
636 out:
637  btrfs_free_path(path);
638  return ret;
639 }
640 
641 static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
642  struct btrfs_root *root,
643  struct btrfs_qgroup *qgroup)
644 {
645  struct btrfs_path *path;
646  struct btrfs_key key;
647  struct extent_buffer *l;
648  struct btrfs_qgroup_info_item *qgroup_info;
649  int ret;
650  int slot;
651 
652  key.objectid = 0;
653  key.type = BTRFS_QGROUP_INFO_KEY;
654  key.offset = qgroup->qgroupid;
655 
656  path = btrfs_alloc_path();
657  BUG_ON(!path);
658  ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
659  if (ret > 0)
660  ret = -ENOENT;
661 
662  if (ret)
663  goto out;
664 
665  l = path->nodes[0];
666  slot = path->slots[0];
667  qgroup_info = btrfs_item_ptr(l, path->slots[0],
668  struct btrfs_qgroup_info_item);
669  btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
670  btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
671  btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
672  btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
673  btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
674 
675  btrfs_mark_buffer_dirty(l);
676 
677 out:
678  btrfs_free_path(path);
679  return ret;
680 }
681 
682 static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
683  struct btrfs_fs_info *fs_info,
684  struct btrfs_root *root)
685 {
686  struct btrfs_path *path;
687  struct btrfs_key key;
688  struct extent_buffer *l;
689  struct btrfs_qgroup_status_item *ptr;
690  int ret;
691  int slot;
692 
693  key.objectid = 0;
694  key.type = BTRFS_QGROUP_STATUS_KEY;
695  key.offset = 0;
696 
697  path = btrfs_alloc_path();
698  BUG_ON(!path);
699  ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
700  if (ret > 0)
701  ret = -ENOENT;
702 
703  if (ret)
704  goto out;
705 
706  l = path->nodes[0];
707  slot = path->slots[0];
708  ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
709  btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
710  btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
711  /* XXX scan */
712 
713  btrfs_mark_buffer_dirty(l);
714 
715 out:
716  btrfs_free_path(path);
717  return ret;
718 }
719 
720 /*
721  * called with qgroup_lock held
722  */
723 static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
724  struct btrfs_root *root)
725 {
726  struct btrfs_path *path;
727  struct btrfs_key key;
728  int ret;
729 
730  if (!root)
731  return -EINVAL;
732 
733  path = btrfs_alloc_path();
734  if (!path)
735  return -ENOMEM;
736 
737  while (1) {
738  key.objectid = 0;
739  key.offset = 0;
740  key.type = 0;
741 
742  path->leave_spinning = 1;
743  ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
744  if (ret > 0) {
745  if (path->slots[0] == 0)
746  break;
747  path->slots[0]--;
748  } else if (ret < 0) {
749  break;
750  }
751 
752  ret = btrfs_del_item(trans, root, path);
753  if (ret)
754  goto out;
755  btrfs_release_path(path);
756  }
757  ret = 0;
758 out:
759  root->fs_info->pending_quota_state = 0;
760  btrfs_free_path(path);
761  return ret;
762 }
763 
764 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
765  struct btrfs_fs_info *fs_info)
766 {
767  struct btrfs_root *quota_root;
768  struct btrfs_path *path = NULL;
769  struct btrfs_qgroup_status_item *ptr;
770  struct extent_buffer *leaf;
771  struct btrfs_key key;
772  int ret = 0;
773 
774  spin_lock(&fs_info->qgroup_lock);
775  if (fs_info->quota_root) {
776  fs_info->pending_quota_state = 1;
777  spin_unlock(&fs_info->qgroup_lock);
778  goto out;
779  }
780  spin_unlock(&fs_info->qgroup_lock);
781 
782  /*
783  * initially create the quota tree
784  */
785  quota_root = btrfs_create_tree(trans, fs_info,
786  BTRFS_QUOTA_TREE_OBJECTID);
787  if (IS_ERR(quota_root)) {
788  ret = PTR_ERR(quota_root);
789  goto out;
790  }
791 
792  path = btrfs_alloc_path();
793  if (!path) {
794  ret = -ENOMEM;
795  goto out_free_root;
796  }
797 
798  key.objectid = 0;
799  key.type = BTRFS_QGROUP_STATUS_KEY;
800  key.offset = 0;
801 
802  ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
803  sizeof(*ptr));
804  if (ret)
805  goto out_free_path;
806 
807  leaf = path->nodes[0];
808  ptr = btrfs_item_ptr(leaf, path->slots[0],
809  struct btrfs_qgroup_status_item);
810  btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
811  btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
812  fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
813  BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
814  btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
815  btrfs_set_qgroup_status_scan(leaf, ptr, 0);
816 
817  btrfs_mark_buffer_dirty(leaf);
818 
819  spin_lock(&fs_info->qgroup_lock);
820  fs_info->quota_root = quota_root;
821  fs_info->pending_quota_state = 1;
822  spin_unlock(&fs_info->qgroup_lock);
823 out_free_path:
824  btrfs_free_path(path);
825 out_free_root:
826  if (ret) {
827  free_extent_buffer(quota_root->node);
828  free_extent_buffer(quota_root->commit_root);
829  kfree(quota_root);
830  }
831 out:
832  return ret;
833 }
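A rough sketch of how the enable/disable pair is driven from the quota-control ioctl (cf. btrfs_ioctl_quota_ctl() in ioctl.c; transaction reservation and error handling abbreviated):

struct btrfs_trans_handle *trans;
int ret, err;

trans = btrfs_start_transaction(fs_info->tree_root, 2);
if (IS_ERR(trans))
	return PTR_ERR(trans);
if (enable)
	ret = btrfs_quota_enable(trans, fs_info);
else
	ret = btrfs_quota_disable(trans, fs_info);
err = btrfs_commit_transaction(trans, fs_info->tree_root);
if (err && !ret)
	ret = err;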
834 
835 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
836  struct btrfs_fs_info *fs_info)
837 {
838  struct btrfs_root *tree_root = fs_info->tree_root;
839  struct btrfs_root *quota_root;
840  int ret = 0;
841 
842  spin_lock(&fs_info->qgroup_lock);
843  fs_info->quota_enabled = 0;
844  fs_info->pending_quota_state = 0;
845  quota_root = fs_info->quota_root;
846  fs_info->quota_root = NULL;
847  btrfs_free_qgroup_config(fs_info);
848  spin_unlock(&fs_info->qgroup_lock);
849 
850  if (!quota_root)
851  return -EINVAL;
852 
853  ret = btrfs_clean_quota_tree(trans, quota_root);
854  if (ret)
855  goto out;
856 
857  ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
858  if (ret)
859  goto out;
860 
861  list_del(&quota_root->dirty_list);
862 
863  btrfs_tree_lock(quota_root->node);
864  clean_tree_block(trans, tree_root, quota_root->node);
865  btrfs_tree_unlock(quota_root->node);
866  btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
867 
868  free_extent_buffer(quota_root->node);
869  free_extent_buffer(quota_root->commit_root);
870  kfree(quota_root);
871 out:
872  return ret;
873 }
874 
875 int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
876 {
877  /* FIXME */
878  return 0;
879 }
880 
881 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
882  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
883 {
884  struct btrfs_root *quota_root;
885  int ret = 0;
886 
887  quota_root = fs_info->quota_root;
888  if (!quota_root)
889  return -EINVAL;
890 
891  ret = add_qgroup_relation_item(trans, quota_root, src, dst);
892  if (ret)
893  return ret;
894 
895  ret = add_qgroup_relation_item(trans, quota_root, dst, src);
896  if (ret) {
897  del_qgroup_relation_item(trans, quota_root, src, dst);
898  return ret;
899  }
900 
901  spin_lock(&fs_info->qgroup_lock);
902  ret = add_relation_rb(quota_root->fs_info, src, dst);
903  spin_unlock(&fs_info->qgroup_lock);
904 
905  return ret;
906 }
907 
908 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
909  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
910 {
911  struct btrfs_root *quota_root;
912  int ret = 0;
913  int err;
914 
915  quota_root = fs_info->quota_root;
916  if (!quota_root)
917  return -EINVAL;
918 
919  ret = del_qgroup_relation_item(trans, quota_root, src, dst);
920  err = del_qgroup_relation_item(trans, quota_root, dst, src);
921  if (err && !ret)
922  ret = err;
923 
924  spin_lock(&fs_info->qgroup_lock);
925  del_relation_rb(fs_info, src, dst);
926 
927  spin_unlock(&fs_info->qgroup_lock);
928 
929  return ret;
930 }
931 
932 int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
933  struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
934 {
935  struct btrfs_root *quota_root;
936  struct btrfs_qgroup *qgroup;
937  int ret = 0;
938 
939  quota_root = fs_info->quota_root;
940  if (!quota_root)
941  return -EINVAL;
942 
943  ret = add_qgroup_item(trans, quota_root, qgroupid);
944 
945  spin_lock(&fs_info->qgroup_lock);
946  qgroup = add_qgroup_rb(fs_info, qgroupid);
947  spin_unlock(&fs_info->qgroup_lock);
948 
949  if (IS_ERR(qgroup))
950  ret = PTR_ERR(qgroup);
951 
952  return ret;
953 }
954 
955 int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
956  struct btrfs_fs_info *fs_info, u64 qgroupid)
957 {
958  struct btrfs_root *quota_root;
959  int ret = 0;
960 
961  quota_root = fs_info->quota_root;
962  if (!quota_root)
963  return -EINVAL;
964 
965  ret = del_qgroup_item(trans, quota_root, qgroupid);
966 
967  spin_lock(&fs_info->qgroup_lock);
968  del_qgroup_rb(quota_root->fs_info, qgroupid);
969 
970  spin_unlock(&fs_info->qgroup_lock);
971 
972  return ret;
973 }
974 
975 int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
976  struct btrfs_fs_info *fs_info, u64 qgroupid,
977  struct btrfs_qgroup_limit *limit)
978 {
979  struct btrfs_root *quota_root = fs_info->quota_root;
980  struct btrfs_qgroup *qgroup;
981  int ret = 0;
982 
983  if (!quota_root)
984  return -EINVAL;
985 
986  ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
987  limit->flags, limit->max_rfer,
988  limit->max_excl, limit->rsv_rfer,
989  limit->rsv_excl);
990  if (ret) {
991  fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
992  printk(KERN_INFO "unable to update quota limit for %llu\n",
993  (unsigned long long)qgroupid);
994  }
995 
996  spin_lock(&fs_info->qgroup_lock);
997 
998  qgroup = find_qgroup_rb(fs_info, qgroupid);
999  if (!qgroup) {
1000  ret = -ENOENT;
1001  goto unlock;
1002  }
1003  qgroup->lim_flags = limit->flags;
1004  qgroup->max_rfer = limit->max_rfer;
1005  qgroup->max_excl = limit->max_excl;
1006  qgroup->rsv_rfer = limit->rsv_rfer;
1007  qgroup->rsv_excl = limit->rsv_excl;
1008 
1009 unlock:
1010  spin_unlock(&fs_info->qgroup_lock);
1011 
1012  return ret;
1013 }
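A sketch of how a caller fills the btrfs_qgroup_limit argument consumed above (cf. the BTRFS_IOC_QGROUP_LIMIT ioctl; the values are illustrative):

struct btrfs_qgroup_limit limit = {
	.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
	.max_rfer = 1024 * 1024 * 1024,	/* cap referenced space at 1 GiB */
};

ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &limit);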
1014 
1015 static void qgroup_dirty(struct btrfs_fs_info *fs_info,
1016  struct btrfs_qgroup *qgroup)
1017 {
1018  if (list_empty(&qgroup->dirty))
1019  list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
1020 }
1021 
1022 /*
1023  * btrfs_qgroup_record_ref is called when the ref is added or deleted. It puts
1024  * the modification into a list that's later used by btrfs_end_transaction to
1025  * pass the recorded modifications on to btrfs_qgroup_account_ref.
1026  */
1027 int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
1028  struct btrfs_delayed_ref_node *node,
1029  struct btrfs_delayed_extent_op *extent_op)
1030 {
1031  struct qgroup_update *u;
1032 
1033  BUG_ON(!trans->delayed_ref_elem.seq);
1034  u = kmalloc(sizeof(*u), GFP_NOFS);
1035  if (!u)
1036  return -ENOMEM;
1037 
1038  u->node = node;
1039  u->extent_op = extent_op;
1040  list_add_tail(&u->list, &trans->qgroup_ref_list);
1041 
1042  return 0;
1043 }
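For reference, the list element queued here is declared in transaction.h of this kernel version:

struct qgroup_update {
	struct list_head list;
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op;
};

The entries accumulate on trans->qgroup_ref_list and are drained at transaction end, where each one is fed to btrfs_qgroup_account_ref() below.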
1044 
1045 /*
1046  * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
1047  * from the fs. First, all roots referencing the extent are searched, and
1048  * then the space is accounted to the different roots accordingly. The
1049  * accounting algorithm works in 3 steps documented inline.
1050  */
1051 int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
1052  struct btrfs_fs_info *fs_info,
1053  struct btrfs_delayed_ref_node *node,
1054  struct btrfs_delayed_extent_op *extent_op)
1055 {
1056  struct btrfs_key ins;
1057  struct btrfs_root *quota_root;
1058  u64 ref_root;
1059  struct btrfs_qgroup *qgroup;
1060  struct ulist_node *unode;
1061  struct ulist *roots = NULL;
1062  struct ulist *tmp = NULL;
1063  struct ulist_iterator uiter;
1064  u64 seq;
1065  int ret = 0;
1066  int sgn;
1067 
1068  if (!fs_info->quota_enabled)
1069  return 0;
1070 
1071  BUG_ON(!fs_info->quota_root);
1072 
1073  ins.objectid = node->bytenr;
1074  ins.offset = node->num_bytes;
1075  ins.type = BTRFS_EXTENT_ITEM_KEY;
1076 
1077  if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1078  node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
1079  struct btrfs_delayed_tree_ref *ref;
1080  ref = btrfs_delayed_node_to_tree_ref(node);
1081  ref_root = ref->root;
1082  } else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1083  node->type == BTRFS_SHARED_DATA_REF_KEY) {
1084  struct btrfs_delayed_data_ref *ref;
1085  ref = btrfs_delayed_node_to_data_ref(node);
1086  ref_root = ref->root;
1087  } else {
1088  BUG();
1089  }
1090 
1091  if (!is_fstree(ref_root)) {
1092  /*
1093  * non-fs-trees are not being accounted
1094  */
1095  return 0;
1096  }
1097 
1098  switch (node->action) {
1099  case BTRFS_ADD_DELAYED_REF:
1100  case BTRFS_ADD_DELAYED_EXTENT:
1101  sgn = 1;
1102  break;
1103  case BTRFS_DROP_DELAYED_REF:
1104  sgn = -1;
1105  break;
1106  case BTRFS_UPDATE_DELAYED_HEAD:
1107  return 0;
1108  default:
1109  BUG();
1110  }
1111 
1112  /*
1113  * the delayed ref sequence number we pass depends on the direction of
1114  * the operation. for add operations, we pass (node->seq - 1) to skip
1115  * the delayed ref's current sequence number, because we need the state
1116  * of the tree before the add operation. for delete operations, we pass
1117  * (node->seq) to include the delayed ref's current sequence number,
1118  * because we need the state of the tree after the delete operation.
1119  */
1120  ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
1121  sgn > 0 ? node->seq - 1 : node->seq, &roots);
1122  if (ret < 0)
1123  goto out;
1124 
1125  spin_lock(&fs_info->qgroup_lock);
1126  quota_root = fs_info->quota_root;
1127  if (!quota_root)
1128  goto unlock;
1129 
1130  qgroup = find_qgroup_rb(fs_info, ref_root);
1131  if (!qgroup)
1132  goto unlock;
1133 
1134  /*
1135  * step 1: for each old ref, visit all nodes once and inc refcnt
1136  */
1137  tmp = ulist_alloc(GFP_ATOMIC);
1138  if (!tmp) {
1139  ret = -ENOMEM;
1140  goto unlock;
1141  }
1142  seq = fs_info->qgroup_seq;
1143  fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
1144 
1145  ULIST_ITER_INIT(&uiter);
1146  while ((unode = ulist_next(roots, &uiter))) {
1147  struct ulist_node *tmp_unode;
1148  struct ulist_iterator tmp_uiter;
1149  struct btrfs_qgroup *qg;
1150 
1151  qg = find_qgroup_rb(fs_info, unode->val);
1152  if (!qg)
1153  continue;
1154 
1155  ulist_reinit(tmp);
1156  /* XXX id not needed */
1157  ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
1158  ULIST_ITER_INIT(&tmp_uiter);
1159  while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1160  struct btrfs_qgroup_list *glist;
1161 
1162  qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1163  if (qg->refcnt < seq)
1164  qg->refcnt = seq + 1;
1165  else
1166  ++qg->refcnt;
1167 
1168  list_for_each_entry(glist, &qg->groups, next_group) {
1169  ulist_add(tmp, glist->group->qgroupid,
1170  (u64)(uintptr_t)glist->group,
1171  GFP_ATOMIC);
1172  }
1173  }
1174  }
1175 
1176  /*
1177  * step 2: walk from the new root
1178  */
1179  ulist_reinit(tmp);
1180  ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
1181  ULIST_ITER_INIT(&uiter);
1182  while ((unode = ulist_next(tmp, &uiter))) {
1183  struct btrfs_qgroup *qg;
1184  struct btrfs_qgroup_list *glist;
1185 
1186  qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1187  if (qg->refcnt < seq) {
1188  /* not visited by step 1 */
1189  qg->rfer += sgn * node->num_bytes;
1190  qg->rfer_cmpr += sgn * node->num_bytes;
1191  if (roots->nnodes == 0) {
1192  qg->excl += sgn * node->num_bytes;
1193  qg->excl_cmpr += sgn * node->num_bytes;
1194  }
1195  qgroup_dirty(fs_info, qg);
1196  }
1197  WARN_ON(qg->tag >= seq);
1198  qg->tag = seq;
1199 
1200  list_for_each_entry(glist, &qg->groups, next_group) {
1201  ulist_add(tmp, glist->group->qgroupid,
1202  (uintptr_t)glist->group, GFP_ATOMIC);
1203  }
1204  }
1205 
1206  /*
1207  * step 3: walk again from old refs
1208  */
1209  ULIST_ITER_INIT(&uiter);
1210  while ((unode = ulist_next(roots, &uiter))) {
1211  struct btrfs_qgroup *qg;
1212  struct ulist_node *tmp_unode;
1213  struct ulist_iterator tmp_uiter;
1214 
1215  qg = find_qgroup_rb(fs_info, unode->val);
1216  if (!qg)
1217  continue;
1218 
1219  ulist_reinit(tmp);
1220  ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
1221  ULIST_ITER_INIT(&tmp_uiter);
1222  while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
1223  struct btrfs_qgroup_list *glist;
1224 
1225  qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
1226  if (qg->tag == seq)
1227  continue;
1228 
1229  if (qg->refcnt - seq == roots->nnodes) {
1230  qg->excl -= sgn * node->num_bytes;
1231  qg->excl_cmpr -= sgn * node->num_bytes;
1232  qgroup_dirty(fs_info, qg);
1233  }
1234 
1235  list_for_each_entry(glist, &qg->groups, next_group) {
1236  ulist_add(tmp, glist->group->qgroupid,
1237  (uintptr_t)glist->group,
1238  GFP_ATOMIC);
1239  }
1240  }
1241  }
1242  ret = 0;
1243 unlock:
1244  spin_unlock(&fs_info->qgroup_lock);
1245 out:
1246  ulist_free(roots);
1247  ulist_free(tmp);
1248 
1249  return ret;
1250 }
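A worked example of the three steps, assuming a 4096-byte extent whose old roots are {A}, a new ref added by subvolume B (sgn = 1), and both A and B being members of a single parent qgroup P:

/*
 * seq = qgroup_seq; qgroup_seq += nnodes + 1 = 2
 *
 * step 1, walk old root A:  refcnt(A) = seq + 1, refcnt(P) = seq + 1.
 * step 2, walk new ref B:   refcnt(B) < seq, so B was not visited in
 *     step 1: B->rfer += 4096; nnodes != 0, so B->excl is unchanged
 *     (the extent is shared). P was visited in step 1, so its counters
 *     stay as they are. Both get tag = seq.
 * step 3, walk old root A:  P has tag == seq and is skipped; for A,
 *     refcnt - seq == nnodes == 1, so A->excl -= 4096: the extent is
 *     no longer exclusive to A.
 */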
1251 
1252 /*
1253  * called from commit_transaction. Writes all changed qgroups to disk.
1254  */
1255 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
1256  struct btrfs_fs_info *fs_info)
1257 {
1258  struct btrfs_root *quota_root = fs_info->quota_root;
1259  int ret = 0;
1260 
1261  if (!quota_root)
1262  goto out;
1263 
1264  fs_info->quota_enabled = fs_info->pending_quota_state;
1265 
1266  spin_lock(&fs_info->qgroup_lock);
1267  while (!list_empty(&fs_info->dirty_qgroups)) {
1268  struct btrfs_qgroup *qgroup;
1269  qgroup = list_first_entry(&fs_info->dirty_qgroups,
1270  struct btrfs_qgroup, dirty);
1271  list_del_init(&qgroup->dirty);
1272  spin_unlock(&fs_info->qgroup_lock);
1273  ret = update_qgroup_info_item(trans, quota_root, qgroup);
1274  if (ret)
1275  fs_info->qgroup_flags |=
1276  BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1277  spin_lock(&fs_info->qgroup_lock);
1278  }
1279  if (fs_info->quota_enabled)
1280  fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
1281  else
1282  fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
1283  spin_unlock(&fs_info->qgroup_lock);
1284 
1285  ret = update_qgroup_status_item(trans, fs_info, quota_root);
1286  if (ret)
1287  fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1288 
1289 out:
1290 
1291  return ret;
1292 }
1293 
1294 /*
1295  * copy the accounting information between qgroups. This is necessary when a
1296  * snapshot or a subvolume is created
1297  */
1298 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
1299  struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
1300  struct btrfs_qgroup_inherit *inherit)
1301 {
1302  int ret = 0;
1303  int i;
1304  u64 *i_qgroups;
1305  struct btrfs_root *quota_root = fs_info->quota_root;
1306  struct btrfs_qgroup *srcgroup;
1307  struct btrfs_qgroup *dstgroup;
1308  u32 level_size = 0;
1309 
1310  if (!fs_info->quota_enabled)
1311  return 0;
1312 
1313  if (!quota_root)
1314  return -EINVAL;
1315 
1316  /*
1317  * create a tracking group for the subvol itself
1318  */
1319  ret = add_qgroup_item(trans, quota_root, objectid);
1320  if (ret)
1321  goto out;
1322 
1323  if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
1324  ret = update_qgroup_limit_item(trans, quota_root, objectid,
1325  inherit->lim.flags,
1326  inherit->lim.max_rfer,
1327  inherit->lim.max_excl,
1328  inherit->lim.rsv_rfer,
1329  inherit->lim.rsv_excl);
1330  if (ret)
1331  goto out;
1332  }
1333 
1334  if (srcid) {
1335  struct btrfs_root *srcroot;
1336  struct btrfs_key srckey;
1337  int srcroot_level;
1338 
1339  srckey.objectid = srcid;
1340  srckey.type = BTRFS_ROOT_ITEM_KEY;
1341  srckey.offset = (u64)-1;
1342  srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
1343  if (IS_ERR(srcroot)) {
1344  ret = PTR_ERR(srcroot);
1345  goto out;
1346  }
1347 
1348  rcu_read_lock();
1349  srcroot_level = btrfs_header_level(srcroot->node);
1350  level_size = btrfs_level_size(srcroot, srcroot_level);
1351  rcu_read_unlock();
1352  }
1353 
1354  /*
1355  * add qgroup to all inherited groups
1356  */
1357  if (inherit) {
1358  i_qgroups = (u64 *)(inherit + 1);
1359  for (i = 0; i < inherit->num_qgroups; ++i) {
1360  ret = add_qgroup_relation_item(trans, quota_root,
1361  objectid, *i_qgroups);
1362  if (ret)
1363  goto out;
1364  ret = add_qgroup_relation_item(trans, quota_root,
1365  *i_qgroups, objectid);
1366  if (ret)
1367  goto out;
1368  ++i_qgroups;
1369  }
1370  }
1371 
1372 
1373  spin_lock(&fs_info->qgroup_lock);
1374 
1375  dstgroup = add_qgroup_rb(fs_info, objectid);
1376  if (IS_ERR(dstgroup)) {
1377  ret = PTR_ERR(dstgroup);
1378  goto unlock;
1379  }
1380 
1381  if (srcid) {
1382  srcgroup = find_qgroup_rb(fs_info, srcid);
1383  if (!srcgroup)
1384  goto unlock;
1385  dstgroup->rfer = srcgroup->rfer - level_size;
1386  dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
1387  srcgroup->excl = level_size;
1388  srcgroup->excl_cmpr = level_size;
1389  qgroup_dirty(fs_info, dstgroup);
1390  qgroup_dirty(fs_info, srcgroup);
1391  }
1392 
1393  if (!inherit)
1394  goto unlock;
1395 
1396  i_qgroups = (u64 *)(inherit + 1);
1397  for (i = 0; i < inherit->num_qgroups; ++i) {
1398  ret = add_relation_rb(quota_root->fs_info, objectid,
1399  *i_qgroups);
1400  if (ret)
1401  goto unlock;
1402  ++i_qgroups;
1403  }
1404 
1405  for (i = 0; i < inherit->num_ref_copies; ++i) {
1406  struct btrfs_qgroup *src;
1407  struct btrfs_qgroup *dst;
1408 
1409  src = find_qgroup_rb(fs_info, i_qgroups[0]);
1410  dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1411 
1412  if (!src || !dst) {
1413  ret = -EINVAL;
1414  goto unlock;
1415  }
1416 
1417  dst->rfer = src->rfer - level_size;
1418  dst->rfer_cmpr = src->rfer_cmpr - level_size;
1419  i_qgroups += 2;
1420  }
1421  for (i = 0; i < inherit->num_excl_copies; ++i) {
1422  struct btrfs_qgroup *src;
1423  struct btrfs_qgroup *dst;
1424 
1425  src = find_qgroup_rb(fs_info, i_qgroups[0]);
1426  dst = find_qgroup_rb(fs_info, i_qgroups[1]);
1427 
1428  if (!src || !dst) {
1429  ret = -EINVAL;
1430  goto unlock;
1431  }
1432 
1433  dst->excl = src->excl + level_size;
1434  dst->excl_cmpr = src->excl_cmpr + level_size;
1435  i_qgroups += 2;
1436  }
1437 
1438 unlock:
1439  spin_unlock(&fs_info->qgroup_lock);
1440 out:
1441  return ret;
1442 }
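The level_size arithmetic above reflects what a snapshot actually shares: everything except its freshly COWed root node. A numeric illustration, assuming a 16 KiB tree block (level_size = 16384):

/*
 * src before snapshot:  rfer = 1 GiB, excl = 1 GiB
 * after inherit:        dst->rfer = 1 GiB - 16 KiB  (everything shared
 *                                   except dst's new root node)
 *                       src->excl = 16 KiB          (only src's root
 *                                   node remains unshared)
 */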
1443 
1444 /*
1445  * reserve some space for a qgroup and all its parents. The reservation takes
1446  * place with start_transaction or dealloc_reserve, similar to ENOSPC
1447  * accounting. If not enough space is available, EDQUOT is returned.
1448  * We assume that the requested space is new for all qgroups.
1449  */
1450 int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
1451 {
1452  struct btrfs_root *quota_root;
1453  struct btrfs_qgroup *qgroup;
1454  struct btrfs_fs_info *fs_info = root->fs_info;
1455  u64 ref_root = root->root_key.objectid;
1456  int ret = 0;
1457  struct ulist *ulist = NULL;
1458  struct ulist_node *unode;
1459  struct ulist_iterator uiter;
1460 
1461  if (!is_fstree(ref_root))
1462  return 0;
1463 
1464  if (num_bytes == 0)
1465  return 0;
1466 
1467  spin_lock(&fs_info->qgroup_lock);
1468  quota_root = fs_info->quota_root;
1469  if (!quota_root)
1470  goto out;
1471 
1472  qgroup = find_qgroup_rb(fs_info, ref_root);
1473  if (!qgroup)
1474  goto out;
1475 
1476  /*
1477  * in a first step, we check all affected qgroups if any limits would
1478  * be exceeded
1479  */
1480  ulist = ulist_alloc(GFP_ATOMIC);
1481  if (!ulist) {
1482  ret = -ENOMEM;
1483  goto out;
1484  }
1485  ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
1486  ULIST_ITER_INIT(&uiter);
1487  while ((unode = ulist_next(ulist, &uiter))) {
1488  struct btrfs_qgroup *qg;
1489  struct btrfs_qgroup_list *glist;
1490 
1491  qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1492 
1493  if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
1494  qg->reserved + qg->rfer + num_bytes >
1495  qg->max_rfer)
1496  ret = -EDQUOT;
1497 
1498  if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
1499  qg->reserved + qg->excl + num_bytes >
1500  qg->max_excl)
1501  ret = -EDQUOT;
1502 
1503  list_for_each_entry(glist, &qg->groups, next_group) {
1504  ulist_add(ulist, glist->group->qgroupid,
1505  (uintptr_t)glist->group, GFP_ATOMIC);
1506  }
1507  }
1508  if (ret)
1509  goto out;
1510 
1511  /*
1512  * no limits exceeded, now record the reservation into all qgroups
1513  */
1514  ULIST_ITER_INIT(&uiter);
1515  while ((unode = ulist_next(ulist, &uiter))) {
1516  struct btrfs_qgroup *qg;
1517 
1518  qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1519 
1520  qg->reserved += num_bytes;
1521  }
1522 
1523 out:
1524  spin_unlock(&fs_info->qgroup_lock);
1525  ulist_free(ulist);
1526 
1527  return ret;
1528 }
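The expected pairing with btrfs_qgroup_free() below, as used by the space reservation paths (a minimal sketch; the allocation work itself is elided):

ret = btrfs_qgroup_reserve(root, num_bytes);
if (ret)
	return ret;	/* -EDQUOT: a limit would be exceeded */

/* ... do the allocation; on failure, or once the delayed refs have
 * been accounted into rfer/excl, drop the reservation again: */
btrfs_qgroup_free(root, num_bytes);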
1529 
1530 void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
1531 {
1532  struct btrfs_root *quota_root;
1533  struct btrfs_qgroup *qgroup;
1534  struct btrfs_fs_info *fs_info = root->fs_info;
1535  struct ulist *ulist = NULL;
1536  struct ulist_node *unode;
1537  struct ulist_iterator uiter;
1538  u64 ref_root = root->root_key.objectid;
1539 
1540  if (!is_fstree(ref_root))
1541  return;
1542 
1543  if (num_bytes == 0)
1544  return;
1545 
1546  spin_lock(&fs_info->qgroup_lock);
1547 
1548  quota_root = fs_info->quota_root;
1549  if (!quota_root)
1550  goto out;
1551 
1552  qgroup = find_qgroup_rb(fs_info, ref_root);
1553  if (!qgroup)
1554  goto out;
1555 
1556  ulist = ulist_alloc(GFP_ATOMIC);
1557  if (!ulist) {
1558  btrfs_std_error(fs_info, -ENOMEM);
1559  goto out;
1560  }
1561  ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
1562  ULIST_ITER_INIT(&uiter);
1563  while ((unode = ulist_next(ulist, &uiter))) {
1564  struct btrfs_qgroup *qg;
1565  struct btrfs_qgroup_list *glist;
1566 
1567  qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
1568 
1569  qg->reserved -= num_bytes;
1570 
1571  list_for_each_entry(glist, &qg->groups, next_group) {
1572  ulist_add(ulist, glist->group->qgroupid,
1573  (uintptr_t)glist->group, GFP_ATOMIC);
1574  }
1575  }
1576 
1577 out:
1578  spin_unlock(&fs_info->qgroup_lock);
1579  ulist_free(ulist);
1580 }
1581 
1582 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
1583 {
1584  if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
1585  return;
1586  printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
1587  trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
1588  trans->delayed_ref_elem.seq);
1589  BUG();
1590 }