Linux Kernel 3.7.1
gc.c
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program; if not, write to the Free Software Foundation, Inc., 51
17  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18  *
19  * Authors: Adrian Hunter
20  * Artem Bityutskiy (Битюцкий Артём)
21  */
22 
23 /*
24  * This file implements garbage collection. The procedure for garbage collection
25  * is different depending on whether a LEB is an index LEB (i.e. contains index
26  * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
27  * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
28  * nodes to the journal, at which point the garbage-collected LEB is free to be
29  * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
30  * dirty in the TNC, and after the next commit, the garbage-collected LEB is
31  * to be reused. Garbage collection will cause the number of dirty index nodes
32  * to grow; however, sufficient space is reserved for the index to ensure the
33  * commit will never run out of space.
34  *
35  * Notes about the dead watermark. In the current UBIFS implementation we assume that
36  * LEBs which have less than @c->dead_wm bytes of free + dirty space are full
37  * and not worth garbage-collecting. The dead watermark is one min. I/O unit
38  * size or the min. UBIFS node size, whichever is greater. Indeed, the UBIFS
39  * garbage collector has to synchronize the GC head's write buffer before
40  * returning, so this is about wasting one min. I/O unit. However, UBIFS GC can
41  * actually reclaim even very small pieces of dirty space by garbage collecting
42  * enough dirty LEBs, but we do not bother doing this in this implementation.
43  *
44  * Notes about the dark watermark. The results of GC work depend on how big the
45  * UBIFS nodes GC deals with are. Large nodes make GC waste more space. Indeed,
46  * if GC moves data from LEB A to LEB B and the nodes in LEB A are large, GC would
47  * have to waste large pieces of free space at the end of LEB B, because nodes
48  * from LEB A would not fit. And the worst situation is when all nodes are of
49  * maximum size. So the dark watermark is the amount of free + dirty space in a
50  * LEB which is guaranteed to be reclaimable. If a LEB has less space, the GC might
51  * be unable to reclaim it. So, LEBs with free + dirty greater than the dark
52  * watermark are "good" LEBs from GC's point of view. The other LEBs are not so
53  * good, and GC takes extra care when moving them.
54  */
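To make the two watermarks concrete, here is a minimal editorial sketch of the classification they imply. The helper name 'gc_classify_leb' is hypothetical and does not exist in UBIFS; it only restates the rules above in terms of the 'c->dead_wm', 'c->dark_wm' and LEB-properties fields that the code in this file already uses.

static inline int gc_classify_leb(const struct ubifs_info *c,
				  const struct ubifs_lprops *lp)
{
	/* Illustrative only -- not part of gc.c */
	int space = lp->free + lp->dirty;

	if (space < c->dead_wm)
		return 0;	/* "dead": effectively full, not worth GC'ing */
	if (space < c->dark_wm)
		return 1;	/* may be reclaimable, but no guarantee */
	return 2;		/* above the dark watermark: guaranteed reclaimable */
}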
55 
56 #include <linux/slab.h>
57 #include <linux/pagemap.h>
58 #include <linux/list_sort.h>
59 #include "ubifs.h"
60 
61 /*
62  * GC may need to move more than one LEB to make progress. The below constants
63  * define "soft" and "hard" limits on the number of LEBs the garbage collector
64  * may move.
65  */
66 #define SOFT_LEBS_LIMIT 4
67 #define HARD_LEBS_LIMIT 32
68 
81 static int switch_gc_head(struct ubifs_info *c)
82 {
83  int err, gc_lnum = c->gc_lnum;
84  struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
85 
86  ubifs_assert(gc_lnum != -1);
87  dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
88  wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
89  c->leb_size - wbuf->offs - wbuf->used);
90 
91  err = ubifs_wbuf_sync_nolock(wbuf);
92  if (err)
93  return err;
94 
95  /*
96  * The GC write-buffer was synchronized, so we may safely unmap
97  * 'c->gc_lnum'.
98  */
99  err = ubifs_leb_unmap(c, gc_lnum);
100  if (err)
101  return err;
102 
103  err = ubifs_wbuf_sync_nolock(wbuf);
104  if (err)
105  return err;
106 
107  err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
108  if (err)
109  return err;
110 
111  c->gc_lnum = -1;
112  err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
113  return err;
114 }
115 
125 static int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
126 {
127  ino_t inuma, inumb;
128  struct ubifs_info *c = priv;
129  struct ubifs_scan_node *sa, *sb;
130 
131  cond_resched();
132  if (a == b)
133  return 0;
134 
135  sa = list_entry(a, struct ubifs_scan_node, list);
136  sb = list_entry(b, struct ubifs_scan_node, list);
137 
138  ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
139  ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
140  ubifs_assert(sa->type == UBIFS_DATA_NODE);
141  ubifs_assert(sb->type == UBIFS_DATA_NODE);
142 
143  inuma = key_inum(c, &sa->key);
144  inumb = key_inum(c, &sb->key);
145 
146  if (inuma == inumb) {
147  unsigned int blka = key_block(c, &sa->key);
148  unsigned int blkb = key_block(c, &sb->key);
149 
150  if (blka <= blkb)
151  return -1;
152  } else if (inuma <= inumb)
153  return -1;
154 
155  return 1;
156 }
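For illustration (an editorial note, not from the kernel sources): when sort_nodes() below passes this comparator to list_sort(), the data-node list ends up grouped by inode number and ordered by block number within each inode, for example:

	/* unsorted: (ino 7, blk 2), (ino 3, blk 9), (ino 7, blk 0), (ino 3, blk 1) */
	/* sorted:   (ino 3, blk 1), (ino 3, blk 9), (ino 7, blk 0), (ino 7, blk 2) */

Writing the blocks of one inode out adjacently like this helps the bulk-read optimization that move_nodes() mentions.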
157 
158 /*
159  * nondata_nodes_cmp - compare 2 non-data nodes.
160  * @priv: UBIFS file-system description object
161  * @a: first node
162  * @b: second node
163  *
164  * This function compares nodes @a and @b. It makes sure that inode nodes go
165  * first and are sorted by length in descending order. Directory entry nodes go
166  * after inode nodes and are sorted in ascending hash value order.
167  */
168 static int nondata_nodes_cmp(void *priv, struct list_head *a,
169  struct list_head *b)
170 {
171  ino_t inuma, inumb;
172  struct ubifs_info *c = priv;
173  struct ubifs_scan_node *sa, *sb;
174 
175  cond_resched();
176  if (a == b)
177  return 0;
178 
179  sa = list_entry(a, struct ubifs_scan_node, list);
180  sb = list_entry(b, struct ubifs_scan_node, list);
181 
182  ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
183  key_type(c, &sb->key) != UBIFS_DATA_KEY);
184  ubifs_assert(sa->type != UBIFS_DATA_NODE &&
185  sb->type != UBIFS_DATA_NODE);
186 
187  /* Inodes go before directory entries */
188  if (sa->type == UBIFS_INO_NODE) {
189  if (sb->type == UBIFS_INO_NODE)
190  return sb->len - sa->len;
191  return -1;
192  }
193  if (sb->type == UBIFS_INO_NODE)
194  return 1;
195 
196  ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
197  key_type(c, &sa->key) == UBIFS_XENT_KEY);
198  ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
199  key_type(c, &sb->key) == UBIFS_XENT_KEY);
200  ubifs_assert(sa->type == UBIFS_DENT_NODE ||
201  sa->type == UBIFS_XENT_NODE);
202  ubifs_assert(sb->type == UBIFS_DENT_NODE ||
203  sb->type == UBIFS_XENT_NODE);
204 
205  inuma = key_inum(c, &sa->key);
206  inumb = key_inum(c, &sb->key);
207 
208  if (inuma == inumb) {
209  uint32_t hasha = key_hash(c, &sa->key);
210  uint32_t hashb = key_hash(c, &sb->key);
211 
212  if (hasha <= hashb)
213  return -1;
214  } else if (inuma <= inumb)
215  return -1;
216 
217  return 1;
218 }
219 
247 static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
248  struct list_head *nondata, int *min)
249 {
250  int err;
251  struct ubifs_scan_node *snod, *tmp;
252 
253  *min = INT_MAX;
254 
255  /* Separate data nodes and non-data nodes */
256  list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
257  ubifs_assert(snod->type == UBIFS_INO_NODE ||
258  snod->type == UBIFS_DATA_NODE ||
259  snod->type == UBIFS_DENT_NODE ||
260  snod->type == UBIFS_XENT_NODE ||
261  snod->type == UBIFS_TRUN_NODE);
262 
263  if (snod->type != UBIFS_INO_NODE &&
264  snod->type != UBIFS_DATA_NODE &&
265  snod->type != UBIFS_DENT_NODE &&
266  snod->type != UBIFS_XENT_NODE) {
267  /* Probably truncation node, zap it */
268  list_del(&snod->list);
269  kfree(snod);
270  continue;
271  }
272 
273  ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
274  key_type(c, &snod->key) == UBIFS_INO_KEY ||
275  key_type(c, &snod->key) == UBIFS_DENT_KEY ||
276  key_type(c, &snod->key) == UBIFS_XENT_KEY);
277 
278  err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
279  snod->offs, 0);
280  if (err < 0)
281  return err;
282 
283  if (!err) {
284  /* The node is obsolete, remove it from the list */
285  list_del(&snod->list);
286  kfree(snod);
287  continue;
288  }
289 
290  if (snod->len < *min)
291  *min = snod->len;
292 
293  if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
294  list_move_tail(&snod->list, nondata);
295  }
296 
297  /* Sort data and non-data nodes */
298  list_sort(c, &sleb->nodes, &data_nodes_cmp);
299  list_sort(c, nondata, &nondata_nodes_cmp);
300 
301  err = dbg_check_data_nodes_order(c, &sleb->nodes);
302  if (err)
303  return err;
304  err = dbg_check_nondata_nodes_order(c, nondata);
305  if (err)
306  return err;
307  return 0;
308 }
309 
321 static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
322  struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
323 {
324  int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
325 
326  cond_resched();
327  err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
328  if (err)
329  return err;
330 
331  err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
332  snod->offs, new_lnum, new_offs,
333  snod->len);
334  list_del(&snod->list);
335  kfree(snod);
336  return err;
337 }
338 
349 static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
350 {
351  int err, min;
352  LIST_HEAD(nondata);
353  struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
354 
355  if (wbuf->lnum == -1) {
356  /*
357  * The GC journal head is not set, because it is the first GC
358  * invocation since mount.
359  */
360  err = switch_gc_head(c);
361  if (err)
362  return err;
363  }
364 
365  err = sort_nodes(c, sleb, &nondata, &min);
366  if (err)
367  goto out;
368 
369  /* Write nodes to their new location. Use the first-fit strategy */
370  while (1) {
371  int avail;
372  struct ubifs_scan_node *snod, *tmp;
373 
374  /* Move data nodes */
375  list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
376  avail = c->leb_size - wbuf->offs - wbuf->used;
377  if (snod->len > avail)
378  /*
379  * Do not skip data nodes in order to optimize
380  * bulk-read.
381  */
382  break;
383 
384  err = move_node(c, sleb, snod, wbuf);
385  if (err)
386  goto out;
387  }
388 
389  /* Move non-data nodes */
390  list_for_each_entry_safe(snod, tmp, &nondata, list) {
391  avail = c->leb_size - wbuf->offs - wbuf->used;
392  if (avail < min)
393  break;
394 
395  if (snod->len > avail) {
396  /*
397  * Keep going only if this is an inode with
398  * some data. Otherwise stop and switch the GC
399  * head. IOW, we assume that data-less inode
400  * nodes and direntry nodes are roughly of the
401  * same size.
402  */
403  if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
404  snod->len == UBIFS_INO_NODE_SZ)
405  break;
406  continue;
407  }
408 
409  err = move_node(c, sleb, snod, wbuf);
410  if (err)
411  goto out;
412  }
413 
414  if (list_empty(&sleb->nodes) && list_empty(&nondata))
415  break;
416 
417  /*
418  * Waste the rest of the space in the LEB and switch to the
419  * next LEB.
420  */
421  err = switch_gc_head(c);
422  if (err)
423  goto out;
424  }
425 
426  return 0;
427 
428 out:
429  list_splice_tail(&nondata, &sleb->nodes);
430  return err;
431 }
432 
446 static int gc_sync_wbufs(struct ubifs_info *c)
447 {
448  int err, i;
449 
450  for (i = 0; i < c->jhead_cnt; i++) {
451  if (i == GCHD)
452  continue;
453  err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
454  if (err)
455  return err;
456  }
457  return 0;
458 }
459 
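The kernel-doc block for the next function is collapsed in this listing; the comment below is an editorial summary reconstructed from the function body and from its caller, ubifs_garbage_collect().

/*
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock (editorial
 * summary). Going by the code below, it returns %LEB_FREED if the LEB was
 * freed and may be reused, %LEB_FREED_IDX if an indexing LEB was freed (it
 * becomes usable only after the next commit), %LEB_RETAINED if the LEB was
 * kept as 'c->gc_lnum', or a negative error code in case of failure.
 */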
469 int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
470 {
471  struct ubifs_scan_leb *sleb;
472  struct ubifs_scan_node *snod;
473  struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
474  int err = 0, lnum = lp->lnum;
475 
476  ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
477  c->need_recovery);
478  ubifs_assert(c->gc_lnum != lnum);
479  ubifs_assert(wbuf->lnum != lnum);
480 
481  if (lp->free + lp->dirty == c->leb_size) {
482  /* Special case - a free LEB */
483  dbg_gc("LEB %d is free, return it", lp->lnum);
484  ubifs_assert(!(lp->flags & LPROPS_INDEX));
485 
486  if (lp->free != c->leb_size) {
487  /*
488  * Write buffers must be sync'd before unmapping
489  * freeable LEBs, because one of them may contain data
490  * which obsoletes something in 'lp->lnum'.
491  */
492  err = gc_sync_wbufs(c);
493  if (err)
494  return err;
495  err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
496  0, 0, 0, 0);
497  if (err)
498  return err;
499  }
500  err = ubifs_leb_unmap(c, lp->lnum);
501  if (err)
502  return err;
503 
504  if (c->gc_lnum == -1) {
505  c->gc_lnum = lnum;
506  return LEB_RETAINED;
507  }
508 
509  return LEB_FREED;
510  }
511 
512  /*
513  * We scan the entire LEB even though we only really need to scan up to
514  * (c->leb_size - lp->free).
515  */
516  sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
517  if (IS_ERR(sleb))
518  return PTR_ERR(sleb);
519 
520  ubifs_assert(!list_empty(&sleb->nodes));
521  snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
522 
523  if (snod->type == UBIFS_IDX_NODE) {
524  struct ubifs_gced_idx_leb *idx_gc;
525 
526  dbg_gc("indexing LEB %d (free %d, dirty %d)",
527  lnum, lp->free, lp->dirty);
528  list_for_each_entry(snod, &sleb->nodes, list) {
529  struct ubifs_idx_node *idx = snod->node;
530  int level = le16_to_cpu(idx->level);
531 
532  ubifs_assert(snod->type == UBIFS_IDX_NODE);
533  key_read(c, ubifs_idx_key(c, idx), &snod->key);
534  err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
535  snod->offs);
536  if (err)
537  goto out;
538  }
539 
540  idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
541  if (!idx_gc) {
542  err = -ENOMEM;
543  goto out;
544  }
545 
546  idx_gc->lnum = lnum;
547  idx_gc->unmap = 0;
548  list_add(&idx_gc->list, &c->idx_gc);
549 
550  /*
551  * Don't release the LEB until after the next commit, because
552  * it may contain data which is needed for recovery. So
553  * although we freed this LEB, it will become usable only after
554  * the commit.
555  */
556  err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
557  LPROPS_INDEX, 1);
558  if (err)
559  goto out;
560  err = LEB_FREED_IDX;
561  } else {
562  dbg_gc("data LEB %d (free %d, dirty %d)",
563  lnum, lp->free, lp->dirty);
564 
565  err = move_nodes(c, sleb);
566  if (err)
567  goto out_inc_seq;
568 
569  err = gc_sync_wbufs(c);
570  if (err)
571  goto out_inc_seq;
572 
573  err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
574  if (err)
575  goto out_inc_seq;
576 
577  /* Allow for races with TNC */
578  c->gced_lnum = lnum;
579  smp_wmb();
580  c->gc_seq += 1;
581  smp_wmb();
582 
583  if (c->gc_lnum == -1) {
584  c->gc_lnum = lnum;
585  err = LEB_RETAINED;
586  } else {
587  err = ubifs_wbuf_sync_nolock(wbuf);
588  if (err)
589  goto out;
590 
591  err = ubifs_leb_unmap(c, lnum);
592  if (err)
593  goto out;
594 
595  err = LEB_FREED;
596  }
597  }
598 
599 out:
600  ubifs_scan_destroy(sleb);
601  return err;
602 
603 out_inc_seq:
604  /* We may have moved at least some nodes so allow for races with TNC */
605  c->gced_lnum = lnum;
606  smp_wmb();
607  c->gc_seq += 1;
608  smp_wmb();
609  goto out;
610 }
611 
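The kernel-doc for ubifs_garbage_collect() is likewise collapsed in this listing; the summary below is reconstructed from the loop that follows.

/*
 * ubifs_garbage_collect - UBIFS garbage collector (editorial summary).
 * @anyway: do GC even if an empty LEB could be picked instead (see how it is
 * passed to ubifs_find_dirty_leb() below)
 *
 * The loop below returns a positive LEB number when a LEB has been freed and
 * may be used, %-EAGAIN when the caller should run a commit first, %-ENOSPC
 * when GC cannot make progress, or another negative error code on failure,
 * in which case the file-system is switched to read-only mode.
 */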
648 int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
649 {
650  int i, err, ret, min_space = c->dead_wm;
651  struct ubifs_lprops lp;
652  struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
653 
654  ubifs_assert_cmt_locked(c);
655  ubifs_assert(!c->ro_media && !c->ro_mount);
656 
657  if (ubifs_gc_should_commit(c))
658  return -EAGAIN;
659 
660  mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
661 
662  if (c->ro_error) {
663  ret = -EROFS;
664  goto out_unlock;
665  }
666 
667  /* We expect the write-buffer to be empty on entry */
668  ubifs_assert(!wbuf->used);
669 
670  for (i = 0; ; i++) {
671  int space_before = c->leb_size - wbuf->offs - wbuf->used;
672  int space_after;
673 
674  cond_resched();
675 
676  /* Give the commit an opportunity to run */
677  if (ubifs_gc_should_commit(c)) {
678  ret = -EAGAIN;
679  break;
680  }
681 
682  if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
683  /*
684  * We've done enough iterations. Indexing LEBs were
685  * moved and will be available after the commit.
686  */
687  dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
688  ubifs_commit_required(c);
689  ret = -EAGAIN;
690  break;
691  }
692 
693  if (i > HARD_LEBS_LIMIT) {
694  /*
695  * We've moved too many LEBs and have not made
696  * progress, give up.
697  */
698  dbg_gc("hard limit, -ENOSPC");
699  ret = -ENOSPC;
700  break;
701  }
702 
703  /*
704  * Empty and freeable LEBs can turn up while we waited for
705  * the wbuf lock, or while we have been running GC. In that
706  * case, we should just return one of those instead of
707  * continuing to GC dirty LEBs. Hence we request
708  * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
709  */
710  ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
711  if (ret) {
712  if (ret == -ENOSPC)
713  dbg_gc("no more dirty LEBs");
714  break;
715  }
716 
717  dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
718  lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
719  min_space);
720 
721  space_before = c->leb_size - wbuf->offs - wbuf->used;
722  if (wbuf->lnum == -1)
723  space_before = 0;
724 
725  ret = ubifs_garbage_collect_leb(c, &lp);
726  if (ret < 0) {
727  if (ret == -EAGAIN) {
728  /*
729  * This is not an error, so we have to return the
730  * LEB to lprops. But if 'ubifs_return_leb()'
731  * fails, its failure code is propagated to the
732  * caller instead of the original '-EAGAIN'.
733  */
734  err = ubifs_return_leb(c, lp.lnum);
735  if (err)
736  ret = err;
737  break;
738  }
739  goto out;
740  }
741 
742  if (ret == LEB_FREED) {
743  /* An LEB has been freed and is ready for use */
744  dbg_gc("LEB %d freed, return", lp.lnum);
745  ret = lp.lnum;
746  break;
747  }
748 
749  if (ret == LEB_FREED_IDX) {
750  /*
751  * This was an indexing LEB and it cannot be used
752  * immediately. Instead of requesting the commit
753  * straight away, we try to garbage-collect some
754  * more.
755  */
756  dbg_gc("indexing LEB %d freed, continue", lp.lnum);
757  continue;
758  }
759 
760  ubifs_assert(ret == LEB_RETAINED);
761  space_after = c->leb_size - wbuf->offs - wbuf->used;
762  dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
763  space_after - space_before);
764 
765  if (space_after > space_before) {
766  /* GC makes progress, keep working */
767  min_space >>= 1;
768  if (min_space < c->dead_wm)
769  min_space = c->dead_wm;
770  continue;
771  }
772 
773  dbg_gc("did not make progress");
774 
775  /*
776  * GC moved an LEB but has not made any progress. This means
777  * that the previous GC head LEB contained too little free space
778  * and the LEB which was GC'ed contained only large nodes which
779  * did not fit that space.
780  *
781  * We can do 2 things:
782  * 1. pick another LEB in the hope that it will contain a small
783  * node which will fit the space we have at the end of the current GC
784  * head LEB, but there is no guarantee, so we try this out
785  * unless we have already been working for too long;
786  * 2. request an LEB with more dirty space, which will force
787  * 'ubifs_find_dirty_leb()' to start scanning the lprops
788  * table, instead of just picking one from the heap
789  * (previously it already picked the dirtiest LEB).
790  */
791  if (i < SOFT_LEBS_LIMIT) {
792  dbg_gc("try again");
793  continue;
794  }
795 
796  min_space <<= 1;
797  if (min_space > c->dark_wm)
798  min_space = c->dark_wm;
799  dbg_gc("set min. space to %d", min_space);
800  }
801 
802  if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
803  dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
804  ubifs_commit_required(c);
805  ret = -EAGAIN;
806  }
807 
808  err = ubifs_wbuf_sync_nolock(wbuf);
809  if (!err)
810  err = ubifs_leb_unmap(c, c->gc_lnum);
811  if (err) {
812  ret = err;
813  goto out;
814  }
815 out_unlock:
816  mutex_unlock(&wbuf->io_mutex);
817  return ret;
818 
819 out:
820  ubifs_assert(ret < 0);
821  ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
822  ubifs_wbuf_sync_nolock(wbuf);
823  ubifs_ro_mode(c, ret);
824  mutex_unlock(&wbuf->io_mutex);
825  ubifs_return_leb(c, lp.lnum);
826  return ret;
827 }
828 
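/*
 * ubifs_gc_start_commit - GC work done when a commit starts (editorial
 * summary; the original kernel-doc is collapsed in this listing). As the
 * code below shows, it unmaps freeable non-index LEBs, marks GC'ed index
 * LEBs as OK to unmap once the commit finishes, and records freeable index
 * LEBs for unmapping after the commit. Returns zero on success or a
 * negative error code on failure.
 */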
840 int ubifs_gc_start_commit(struct ubifs_info *c)
841 {
842  struct ubifs_gced_idx_leb *idx_gc;
843  const struct ubifs_lprops *lp;
844  int err = 0, flags;
845 
846  ubifs_get_lprops(c);
847 
848  /*
849  * Unmap (non-index) freeable LEBs. Note that recovery requires that all
850  * wbufs are sync'd before this, which is done in 'do_commit()'.
851  */
852  while (1) {
853  lp = ubifs_fast_find_freeable(c);
854  if (IS_ERR(lp)) {
855  err = PTR_ERR(lp);
856  goto out;
857  }
858  if (!lp)
859  break;
860  ubifs_assert(!(lp->flags & LPROPS_TAKEN));
861  ubifs_assert(!(lp->flags & LPROPS_INDEX));
862  err = ubifs_leb_unmap(c, lp->lnum);
863  if (err)
864  goto out;
865  lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
866  if (IS_ERR(lp)) {
867  err = PTR_ERR(lp);
868  goto out;
869  }
870  ubifs_assert(!(lp->flags & LPROPS_TAKEN));
871  ubifs_assert(!(lp->flags & LPROPS_INDEX));
872  }
873 
874  /* Mark GC'd index LEBs OK to unmap after this commit finishes */
875  list_for_each_entry(idx_gc, &c->idx_gc, list)
876  idx_gc->unmap = 1;
877 
878  /* Record index freeable LEBs for unmapping after commit */
879  while (1) {
880  lp = ubifs_fast_find_frdi_idx(c);
881  if (IS_ERR(lp)) {
882  err = PTR_ERR(lp);
883  goto out;
884  }
885  if (!lp)
886  break;
887  idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
888  if (!idx_gc) {
889  err = -ENOMEM;
890  goto out;
891  }
892  ubifs_assert(!(lp->flags & LPROPS_TAKEN));
893  ubifs_assert(lp->flags & LPROPS_INDEX);
894  /* Don't release the LEB until after the next commit */
895  flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
896  lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
897  if (IS_ERR(lp)) {
898  err = PTR_ERR(lp);
899  kfree(idx_gc);
900  goto out;
901  }
902  ubifs_assert(lp->flags & LPROPS_TAKEN);
903  ubifs_assert(!(lp->flags & LPROPS_INDEX));
904  idx_gc->lnum = lp->lnum;
905  idx_gc->unmap = 1;
906  list_add(&idx_gc->list, &c->idx_gc);
907  }
908 out:
909  ubifs_release_lprops(c);
910  return err;
911 }
912 
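/*
 * ubifs_gc_end_commit - GC work done when a commit ends (editorial summary,
 * reconstructed from the code below). It unmaps the GC'ed index LEBs that
 * were marked for unmapping and clears their "taken" flag in the LEB
 * properties. Returns zero on success or a negative error code on failure.
 */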
919 int ubifs_gc_end_commit(struct ubifs_info *c)
920 {
921  struct ubifs_gced_idx_leb *idx_gc, *tmp;
922  struct ubifs_wbuf *wbuf;
923  int err = 0;
924 
925  wbuf = &c->jheads[GCHD].wbuf;
926  mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
927  list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
928  if (idx_gc->unmap) {
929  dbg_gc("LEB %d", idx_gc->lnum);
930  err = ubifs_leb_unmap(c, idx_gc->lnum);
931  if (err)
932  goto out;
933  err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
934  LPROPS_NC, 0, LPROPS_TAKEN, -1);
935  if (err)
936  goto out;
937  list_del(&idx_gc->list);
938  kfree(idx_gc);
939  }
940 out:
941  mutex_unlock(&wbuf->io_mutex);
942  return err;
943 }
944 
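/*
 * ubifs_destroy_idx_gc - drain the 'c->idx_gc' list (editorial summary,
 * based on the code below). It frees every element of the list of GC'ed
 * index LEBs, decrementing 'c->idx_gc_cnt' for each one.
 */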
953 void ubifs_destroy_idx_gc(struct ubifs_info *c)
954 {
955  while (!list_empty(&c->idx_gc)) {
956  struct ubifs_gced_idx_leb *idx_gc;
957 
958  idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
959  list);
960  c->idx_gc_cnt -= 1;
961  list_del(&idx_gc->list);
962  kfree(idx_gc);
963  }
964 }
965 
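/*
 * ubifs_get_idx_gc_leb - take a LEB from the list of GC'ed index LEBs
 * (editorial summary, based on the code below). It removes the first entry
 * of 'c->idx_gc' and returns its LEB number, or %-ENOSPC if the list is
 * empty; the caller updates 'c->idx_gc_cnt', as the in-line comment notes.
 */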
972 int ubifs_get_idx_gc_leb(struct ubifs_info *c)
973 {
974  struct ubifs_gced_idx_leb *idx_gc;
975  int lnum;
976 
977  if (list_empty(&c->idx_gc))
978  return -ENOSPC;
979  idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
980  lnum = idx_gc->lnum;
981  /* c->idx_gc_cnt is updated by the caller when lprops are updated */
982  list_del(&idx_gc->list);
983  kfree(idx_gc);
984  return lnum;
985 }