replay.c
/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file contains the journal replay code. It runs when the file-system is
 * being mounted and requires no locking.
 *
 * The larger the journal, the longer it takes to scan it, and hence the
 * longer it takes to mount UBIFS. This is why the journal has a limited size,
 * which may be changed depending on system requirements. But a larger journal
 * gives faster I/O speed because the index is written out less frequently, so
 * this is a trade-off. Also, the journal is indexed by the in-memory index
 * (TNC), so the larger the journal, the more memory its index may consume.
 */

#include "ubifs.h"
#include <linux/list_sort.h>

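/**
 * struct replay_entry - replay list entry.
 * @lnum: logical eraseblock number of the node
 * @offs: node offset
 * @len: node length
 * @deletion: non-zero if this entry corresponds to a node deletion
 * @sqnum: node sequence number
 * @list: links the replay list
 * @key: node key
 * @nm: directory entry name
 * @old_size: truncation old size
 * @new_size: truncation new size
 *
 * The replay process first scans all buds and builds the replay list, then
 * sorts the replay list in node sequence number order, and then inserts all
 * the replay entries to the TNC.
 */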
struct replay_entry {
        int lnum;
        int offs;
        int len;
        unsigned int deletion:1;
        unsigned long long sqnum;
        struct list_head list;
        union ubifs_key key;
        union {
                struct qstr nm;
                struct {
                        loff_t old_size;
                        loff_t new_size;
                };
        };
};

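/**
 * struct bud_entry - entry in the list of buds to replay.
 * @list: next bud in the list
 * @bud: bud description object
 * @sqnum: reference node sequence number
 * @free: free bytes in the bud
 * @dirty: dirty bytes in the bud
 */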
struct bud_entry {
        struct list_head list;
        struct ubifs_bud *bud;
        unsigned long long sqnum;
        int free;
        int dirty;
};

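/**
 * set_bud_lprops - set free and dirty space used by a bud.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function makes sure the LEB properties of bud @b are set correctly
 * after the replay. Returns zero in case of success and a negative error code
 * in case of failure.
 */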
static int set_bud_lprops(struct ubifs_info *c, struct bud_entry *b)
{
        const struct ubifs_lprops *lp;
        int err = 0, dirty;

        ubifs_get_lprops(c);

        lp = ubifs_lpt_lookup_dirty(c, b->bud->lnum);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }

        dirty = lp->dirty;
        if (b->bud->start == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
                /*
                 * The LEB was added to the journal with a starting offset of
                 * zero which means the LEB must have been empty. The LEB
                 * property values should be @lp->free == @c->leb_size and
                 * @lp->dirty == 0, but that is not the case. The reason is
                 * that the LEB had been garbage collected before it became
                 * the bud, and there was no commit in between. The garbage
                 * collector resets the free and dirty space without recording
                 * it anywhere except lprops, so if there was no commit then
                 * lprops does not have that information.
                 *
                 * We do not need to adjust free space because the scan has
                 * told us the exact value which is recorded in the replay
                 * entry as @b->free.
                 *
                 * However we do need to subtract from the dirty space the
                 * amount of space that the garbage collector reclaimed, which
                 * is the whole LEB minus the amount of space that was free.
                 */
                dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
                        lp->free, lp->dirty);
                dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", b->bud->lnum,
                       lp->free, lp->dirty);
                dirty -= c->leb_size - lp->free;
                /*
                 * If the replay order was perfect the dirty space would now
                 * be zero. The order is not perfect because the journal heads
                 * race with each other. This is not a problem but it does
                 * mean that the dirty space may temporarily exceed
                 * c->leb_size during the replay.
                 */
                if (dirty != 0)
                        dbg_mnt("LEB %d lp: %d free %d dirty replay: %d free %d dirty",
                                b->bud->lnum, lp->free, lp->dirty, b->free,
                                b->dirty);
        }
        lp = ubifs_change_lp(c, lp, b->free, dirty + b->dirty,
                             lp->flags | LPROPS_TAKEN, 0);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }

        /* Make sure the journal head points to the latest bud */
        err = ubifs_wbuf_seek_nolock(&c->jheads[b->bud->jhead].wbuf,
                                     b->bud->lnum, c->leb_size - b->free);

out:
        ubifs_release_lprops(c);
        return err;
}

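/**
 * set_buds_lprops - set free and dirty space for all replayed buds.
 * @c: UBIFS file-system description object
 *
 * This function sets LEB properties for all replayed buds. Returns zero in
 * case of success and a negative error code in case of failure.
 */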
static int set_buds_lprops(struct ubifs_info *c)
{
        struct bud_entry *b;
        int err;

        list_for_each_entry(b, &c->replay_buds, list) {
                err = set_bud_lprops(c, b);
                if (err)
                        return err;
        }

        return 0;
}

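/**
 * trun_remove_range - apply a replay entry for a truncation to the TNC.
 * @c: UBIFS file-system description object
 * @r: replay entry of truncation
 *
 * This function removes from the TNC the data nodes which lie entirely beyond
 * the new size: @min_blk is the first block fully past @r->new_size (rounded
 * up if the new size is not block-aligned) and @max_blk is the last block
 * covered by @r->old_size.
 */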
static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
{
        unsigned min_blk, max_blk;
        union ubifs_key min_key, max_key;
        ino_t ino;

        min_blk = r->new_size / UBIFS_BLOCK_SIZE;
        if (r->new_size & (UBIFS_BLOCK_SIZE - 1))
                min_blk += 1;

        max_blk = r->old_size / UBIFS_BLOCK_SIZE;
        if ((r->old_size & (UBIFS_BLOCK_SIZE - 1)) == 0)
                max_blk -= 1;

        ino = key_inum(c, &r->key);

        data_key_init(c, &min_key, ino, min_blk);
        data_key_init(c, &max_key, ino, max_blk);

        return ubifs_tnc_remove_range(c, &min_key, &max_key);
}

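/**
 * apply_replay_entry - apply a replay entry to the TNC.
 * @c: UBIFS file-system description object
 * @r: replay entry to apply
 *
 * Apply a replay entry to the TNC.
 */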
static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
{
        int err;

        dbg_mntk(&r->key, "LEB %d:%d len %d deletion %d sqnum %llu key ",
                 r->lnum, r->offs, r->len, r->deletion, r->sqnum);

        /* Set c->replay_sqnum to help deal with dangling branches. */
        c->replay_sqnum = r->sqnum;

        if (is_hash_key(c, &r->key)) {
                if (r->deletion)
                        err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
                else
                        err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
                                               r->len, &r->nm);
        } else {
                if (r->deletion)
                        switch (key_type(c, &r->key)) {
                        case UBIFS_INO_KEY:
                        {
                                ino_t inum = key_inum(c, &r->key);

                                err = ubifs_tnc_remove_ino(c, inum);
                                break;
                        }
                        case UBIFS_TRUN_KEY:
                                err = trun_remove_range(c, r);
                                break;
                        default:
                                err = ubifs_tnc_remove(c, &r->key);
                                break;
                        }
                else
                        err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,
                                            r->len);
                if (err)
                        return err;

                if (c->need_recovery)
                        err = ubifs_recover_size_accum(c, &r->key, r->deletion,
                                                       r->new_size);
        }

        return err;
}

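/**
 * replay_entries_cmp - compare 2 replay entries.
 * @priv: UBIFS file-system description object
 * @a: first replay entry
 * @b: second replay entry
 *
 * This is a comparison function for 'list_sort()' which compares 2 replay
 * entries @a and @b by comparing their sequence numbers. Returns %1 if @a has
 * greater sequence number and %-1 otherwise.
 */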
static int replay_entries_cmp(void *priv, struct list_head *a,
                              struct list_head *b)
{
        struct replay_entry *ra, *rb;

        cond_resched();
        if (a == b)
                return 0;

        ra = list_entry(a, struct replay_entry, list);
        rb = list_entry(b, struct replay_entry, list);
        ubifs_assert(ra->sqnum != rb->sqnum);
        if (ra->sqnum > rb->sqnum)
                return 1;
        return -1;
}

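/**
 * apply_replay_list - apply the replay list to the TNC.
 * @c: UBIFS file-system description object
 *
 * Apply all entries in the replay list to the TNC. Returns zero in case of
 * success and a negative error code in case of failure.
 */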
static int apply_replay_list(struct ubifs_info *c)
{
        struct replay_entry *r;
        int err;

        list_sort(c, &c->replay_list, &replay_entries_cmp);

        list_for_each_entry(r, &c->replay_list, list) {
                cond_resched();

                err = apply_replay_entry(c, r);
                if (err)
                        return err;
        }

        return 0;
}

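/**
 * destroy_replay_list - destroy the replay list.
 * @c: UBIFS file-system description object
 *
 * Destroy the replay list.
 */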
static void destroy_replay_list(struct ubifs_info *c)
{
        struct replay_entry *r, *tmp;

        list_for_each_entry_safe(r, tmp, &c->replay_list, list) {
                if (is_hash_key(c, &r->key))
                        kfree(r->nm.name);
                list_del(&r->list);
                kfree(r);
        }
}

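/**
 * insert_node - insert a node to the replay list.
 * @c: UBIFS file-system description object
 * @lnum: node logical eraseblock number
 * @offs: node offset
 * @len: node length
 * @key: node key
 * @sqnum: sequence number
 * @deletion: non-zero if this is a deletion
 * @used: number of bytes in use in a LEB
 * @old_size: truncation old size
 * @new_size: truncation new size
 *
 * This function inserts a scanned non-direntry node to the replay list. The
 * replay list contains &struct replay_entry elements, and we sort this list
 * in sequence number order before applying it. The replay list is applied at
 * the very end of the replay process. Since the list is sorted in sequence
 * number order, the older modifications are applied first. This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */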
static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
                       union ubifs_key *key, unsigned long long sqnum,
                       int deletion, int *used, loff_t old_size,
                       loff_t new_size)
{
        struct replay_entry *r;

        dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);

        if (key_inum(c, key) >= c->highest_inum)
                c->highest_inum = key_inum(c, key);

        r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        if (!deletion)
                *used += ALIGN(len, 8);
        r->lnum = lnum;
        r->offs = offs;
        r->len = len;
        r->deletion = !!deletion;
        r->sqnum = sqnum;
        key_copy(c, key, &r->key);
        r->old_size = old_size;
        r->new_size = new_size;

        list_add_tail(&r->list, &c->replay_list);
        return 0;
}

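/**
 * insert_dent - insert a directory entry node into the replay list.
 * @c: UBIFS file-system description object
 * @lnum: node logical eraseblock number
 * @offs: node offset
 * @len: node length
 * @key: node key
 * @name: directory entry name
 * @nlen: directory entry name length
 * @sqnum: sequence number
 * @deletion: non-zero if this is a deletion
 * @used: number of bytes in use in a LEB
 *
 * This function inserts a scanned directory entry node or an extended
 * attribute entry to the replay list. Returns zero in case of success and a
 * negative error code in case of failure.
 */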
static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
                       union ubifs_key *key, const char *name, int nlen,
                       unsigned long long sqnum, int deletion, int *used)
{
        struct replay_entry *r;
        char *nbuf;

        dbg_mntk(key, "add LEB %d:%d, key ", lnum, offs);
        if (key_inum(c, key) >= c->highest_inum)
                c->highest_inum = key_inum(c, key);

        r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        nbuf = kmalloc(nlen + 1, GFP_KERNEL);
        if (!nbuf) {
                kfree(r);
                return -ENOMEM;
        }

        if (!deletion)
                *used += ALIGN(len, 8);
        r->lnum = lnum;
        r->offs = offs;
        r->len = len;
        r->deletion = !!deletion;
        r->sqnum = sqnum;
        key_copy(c, key, &r->key);
        r->nm.len = nlen;
        memcpy(nbuf, name, nlen);
        nbuf[nlen] = '\0';
        r->nm.name = nbuf;

        list_add_tail(&r->list, &c->replay_list);
        return 0;
}

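/**
 * ubifs_validate_entry - validate directory or extended attribute entry node.
 * @c: UBIFS file-system description object
 * @dent: the node to validate
 *
 * This function validates directory or extended attribute entry node @dent.
 * Returns zero if the node is all right and %-EINVAL if not.
 */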
int ubifs_validate_entry(struct ubifs_info *c,
                         const struct ubifs_dent_node *dent)
{
        int key_type = key_type_flash(c, dent->key);
        int nlen = le16_to_cpu(dent->nlen);

        if (le32_to_cpu(dent->ch.len) != nlen + UBIFS_DENT_NODE_SZ + 1 ||
            dent->type >= UBIFS_ITYPES_CNT ||
            nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
            strnlen(dent->name, nlen) != nlen ||
            le64_to_cpu(dent->inum) > MAX_INUM) {
                ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ?
                          "directory entry" : "extended attribute entry");
                return -EINVAL;
        }

        if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
                ubifs_err("bad key type %d", key_type);
                return -EINVAL;
        }

        return 0;
}

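/**
 * is_last_bud - check if the bud is the last in the journal head.
 * @c: UBIFS file-system description object
 * @bud: bud description object
 *
 * This function checks if bud @bud is the last bud in its journal head. This
 * information is then used by 'replay_bud()' to decide whether the bud can
 * have corruptions or not. Indeed, only last buds can be corrupted by power
 * cuts. Returns %1 if this is the last bud, and %0 if not.
 */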
static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
        struct ubifs_jhead *jh = &c->jheads[bud->jhead];
        struct ubifs_bud *next;
        uint32_t data;
        int err;

        if (list_is_last(&bud->list, &jh->buds_list))
                return 1;

        /*
         * The following is a quirk to make sure we work correctly with UBIFS
         * images used with older UBIFS.
         *
         * Normally, the last bud will be the last in the journal head's list
         * of buds. However, there is one exception if the UBIFS image belongs
         * to older UBIFS. This is fairly unlikely: one would need to use old
         * UBIFS, then have a power cut exactly at the right point, and then
         * try to mount this image with new UBIFS.
         *
         * The exception is: it is possible to have 2 buds A and B, where A
         * goes before B, B is the last, bud B contains no data, and bud A is
         * corrupted at the end. The reason is that in older versions, when
         * the journal code switched to the next bud (from A to B), it first
         * added a log reference node for the new bud (B), and only after this
         * it synchronized the write-buffer of the current bud (A). But later
         * this was changed and UBIFS started to always synchronize the
         * write-buffer of the bud (A) before writing the log reference for
         * the new bud (B).
         *
         * But because older UBIFS always synchronized A's write-buffer before
         * writing to B, we can recognize this exceptional situation by
         * checking the contents of bud B - if it is empty, then A can be
         * treated as the last and we can recover it.
         *
         * TODO: remove this piece of code in a couple of years (today it is
         * 16.05.2011).
         */
        next = list_entry(bud->list.next, struct ubifs_bud, list);
        if (!list_is_last(&next->list, &jh->buds_list))
                return 0;

        err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1);
        if (err)
                return 0;

        return data == 0xFFFFFFFF;
}

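/**
 * replay_bud - replay a bud logical eraseblock.
 * @c: UBIFS file-system description object
 * @b: bud entry which describes the bud
 *
 * This function replays bud @bud, recovers it if needed, and adds all nodes
 * from this bud to the replay list. Returns zero in case of success and a
 * negative error code in case of failure.
 */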
static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
{
        int is_last = is_last_bud(c, b->bud);
        int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start;
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;

        dbg_mnt("replay bud LEB %d, head %d, offs %d, is_last %d",
                lnum, b->bud->jhead, offs, is_last);

        if (c->need_recovery && is_last)
                /*
                 * Recover only last LEBs in the journal heads, because power
                 * cuts may cause corruptions only in these LEBs, because only
                 * these LEBs could possibly be written to at the power cut
                 * time.
                 */
                sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
        else
                sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
        if (IS_ERR(sleb))
                return PTR_ERR(sleb);

        /*
         * The bud does not have to start from offset zero - the beginning of
         * the 'lnum' LEB may contain previously committed data. One of the
         * things we have to do in replay is to correctly update lprops with
         * newer information about this LEB.
         *
         * At this point lprops thinks that this LEB has 'c->leb_size - offs'
         * bytes of free space because it only contains information about
         * committed data.
         *
         * But we know that the real amount of free space is 'c->leb_size -
         * sleb->endpt', and the space in the 'lnum' LEB between 'offs' and
         * 'sleb->endpt' is used by bud data. We have to correctly calculate
         * how much of that data is dirty and update lprops with this
         * information.
         *
         * The dirt in that LEB region is comprised of padding nodes, deletion
         * nodes, truncation nodes and nodes which are obsoleted by subsequent
         * nodes in this LEB. So instead of calculating clean space, we
         * calculate used space ('used' variable).
         */

        list_for_each_entry(snod, &sleb->nodes, list) {
                int deletion = 0;

                cond_resched();

                if (snod->sqnum >= SQNUM_WATERMARK) {
                        ubifs_err("file system's life ended");
                        goto out_dump;
                }

                if (snod->sqnum > c->max_sqnum)
                        c->max_sqnum = snod->sqnum;

                switch (snod->type) {
                case UBIFS_INO_NODE:
                {
                        struct ubifs_ino_node *ino = snod->node;
                        loff_t new_size = le64_to_cpu(ino->size);

                        if (le32_to_cpu(ino->nlink) == 0)
                                deletion = 1;
                        err = insert_node(c, lnum, snod->offs, snod->len,
                                          &snod->key, snod->sqnum, deletion,
                                          &used, 0, new_size);
                        break;
                }
                case UBIFS_DATA_NODE:
                {
                        struct ubifs_data_node *dn = snod->node;
                        loff_t new_size = le32_to_cpu(dn->size) +
                                          key_block(c, &snod->key) *
                                          UBIFS_BLOCK_SIZE;

                        err = insert_node(c, lnum, snod->offs, snod->len,
                                          &snod->key, snod->sqnum, deletion,
                                          &used, 0, new_size);
                        break;
                }
                case UBIFS_DENT_NODE:
                case UBIFS_XENT_NODE:
                {
                        struct ubifs_dent_node *dent = snod->node;

                        err = ubifs_validate_entry(c, dent);
                        if (err)
                                goto out_dump;

                        err = insert_dent(c, lnum, snod->offs, snod->len,
                                          &snod->key, dent->name,
                                          le16_to_cpu(dent->nlen), snod->sqnum,
                                          !le64_to_cpu(dent->inum), &used);
                        break;
                }
                case UBIFS_TRUN_NODE:
                {
                        struct ubifs_trun_node *trun = snod->node;
                        loff_t old_size = le64_to_cpu(trun->old_size);
                        loff_t new_size = le64_to_cpu(trun->new_size);
                        union ubifs_key key;

                        /* Validate truncation node */
                        if (old_size < 0 || old_size > c->max_inode_sz ||
                            new_size < 0 || new_size > c->max_inode_sz ||
                            old_size <= new_size) {
                                ubifs_err("bad truncation node");
                                goto out_dump;
                        }

                        /*
                         * Create a fake truncation key just to use the same
                         * functions which expect nodes to have keys.
                         */
                        trun_key_init(c, &key, le32_to_cpu(trun->inum));
                        err = insert_node(c, lnum, snod->offs, snod->len,
                                          &key, snod->sqnum, 1, &used,
                                          old_size, new_size);
                        break;
                }
                default:
                        ubifs_err("unexpected node type %d in bud LEB %d:%d",
                                  snod->type, lnum, snod->offs);
                        err = -EINVAL;
                        goto out_dump;
                }
                if (err)
                        goto out;
        }

        ubifs_assert(ubifs_search_bud(c, lnum));
        ubifs_assert(sleb->endpt - offs >= used);
        ubifs_assert(sleb->endpt % c->min_io_size == 0);

        b->dirty = sleb->endpt - offs - used;
        b->free = c->leb_size - sleb->endpt;
        dbg_mnt("bud LEB %d replayed: dirty %d, free %d",
                lnum, b->dirty, b->free);

out:
        ubifs_scan_destroy(sleb);
        return err;

out_dump:
        ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
        ubifs_dump_node(c, snod->node);
        ubifs_scan_destroy(sleb);
        return -EINVAL;
}

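/**
 * replay_buds - replay all buds.
 * @c: UBIFS file-system description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */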
static int replay_buds(struct ubifs_info *c)
{
        struct bud_entry *b;
        int err;
        unsigned long long prev_sqnum = 0;

        list_for_each_entry(b, &c->replay_buds, list) {
                err = replay_bud(c, b);
                if (err)
                        return err;

                ubifs_assert(b->sqnum > prev_sqnum);
                prev_sqnum = b->sqnum;
        }

        return 0;
}

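/**
 * destroy_bud_list - destroy the list of buds to replay.
 * @c: UBIFS file-system description object
 */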
static void destroy_bud_list(struct ubifs_info *c)
{
        struct bud_entry *b;

        while (!list_empty(&c->replay_buds)) {
                b = list_entry(c->replay_buds.next, struct bud_entry, list);
                list_del(&b->list);
                kfree(b);
        }
}

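/**
 * add_replay_bud - add a bud to the list of buds to replay.
 * @c: UBIFS file-system description object
 * @lnum: bud logical eraseblock number to replay
 * @offs: bud start offset
 * @jhead: journal head to which this bud belongs
 * @sqnum: reference node sequence number
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */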
static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
                          unsigned long long sqnum)
{
        struct ubifs_bud *bud;
        struct bud_entry *b;

        dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead);

        bud = kmalloc(sizeof(struct ubifs_bud), GFP_KERNEL);
        if (!bud)
                return -ENOMEM;

        b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL);
        if (!b) {
                kfree(bud);
                return -ENOMEM;
        }

        bud->lnum = lnum;
        bud->start = offs;
        bud->jhead = jhead;
        ubifs_add_bud(c, bud);

        b->bud = bud;
        b->sqnum = sqnum;
        list_add_tail(&b->list, &c->replay_buds);

        return 0;
}

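/**
 * validate_ref - validate a reference node.
 * @c: UBIFS file-system description object
 * @ref: the reference node to validate
 *
 * This function returns %1 if a bud reference already exists for the LEB, %0
 * if the reference node is new, and %-EINVAL if validation failed.
 */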
static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
{
        struct ubifs_bud *bud;
        int lnum = le32_to_cpu(ref->lnum);
        unsigned int offs = le32_to_cpu(ref->offs);
        unsigned int jhead = le32_to_cpu(ref->jhead);

        /*
         * ref->offs may point to the end of LEB when the journal head points
         * to the end of LEB and we write a reference node for it during
         * commit. This is why the check is 'offs > c->leb_size' rather than
         * 'offs >= c->leb_size'.
         */
        if (jhead >= c->jhead_cnt || lnum >= c->leb_cnt ||
            lnum < c->main_first || offs > c->leb_size ||
            offs & (c->min_io_size - 1))
                return -EINVAL;

        /* Make sure we have not already looked at this bud */
        bud = ubifs_search_bud(c, lnum);
        if (bud) {
                if (bud->jhead == jhead && bud->start <= offs)
                        return 1;
                ubifs_err("bud at LEB %d:%d was already referred", lnum, offs);
                return -EINVAL;
        }

        return 0;
}

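/**
 * replay_log_leb - replay a log logical eraseblock.
 * @c: UBIFS file-system description object
 * @lnum: log logical eraseblock to replay
 * @offs: offset to start replaying from
 * @sbuf: scan buffer
 *
 * This function replays a log LEB and returns zero in case of success, %1 if
 * this is the last LEB in the log, and a negative error code in case of
 * failure.
 */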
static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
        int err;
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
        const struct ubifs_cs_node *node;

        dbg_mnt("replay log LEB %d:%d", lnum, offs);
        sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery);
        if (IS_ERR(sleb)) {
                if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
                        return PTR_ERR(sleb);
                /*
                 * Note, the below function will recover this log LEB only if
                 * it is the last, because unclean reboots can possibly corrupt
                 * only the tail of the log.
                 */
                sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
                if (IS_ERR(sleb))
                        return PTR_ERR(sleb);
        }

        if (sleb->nodes_cnt == 0) {
                err = 1;
                goto out;
        }

        node = sleb->buf;
        snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
        if (c->cs_sqnum == 0) {
                /*
                 * This is the first log LEB we are looking at, make sure that
                 * the first node is a commit start node. Also record its
                 * sequence number so that UBIFS can determine where the log
                 * ends, because all nodes written after the commit start node
                 * have higher sequence numbers.
                 */
                if (snod->type != UBIFS_CS_NODE) {
                        ubifs_err("first log node at LEB %d:%d is not CS node",
                                  lnum, offs);
                        goto out_dump;
                }
                if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
                        ubifs_err("first CS node at LEB %d:%d has wrong commit number %llu expected %llu",
                                  lnum, offs,
                                  (unsigned long long)le64_to_cpu(node->cmt_no),
                                  c->cmt_no);
                        goto out_dump;
                }

                c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
                dbg_mnt("commit start sqnum %llu", c->cs_sqnum);
        }

        if (snod->sqnum < c->cs_sqnum) {
                /*
                 * This means that we have reached the end of the log and are
                 * now looking at older log data, which was already committed
                 * but whose eraseblock was not erased (UBIFS only un-maps
                 * it). So we have to exit with the "end of log" code.
                 */
                err = 1;
                goto out;
        }

        /* Make sure the first node sits at offset zero of the LEB */
        if (snod->offs != 0) {
                ubifs_err("first node is not at zero offset");
                goto out_dump;
        }

        list_for_each_entry(snod, &sleb->nodes, list) {
                cond_resched();

                if (snod->sqnum >= SQNUM_WATERMARK) {
                        ubifs_err("file system's life ended");
                        goto out_dump;
                }

                if (snod->sqnum < c->cs_sqnum) {
                        ubifs_err("bad sqnum %llu, commit sqnum %llu",
                                  snod->sqnum, c->cs_sqnum);
                        goto out_dump;
                }

                if (snod->sqnum > c->max_sqnum)
                        c->max_sqnum = snod->sqnum;

                switch (snod->type) {
                case UBIFS_REF_NODE: {
                        const struct ubifs_ref_node *ref = snod->node;

                        err = validate_ref(c, ref);
                        if (err == 1)
                                break; /* Already have this bud */
                        if (err)
                                goto out_dump;

                        err = add_replay_bud(c, le32_to_cpu(ref->lnum),
                                             le32_to_cpu(ref->offs),
                                             le32_to_cpu(ref->jhead),
                                             snod->sqnum);
                        if (err)
                                goto out;

                        break;
                }
                case UBIFS_CS_NODE:
                        /* Make sure it sits at the beginning of LEB */
                        if (snod->offs != 0) {
                                ubifs_err("unexpected node in log");
                                goto out_dump;
                        }
                        break;
                default:
                        ubifs_err("unexpected node in log");
                        goto out_dump;
                }
        }

        if (sleb->endpt || c->lhead_offs >= c->leb_size) {
                c->lhead_lnum = lnum;
                c->lhead_offs = sleb->endpt;
        }

        err = !sleb->endpt;
out:
        ubifs_scan_destroy(sleb);
        return err;

out_dump:
        ubifs_err("log error detected while replaying the log at LEB %d:%d",
                  lnum, offs + snod->offs);
        ubifs_dump_node(c, snod->node);
        ubifs_scan_destroy(sleb);
        return -EINVAL;
}

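/**
 * take_ihead - update the status of the index head in lprops to 'taken'.
 * @c: UBIFS file-system description object
 *
 * This function returns the amount of free space in the index head LEB or a
 * negative error code.
 */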
static int take_ihead(struct ubifs_info *c)
{
        const struct ubifs_lprops *lp;
        int err, free;

        ubifs_get_lprops(c);

        lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }

        free = lp->free;

        lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
                             lp->flags | LPROPS_TAKEN, 0);
        if (IS_ERR(lp)) {
                err = PTR_ERR(lp);
                goto out;
        }

        err = free;
out:
        ubifs_release_lprops(c);
        return err;
}

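/**
 * ubifs_replay_journal - replay journal.
 * @c: UBIFS file-system description object
 *
 * This function scans the journal, replays and cleans it up. It makes sure
 * all memory data structures related to the uncommitted journal are built
 * (dirty TNC tree, lprops, etc).
 */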
int ubifs_replay_journal(struct ubifs_info *c)
{
        int err, lnum, free;

        BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);

        /* Update the status of the index head in lprops to 'taken' */
        free = take_ihead(c);
        if (free < 0)
                return free; /* Error code */

        if (c->ihead_offs != c->leb_size - free) {
                ubifs_err("bad index head LEB %d:%d", c->ihead_lnum,
                          c->ihead_offs);
                return -EINVAL;
        }

        dbg_mnt("start replaying the journal");
        c->replaying = 1;
        lnum = c->ltail_lnum = c->lhead_lnum;

        do {
                err = replay_log_leb(c, lnum, 0, c->sbuf);
                if (err == 1)
                        /* We hit the end of the log */
                        break;
                if (err)
                        goto out;
                lnum = ubifs_next_log_lnum(c, lnum);
        } while (lnum != c->ltail_lnum);

        err = replay_buds(c);
        if (err)
                goto out;

        err = apply_replay_list(c);
        if (err)
                goto out;

        err = set_buds_lprops(c);
        if (err)
                goto out;

        /*
         * UBIFS budgeting calculations use the @c->bi.uncommitted_idx
         * variable to roughly estimate index growth. Things like
         * @c->bi.min_idx_lebs depend on it. This means we have to initialize
         * it to make sure budgeting works properly.
         */
        c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt);
        c->bi.uncommitted_idx *= c->max_idx_node_sz;

        ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
        dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu",
                c->lhead_lnum, c->lhead_offs, c->max_sqnum,
                (unsigned long)c->highest_inum);
out:
        destroy_replay_list(c);
        destroy_bud_list(c);
        c->replaying = 0;
        return err;
}