Linux Kernel 3.7.1
recovery.c
/*
 * linux/fs/jbd/recovery.c
 *
 * Written by Stephen C. Tweedie <[email protected]>, 1999
 *
 * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal recovery routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#endif

/*
 * Maintain information about the progress of the recovery job, so that
 * the different passes can carry information between them.
 */
struct recovery_info
{
        tid_t           start_transaction;
        tid_t           end_transaction;

        int             nr_replays;
        int             nr_revokes;
        int             nr_revoke_hits;
};

enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
static int do_one_pass(journal_t *journal,
                        struct recovery_info *info, enum passtype pass);
static int scan_revoke_records(journal_t *, struct buffer_head *,
                               tid_t, struct recovery_info *);

#ifdef __KERNEL__

/* Release readahead buffers after use */
static void journal_brelse_array(struct buffer_head *b[], int n)
{
        while (--n >= 0)
                brelse(b[n]);
}


/*
 * When reading from the journal, we are going through the block device
 * layer directly and so there is no readahead being done for us.  We
 * need to implement any readahead ourselves if we want it to happen at
 * all.  Recovery is basically one long sequential read, so make sure we
 * do the IO in reasonably large chunks.
 *
 * This is not so critical that we need to be enormously clever about
 * the readahead size, though.  128K is a purely arbitrary, good-enough
 * fixed value.
 */

#define MAXBUF 8
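/*
 * Worked example (illustrative, not part of the original source): with a
 * 4096-byte journal block size the 128K window covers 128 * 1024 / 4096 =
 * 32 journal blocks, so one do_readahead() call submits at most 32 buffers
 * in batches of MAXBUF (8) through ll_rw_block(); with 1024-byte blocks the
 * window is 128 blocks, still clamped to j_maxlen.
 */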
static int do_readahead(journal_t *journal, unsigned int start)
{
        int err;
        unsigned int max, nbufs, next;
        unsigned int blocknr;
        struct buffer_head *bh;

        struct buffer_head *bufs[MAXBUF];

        /* Do up to 128K of readahead */
        max = start + (128 * 1024 / journal->j_blocksize);
        if (max > journal->j_maxlen)
                max = journal->j_maxlen;

        /* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
         * a time to the block device IO layer. */

        nbufs = 0;

        for (next = start; next < max; next++) {
                err = journal_bmap(journal, next, &blocknr);

                if (err) {
                        printk(KERN_ERR "JBD: bad block at offset %u\n",
                               next);
                        goto failed;
                }

                bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
                if (!bh) {
                        err = -ENOMEM;
                        goto failed;
                }

                if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
                        bufs[nbufs++] = bh;
                        if (nbufs == MAXBUF) {
                                ll_rw_block(READ, nbufs, bufs);
                                journal_brelse_array(bufs, nbufs);
                                nbufs = 0;
                        }
                } else
                        brelse(bh);
        }

        if (nbufs)
                ll_rw_block(READ, nbufs, bufs);
        err = 0;

failed:
        if (nbufs)
                journal_brelse_array(bufs, nbufs);
        return err;
}

#endif /* __KERNEL__ */

/*
 * Read a block from the journal
 */

static int jread(struct buffer_head **bhp, journal_t *journal,
                 unsigned int offset)
{
        int err;
        unsigned int blocknr;
        struct buffer_head *bh;

        *bhp = NULL;

        if (offset >= journal->j_maxlen) {
                printk(KERN_ERR "JBD: corrupted journal superblock\n");
                return -EIO;
        }

        err = journal_bmap(journal, offset, &blocknr);

        if (err) {
                printk(KERN_ERR "JBD: bad block at offset %u\n",
                       offset);
                return err;
        }

        bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
        if (!bh)
                return -ENOMEM;

        if (!buffer_uptodate(bh)) {
                /* If this is a brand new buffer, start readahead.
                 * Otherwise, we assume we are already reading it. */
                if (!buffer_req(bh))
                        do_readahead(journal, offset);
                wait_on_buffer(bh);
        }

        if (!buffer_uptodate(bh)) {
                printk(KERN_ERR "JBD: Failed to read block at offset %u\n",
                       offset);
                brelse(bh);
                return -EIO;
        }

        *bhp = bh;
        return 0;
}


/*
 * Count the number of in-use tags in a journal descriptor block.
 */
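/*
 * Layout sketch (added for clarity; assumes the usual JBD on-disk format):
 * a descriptor block starts with a journal_header_t and is followed by a
 * packed list of journal_block_tag_t entries.  A tag without
 * JFS_FLAG_SAME_UUID in t_flags is followed by a 16-byte UUID, and a tag
 * carrying JFS_FLAG_LAST_TAG ends the list, which is exactly how the loop
 * below steps tagp through the block.
 */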

static int count_tags(struct buffer_head *bh, int size)
{
        char *                  tagp;
        journal_block_tag_t *   tag;
        int                     nr = 0;

        tagp = &bh->b_data[sizeof(journal_header_t)];

        while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
                tag = (journal_block_tag_t *) tagp;

                nr++;
                tagp += sizeof(journal_block_tag_t);
                if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
                        tagp += 16;

                if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
                        break;
        }

        return nr;
}


/* Make sure we wrap around the log correctly! */
#define wrap(journal, var)                                              \
do {                                                                    \
        if (var >= (journal)->j_last)                                   \
                var -= ((journal)->j_last - (journal)->j_first);        \
} while (0)
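/*
 * Example (illustrative): with j_first == 1 and j_last == 1024, a log
 * offset that has just been incremented to 1024 wraps back to
 * 1024 - (1024 - 1) = 1, i.e. the first usable journal block.
 */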

/**
 * journal_recover - recovers an on-disk journal
 * @journal: the journal to recover
 *
 * The primary function for recovering the log contents when mounting a
 * journaled device.
 *
 * Recovery is done in three passes.  Pass 1: Scan the log to find the end of
 * the log, and to find out which transactions were actually committed.  Pass
 * 2: Search for revoke records in the log.  Pass 3: For each data block
 * mentioned in the log, either replay it back into the filesystem, or skip
 * it if it was revoked.
 */
int journal_recover(journal_t *journal)
{
        int                     err, err2;
        journal_superblock_t *  sb;

        struct recovery_info    info;

        memset(&info, 0, sizeof(info));
        sb = journal->j_superblock;

        /*
         * The journal superblock's s_start field (the current log head)
         * is always zero if, and only if, the journal was cleanly
         * unmounted.
         */

        if (!sb->s_start) {
                jbd_debug(1, "No recovery required, last transaction %d\n",
                          be32_to_cpu(sb->s_sequence));
                journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
                return 0;
        }

        err = do_one_pass(journal, &info, PASS_SCAN);
        if (!err)
                err = do_one_pass(journal, &info, PASS_REVOKE);
        if (!err)
                err = do_one_pass(journal, &info, PASS_REPLAY);

        jbd_debug(1, "JBD: recovery, exit status %d, "
                  "recovered transactions %u to %u\n",
                  err, info.start_transaction, info.end_transaction);
        jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
                  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);

        /* Restart the log at the next transaction ID, thus invalidating
         * any existing commit records in the log. */
        journal->j_transaction_sequence = ++info.end_transaction;

        journal_clear_revoke(journal);
        err2 = sync_blockdev(journal->j_fs_dev);
        if (!err)
                err = err2;
        /* Flush disk caches to get replayed data onto permanent storage */
        if (journal->j_flags & JFS_BARRIER) {
                err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
                if (!err)
                        err = err2;
        }

        return err;
}
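
/*
 * Usage sketch (hypothetical caller, added for illustration): mount-time
 * code that has already read and validated the journal superblock would
 * typically do something like
 *
 *      err = journal_recover(journal);
 *      if (err)
 *              return err;     /* replay failed; refuse to mount *\/
 *
 * In the kernel itself this call is made from journal_load().
 */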

/**
 * journal_skip_recovery - Start journal and wipe existing records
 * @journal: journal to startup
 *
 * Locate any valid recovery information from the journal and set up the
 * journal structures in memory to ignore it (presumably because the
 * caller has evidence that it is out of date).
 *
 * We perform one pass over the journal to allow us to tell the user how
 * much recovery information is being erased, and to let us initialise
 * the journal transaction sequence numbers to the next unused ID.
 */
int journal_skip_recovery(journal_t *journal)
{
        int                     err;
        struct recovery_info    info;

        memset(&info, 0, sizeof(info));

        err = do_one_pass(journal, &info, PASS_SCAN);

        if (err) {
                printk(KERN_ERR "JBD: error %d scanning journal\n", err);
                ++journal->j_transaction_sequence;
        } else {
#ifdef CONFIG_JBD_DEBUG
                int dropped = info.end_transaction -
                        be32_to_cpu(journal->j_superblock->s_sequence);
                jbd_debug(1,
                          "JBD: ignoring %d transaction%s from the journal.\n",
                          dropped, (dropped == 1) ? "" : "s");
#endif
                journal->j_transaction_sequence = ++info.end_transaction;
        }

        journal->j_tail = 0;
        return err;
}
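
/*
 * Note (added for context): the typical caller is journal_wipe(), which
 * uses this to discard the log contents without replaying them when the
 * caller has decided the recovery information is out of date.
 */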

static int do_one_pass(journal_t *journal,
                        struct recovery_info *info, enum passtype pass)
{
        unsigned int            first_commit_ID, next_commit_ID;
        unsigned int            next_log_block;
        int                     err, success = 0;
        journal_superblock_t *  sb;
        journal_header_t *      tmp;
        struct buffer_head *    bh;
        unsigned int            sequence;
        int                     blocktype;

        /*
         * First thing is to establish what we expect to find in the log
         * (in terms of transaction IDs), and where (in terms of log
         * block offsets): query the superblock.
         */

        sb = journal->j_superblock;
        next_commit_ID = be32_to_cpu(sb->s_sequence);
        next_log_block = be32_to_cpu(sb->s_start);

        first_commit_ID = next_commit_ID;
        if (pass == PASS_SCAN)
                info->start_transaction = first_commit_ID;

        jbd_debug(1, "Starting recovery pass %d\n", pass);

        /*
         * Now we walk through the log, transaction by transaction,
         * making sure that each transaction has a commit block in the
         * expected place.  Each complete transaction gets replayed back
         * into the main filesystem.
         */
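        /*
         * Illustration (added, not part of the original source): each
         * transaction in the log is a run of revoke blocks and descriptor
         * blocks (every descriptor followed by the data blocks it
         * describes), terminated by a commit block, all stamped with the
         * same sequence number, e.g.
         *
         *      [R] [D] [data] [data] [D] [data] [C]
         *
         * The switch below handles exactly these three block types and
         * stops the scan at anything else.
         */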

        while (1) {
                int                     flags;
                char *                  tagp;
                journal_block_tag_t *   tag;
                struct buffer_head *    obh;
                struct buffer_head *    nbh;

                cond_resched();

                /* If we already know where to stop the log traversal,
                 * check right now that we haven't gone past the end of
                 * the log. */

                if (pass != PASS_SCAN)
                        if (tid_geq(next_commit_ID, info->end_transaction))
                                break;

                jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n",
                          next_commit_ID, next_log_block, journal->j_last);

                /* Skip over each chunk of the transaction looking for
                 * either the next descriptor block or the final commit
                 * record. */

                jbd_debug(3, "JBD: checking block %u\n", next_log_block);
                err = jread(&bh, journal, next_log_block);
                if (err)
                        goto failed;

                next_log_block++;
                wrap(journal, next_log_block);

                /* What kind of buffer is it?
                 *
                 * If it is a descriptor block, check that it has the
                 * expected sequence number.  Otherwise, we're all done
                 * here. */

                tmp = (journal_header_t *)bh->b_data;

                if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
                        brelse(bh);
                        break;
                }

                blocktype = be32_to_cpu(tmp->h_blocktype);
                sequence = be32_to_cpu(tmp->h_sequence);
                jbd_debug(3, "Found magic %d, sequence %d\n",
                          blocktype, sequence);

                if (sequence != next_commit_ID) {
                        brelse(bh);
                        break;
                }

                /* OK, we have a valid descriptor block which matches
                 * all of the sequence number checks.  What are we going
                 * to do with it?  That depends on the pass... */

                switch (blocktype) {
                case JFS_DESCRIPTOR_BLOCK:
                        /* If it is a valid descriptor block, replay it
                         * in pass REPLAY; otherwise, just skip over the
                         * blocks it describes. */
                        if (pass != PASS_REPLAY) {
                                next_log_block +=
                                        count_tags(bh, journal->j_blocksize);
                                wrap(journal, next_log_block);
                                brelse(bh);
                                continue;
                        }

                        /* A descriptor block: we can now write all of
                         * the data blocks.  Yay, useful work is finally
                         * getting done here! */

                        tagp = &bh->b_data[sizeof(journal_header_t)];
                        while ((tagp - bh->b_data + sizeof(journal_block_tag_t))
                               <= journal->j_blocksize) {
                                unsigned int io_block;

                                tag = (journal_block_tag_t *) tagp;
                                flags = be32_to_cpu(tag->t_flags);

                                io_block = next_log_block++;
                                wrap(journal, next_log_block);
                                err = jread(&obh, journal, io_block);
                                if (err) {
                                        /* Recover what we can, but
                                         * report failure at the end. */
                                        success = err;
                                        printk(KERN_ERR
                                               "JBD: IO error %d recovering "
                                               "block %u in log\n",
                                               err, io_block);
                                } else {
                                        unsigned int blocknr;

                                        J_ASSERT(obh != NULL);
                                        blocknr = be32_to_cpu(tag->t_blocknr);

                                        /* If the block has been
                                         * revoked, then we're all done
                                         * here. */
                                        if (journal_test_revoke
                                            (journal, blocknr,
                                             next_commit_ID)) {
                                                brelse(obh);
                                                ++info->nr_revoke_hits;
                                                goto skip_write;
                                        }

                                        /* Find a buffer for the new
                                         * data being restored */
                                        nbh = __getblk(journal->j_fs_dev,
                                                       blocknr,
                                                       journal->j_blocksize);
                                        if (nbh == NULL) {
                                                printk(KERN_ERR
                                                       "JBD: Out of memory "
                                                       "during recovery.\n");
                                                err = -ENOMEM;
                                                brelse(bh);
                                                brelse(obh);
                                                goto failed;
                                        }

                                        lock_buffer(nbh);
                                        memcpy(nbh->b_data, obh->b_data,
                                               journal->j_blocksize);
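                                        /* Escaped blocks (added note): a data
                                         * block whose first four bytes happen
                                         * to match the JBD magic number is
                                         * written to the journal with those
                                         * bytes cleared and JFS_FLAG_ESCAPE
                                         * set in its tag, so it cannot be
                                         * mistaken for a descriptor block.
                                         * Replay has to put the magic back
                                         * before writing the block home. */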
                                        if (flags & JFS_FLAG_ESCAPE) {
                                                *((__be32 *)nbh->b_data) =
                                                cpu_to_be32(JFS_MAGIC_NUMBER);
                                        }

                                        BUFFER_TRACE(nbh, "marking dirty");
                                        set_buffer_uptodate(nbh);
                                        mark_buffer_dirty(nbh);
                                        BUFFER_TRACE(nbh, "marking uptodate");
                                        ++info->nr_replays;
                                        /* ll_rw_block(WRITE, 1, &nbh); */
                                        unlock_buffer(nbh);
                                        brelse(obh);
                                        brelse(nbh);
                                }

                        skip_write:
                                tagp += sizeof(journal_block_tag_t);
                                if (!(flags & JFS_FLAG_SAME_UUID))
                                        tagp += 16;

                                if (flags & JFS_FLAG_LAST_TAG)
                                        break;
                        }

                        brelse(bh);
                        continue;

                case JFS_COMMIT_BLOCK:
                        /* Found an expected commit block: not much to
                         * do other than move on to the next sequence
                         * number. */
                        brelse(bh);
                        next_commit_ID++;
                        continue;

                case JFS_REVOKE_BLOCK:
                        /* If we aren't in the REVOKE pass, then we can
                         * just skip over this block. */
                        if (pass != PASS_REVOKE) {
                                brelse(bh);
                                continue;
                        }

                        err = scan_revoke_records(journal, bh,
                                                  next_commit_ID, info);
                        brelse(bh);
                        if (err)
                                goto failed;
                        continue;

                default:
                        jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
                                  blocktype);
                        brelse(bh);
                        goto done;
                }
        }

 done:
        /*
         * We broke out of the log scan loop: either we came to the
         * known end of the log or we found an unexpected block in the
         * log.  If the latter happened, then we know that the "current"
         * transaction marks the end of the valid log.
         */

        if (pass == PASS_SCAN)
                info->end_transaction = next_commit_ID;
        else {
                /* It's really bad news if different passes end up at
                 * different places (but possible due to IO errors). */
                if (info->end_transaction != next_commit_ID) {
                        printk(KERN_ERR "JBD: recovery pass %d ended at "
                               "transaction %u, expected %u\n",
                               pass, next_commit_ID, info->end_transaction);
                        if (!success)
                                success = -EIO;
                }
        }

        return success;

 failed:
        return err;
}


/* Scan a revoke record, marking all blocks mentioned as revoked. */

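/*
 * Record layout sketch (added for clarity): a revoke block carries a
 * journal_revoke_header_t (the common journal_header_t plus r_count, the
 * number of bytes of the block that are in use) followed by an array of
 * big-endian 32-bit block numbers, which is why the loop below starts
 * just past the header and advances offset by 4 per revoked block.
 */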
static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
                               tid_t sequence, struct recovery_info *info)
{
        journal_revoke_header_t *header;
        int offset, max;

        header = (journal_revoke_header_t *) bh->b_data;
        offset = sizeof(journal_revoke_header_t);
        max = be32_to_cpu(header->r_count);

        while (offset < max) {
                unsigned int blocknr;
                int err;

                blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
                offset += 4;
                err = journal_set_revoke(journal, blocknr, sequence);
                if (err)
                        return err;
                ++info->nr_revokes;
        }
        return 0;
}