Linux Kernel 3.7.1
lops.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_rgrpd *rgd = gl->gl_object;
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == 0)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_len);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: The AIL list for the current log flush
 *
 * Marks the buffer dirty again, drops its pinned state and moves it
 * onto the ai_ail1_list so that writeback of the in-place block can
 * be tracked.
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

/*
 * Advance the log flush head by one block, wrapping back to the start
 * of the journal when the end is reached.
 */
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}

static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) {
		if (lbn >= je->lblock && lbn < je->lblock + je->blocks) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

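/*
 * Illustrative sketch, not part of this file: the mapping performed
 * by gfs2_log_bmap() above. The journal is described by a list of
 * extents, each mapping a run of logical journal blocks onto
 * contiguous disk blocks. The names below (jext, jext_bmap) are
 * simplified stand-ins for the real struct gfs2_journal_extent.
 */
#if 0
struct jext { u64 lblock; u64 dblock; u64 blocks; };

static u64 jext_bmap(const struct jext *ext, unsigned int n, unsigned int lbn)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		/* Does logical block lbn fall inside extent i? */
		if (lbn >= ext[i].lblock &&
		    lbn < ext[i].lblock + ext[i].blocks)
			return ext[i].dblock + (lbn - ext[i].lblock);
	}
	return -1;	/* no extent covers lbn: journal map is broken */
}
#endif
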
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * Finds the buffers covered by this bio_vec, sets the write error
 * flag on them if required, then unlocks and releases them. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */
static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
				  int error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			set_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 * @error: Status of i/o request
 *
 * Each bio_vec contains either data from the pagecache or data
 * allocated from the gfs2_page_pool mempool. This is mirrored
 * in gfs2_log_alloc_bio.
 */
static void gfs2_end_log_write(struct bio *bio, int error)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	int i;

	if (error) {
		sdp->sd_log_error = error;
		fs_err(sdp, "Error %d writing to log\n", error);
	}

	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, error);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_flush_bio - Submit any pending log bio
 * @sdp: The superblock
 * @rw: The rw flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */
void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
{
	if (sdp->sd_log_bio) {
		atomic_inc(&sdp->sd_log_in_flight);
		submit_bio(rw, sdp->sd_log_bio);
		sdp->sd_log_bio = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a new bio for log writing
 * @sdp: The superblock
 * @blkno: The next device block number we want to write to
 *
 * This should never be called when there is already a cached bio in
 * the super block. When it returns, there will be a cached bio in the
 * super block with as many bio_vecs as the device will accept.
 */
static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct super_block *sb = sdp->sd_vfs;
	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
	struct bio *bio;

	BUG_ON(sdp->sd_log_bio);

	while (1) {
		bio = bio_alloc(GFP_NOIO, nrvecs);
		if (likely(bio))
			break;
		nrvecs = max(nrvecs/2, 1U);
	}

	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = gfs2_end_log_write;
	bio->bi_private = sdp;

	sdp->sd_log_bio = bio;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The superblock
 * @blkno: The device block number we want to write to
 *
 * If there is a cached bio and the next block is sequential with the
 * previous one, return it; otherwise flush the cached bio to the
 * device first. If there is no cached bio, or we just flushed it,
 * allocate a new one.
 */
static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
{
	struct bio *bio = sdp->sd_log_bio;
	u64 nblk;

	if (bio) {
		nblk = bio->bi_sector + bio_sectors(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk)
			return bio;
		gfs2_log_flush_bio(sdp, WRITE);
	}

	return gfs2_log_alloc_bio(sdp, blkno);
}

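/*
 * Illustrative sketch, not part of this file: the contiguity test in
 * gfs2_log_get_bio() above. bi_sector counts 512-byte sectors, so the
 * end of the current bio is converted back to a filesystem block
 * number via sd_fsb2bb_shift (3 for 4k blocks, since 4096/512 == 8 ==
 * 1 << 3) before being compared with the block we want to append.
 * The function name here is illustrative.
 */
#if 0
static int log_blk_is_contiguous(u64 bi_sector, unsigned int sectors,
				 unsigned int fsb2bb_shift, u64 blkno)
{
	u64 next_fs_blk = (bi_sector + sectors) >> fsb2bb_shift;

	return next_fs_blk == blkno;
}
#endif
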
/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */
static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
			   unsigned size, unsigned offset)
{
	u64 blkno = gfs2_log_bmap(sdp);
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		gfs2_log_flush_bio(sdp, WRITE);
		bio = gfs2_log_alloc_bio(sdp, blkno);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership of the page passes to
 * the bio and not the caller.
 */
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0);
}

static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	if (!list_empty(&bd->bd_list))
		return;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
		printk(KERN_ERR
		       "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
		       (unsigned long long)bd->bd_bh->b_blocknr);
		BUG();
	}
	gfs2_pin(sdp, bd->bd_bh);
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	sdp->sd_log_num_buf++;
	list_add(&bd->bd_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
}

/*
 * A journaled data block whose first four bytes happen to equal
 * GFS2_MAGIC could be mistaken for log metadata during replay, so it
 * is marked "escaped" here and the first word of the copy written to
 * the log is zeroed (see gfs2_before_commit() below).
 */
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

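/*
 * Illustrative sketch, not part of this file: the escape round trip.
 * On the way into the log the first word of an escaped data block is
 * zeroed (gfs2_before_commit() above); on replay, the escape flag
 * stored alongside the block number tells databuf_lo_scan_elements()
 * to put GFS2_MAGIC back. The function names here are illustrative.
 */
#if 0
static void escape_block(__be32 *data)		/* copy written to the log */
{
	*data = 0;
}

static void unescape_block(__be32 *data)	/* replayed in-place copy */
{
	*data = cpu_to_be32(GFS2_MAGIC);
}
#endif
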
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf,
			   &sdp->sd_log_le_buf, 0);
}

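/*
 * Illustrative sketch, not part of this file: where "503 for 4k
 * blocks" comes from. A metadata descriptor block is the 72-byte
 * struct gfs2_log_descriptor followed by one big-endian 64-bit block
 * pointer per journaled buffer: (4096 - 72) / 8 == 503. The function
 * name here is illustrative.
 */
#if 0
static unsigned int metadata_buf_limit(unsigned int bsize)
{
	return (bsize - sizeof(struct gfs2_log_descriptor)) /
		sizeof(__be64);
}
#endif
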
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}

static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

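/*
 * Illustrative sketch, not part of this file: the packing that
 * gfs2_struct2blk() sizes for revoke_lo_before_commit() above. The
 * first log block carries a struct gfs2_log_descriptor, continuation
 * blocks only a struct gfs2_meta_header; everything after the header
 * is filled with big-endian 64-bit revoked block numbers. The
 * function name here is illustrative.
 */
#if 0
static unsigned int revoke_blocks(unsigned int bsize, unsigned int nrevokes)
{
	unsigned int first = (bsize - sizeof(struct gfs2_log_descriptor)) /
			     sizeof(u64);
	unsigned int per_blk = (bsize - sizeof(struct gfs2_meta_header)) /
			       sizeof(u64);
	unsigned int blks = 1;

	if (nrevokes > first)
		blks += DIV_ROUND_UP(nrevokes - first, per_blk);
	return blks;
}
#endif
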
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		atomic_dec(&gl->gl_revokes);
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

683 
684 static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
685  struct gfs2_log_descriptor *ld, __be64 *ptr,
686  int pass)
687 {
688  struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
689  unsigned int blks = be32_to_cpu(ld->ld_length);
690  unsigned int revokes = be32_to_cpu(ld->ld_data1);
691  struct buffer_head *bh;
692  unsigned int offset;
693  u64 blkno;
694  int first = 1;
695  int error;
696 
697  if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
698  return 0;
699 
700  offset = sizeof(struct gfs2_log_descriptor);
701 
702  for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
703  error = gfs2_replay_read_block(jd, start, &bh);
704  if (error)
705  return error;
706 
707  if (!first)
709 
710  while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
711  blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
712 
713  error = gfs2_revoke_add(sdp, blkno, start);
714  if (error < 0) {
715  brelse(bh);
716  return error;
717  }
718  else if (error)
719  sdp->sd_found_revokes++;
720 
721  if (!--revokes)
722  break;
723  offset += sizeof(u64);
724  }
725 
726  brelse(bh);
727  offset = sizeof(struct gfs2_meta_header);
728  first = 0;
729  }
730 
731  return 0;
732 }
733 
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	if (tr)
		tr->tr_touched = 1;
	if (!list_empty(&bd->bd_list))
		return;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add_tail(&bd->bd_list, &sdp->sd_log_le_databuf);
	} else {
		list_add_tail(&bd->bd_list, &sdp->sd_log_le_ordered);
	}
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	unsigned int limit = buf_limit(sdp) / 2;

	gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf,
			   &sdp->sd_log_le_databuf, 1);
}

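/*
 * Illustrative sketch, not part of this file: why the databuf limit
 * is half the metadata one. Each journaled data block needs two
 * big-endian 64-bit words in the descriptor (block number plus escape
 * flag), so only half as many entries fit per descriptor block:
 * ((4096 - 72) / 8) / 2 == 251 for 4k blocks, matching the "251 or
 * so" in the databuf_lo_add() comment above. The function name here
 * is illustrative.
 */
#if 0
static unsigned int databuf_limit_example(unsigned int bsize)
{
	return ((bsize - sizeof(struct gfs2_log_descriptor)) /
		sizeof(__be64)) / 2;
}
#endif
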
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};