Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
bmap.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/spinlock.h>
11 #include <linux/completion.h>
12 #include <linux/buffer_head.h>
13 #include <linux/blkdev.h>
14 #include <linux/gfs2_ondisk.h>
15 #include <linux/crc32.h>
16 
17 #include "gfs2.h"
18 #include "incore.h"
19 #include "bmap.h"
20 #include "glock.h"
21 #include "inode.h"
22 #include "meta_io.h"
23 #include "quota.h"
24 #include "rgrp.h"
25 #include "super.h"
26 #include "trans.h"
27 #include "dir.h"
28 #include "util.h"
29 #include "trace_gfs2.h"
30 
31 /* This doesn't need to be that large as max 64 bit pointers in a 4k
32  * block is 512, so __u16 is fine for that. It saves stack space to
33  * keep it small.
34  */
35 struct metapath {
36  struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 };
39 
/* State carried through recursive_scan()/do_strip() while deallocating
 * one height level of the metadata tree during truncation. */
struct strip_mine {
	int sm_first;		/* 1 => skip the first pointer at the truncation
				 * point (it remains allocated); cleared after use */
	unsigned int sm_height;	/* the tree height being stripped this pass */
};
44 
55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
56  u64 block, struct page *page)
57 {
58  struct inode *inode = &ip->i_inode;
59  struct buffer_head *bh;
60  int release = 0;
61 
62  if (!page || page->index) {
63  page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
64  if (!page)
65  return -ENOMEM;
66  release = 1;
67  }
68 
69  if (!PageUptodate(page)) {
70  void *kaddr = kmap(page);
71  u64 dsize = i_size_read(inode);
72 
73  if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
74  dsize = dibh->b_size - sizeof(struct gfs2_dinode);
75 
76  memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
77  memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
78  kunmap(page);
79 
80  SetPageUptodate(page);
81  }
82 
83  if (!page_has_buffers(page))
84  create_empty_buffers(page, 1 << inode->i_blkbits,
85  (1 << BH_Uptodate));
86 
87  bh = page_buffers(page);
88 
89  if (!buffer_mapped(bh))
90  map_bh(bh, inode->i_sb, block);
91 
92  set_buffer_uptodate(bh);
93  if (!gfs2_is_jdata(ip))
95  if (!gfs2_is_writeback(ip))
96  gfs2_trans_add_bh(ip->i_gl, bh, 0);
97 
98  if (release) {
99  unlock_page(page);
100  page_cache_release(page);
101  }
102 
103  return 0;
104 }
105 
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page, forwarded to gfs2_unstuffer_page()
 *
 * Moves the inline ("stuffed") data out of the dinode into a freshly
 * allocated block, clears the dinode tail and sets tree height to 1.
 * Must be called with an open transaction; the caller must hold i_rw_mutex
 * released (it is taken for writing here).
 *
 * Returns: errno
 */
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			/* Directory data is journaled metadata */
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		/* First (and only) pointer follows the dinode header */
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
175 
176 
237 static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
238  struct metapath *mp, unsigned int height)
239 {
240  unsigned int i;
241 
242  for (i = height; i--;)
243  mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
244 
245 }
246 
247 static inline unsigned int metapath_branch_start(const struct metapath *mp)
248 {
249  if (mp->mp_list[0] == 0)
250  return 2;
251  return 1;
252 }
253 
264 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
265 {
266  struct buffer_head *bh = mp->mp_bh[height];
267  unsigned int head_size = (height > 0) ?
268  sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
269  return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
270 }
271 
/**
 * gfs2_metapath_ra - issue read-ahead for metadata blocks
 * @gl: the glock covering the blocks
 * @bh: the indirect block containing the pointers
 * @pos: first pointer within @bh to consider
 *
 * Kicks off asynchronous READA requests for every non-zero pointer from
 * @pos to the end of @bh. Best-effort: buffers that are already locked
 * or up to date are skipped.
 */
static void gfs2_metapath_ra(struct gfs2_glock *gl,
			     const struct buffer_head *bh, const __be64 *pos)
{
	struct buffer_head *rabh;
	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
	const __be64 *t;

	for (t = pos; t < endp; t++) {
		if (!*t)
			continue;	/* unallocated slot, nothing to read */

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(READA | REQ_META, rabh);
				/* no brelse: the completion handler drops
				 * the reference taken by gfs2_getbuf() */
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}
295 
313 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
314 {
315  unsigned int end_of_metadata = ip->i_height - 1;
316  unsigned int x;
317  __be64 *ptr;
318  u64 dblock;
319  int ret;
320 
321  for (x = 0; x < end_of_metadata; x++) {
322  ptr = metapointer(x, mp);
323  dblock = be64_to_cpu(*ptr);
324  if (!dblock)
325  return x + 1;
326 
327  ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
328  if (ret)
329  return ret;
330  }
331 
332  return ip->i_height;
333 }
334 
335 static inline void release_metapath(struct metapath *mp)
336 {
337  int i;
338 
339  for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
340  if (mp->mp_bh[i] == NULL)
341  break;
342  brelse(mp->mp_bh[i]);
343  }
344 }
345 
/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first pointer is zero, counts a run of zero (unallocated)
 * pointers; otherwise counts a run of consecutively numbered blocks.
 *
 * Returns: The length of the extent (minimum of one block)
 */
static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;	/* must break before dereferencing *ptr */
		if (limit && --limit == 0)
			break;
		if (d)
			d++;	/* expected value of the next contiguous block */
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;	/* extent runs to the end of this metadata block */
	return (ptr - first);
}
381 
382 static inline void bmap_lock(struct gfs2_inode *ip, int create)
383 {
384  if (create)
385  down_write(&ip->i_rw_mutex);
386  else
387  down_read(&ip->i_rw_mutex);
388 }
389 
390 static inline void bmap_unlock(struct gfs2_inode *ip, int create)
391 {
392  if (create)
393  up_write(&ip->i_rw_mutex);
394  else
395  up_read(&ip->i_rw_mutex);
396 }
397 
/**
 * gfs2_indirect_init - create a new indirect block at height @i
 * @mp: metapath; mp_bh[i - 1] must hold the parent buffer
 * @gl: the inode's glock
 * @i: height of the new block (must be >= 1)
 * @offset: pointer slot in the *parent* block that receives @bn
 * @mp: @bn: disk address of the new indirect block
 *
 * Allocates the buffer for @bn, stamps it as GFS2 indirect metadata,
 * zeroes its pointer area, and writes @bn into slot @offset of the
 * parent block.
 *
 * Returns: pointer to the parent slot that was written
 */
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	/* Parent header size depends on whether the parent is the dinode */
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				 sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}
415 
/* States of the allocation state machine in gfs2_bmap_alloc(); restored —
 * the enum head was lost but the constants are used below. */
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};
422 
/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath; mp_bh[0..sheight-1] already filled in
 * @sheight: The starting height (i.e. whats already mapped)
 * @height: The height to build to
 * @maxlen: The max number of data blocks to alloc
 *
 * Three-state machine: grow the tree's height (ALLOC_GROW_HEIGHT),
 * then extend branches down to the leaves (ALLOC_GROW_DEPTH), then
 * fill in data block pointers (ALLOC_DATA). The switch below falls
 * through from each state to the next on purpose.
 *
 * Returns: errno on error
 */
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const unsigned int maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		/* Metadata (and jdata) blocks may have pending revokes */
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				/* Save the dinode's first pointer: it moves
				 * to the top of the new subtree below */
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				/* Drop buffers above the branch point; they
				 * will be re-created by gfs2_indirect_init */
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		/* fall through */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		/* fall through */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
586 
/**
 * gfs2_block_map - Map a request for blocks onto the on-disk layout
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped; on entry bh_map->b_size gives the
 *          maximum extent length (in bytes) the caller wants mapped
 * @create: True if its ok to alloc blocks to satify the request
 *
 * Sets buffer_mapped() on success; sets buffer_boundary() if a
 * metadata read is needed before the next block can be mapped; sets
 * buffer_new() if blocks were allocated.
 *
 * Returns: errno
 */
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	/* Write lock only when we may allocate */
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
	/* Directories use journaled-data block geometry */
	if (gfs2_is_dir(ip)) {
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	/* Minimum tree height required to address lblock */
	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}
676 
677 /*
678  * Deprecated: do not use in new code
679  */
680 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
681 {
682  struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
683  int ret;
684  int create = *new;
685 
686  BUG_ON(!extlen);
687  BUG_ON(!dblock);
688  BUG_ON(!new);
689 
690  bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
691  ret = gfs2_block_map(inode, lblock, &bh, create);
692  *extlen = bh.b_size >> inode->i_blkbits;
693  *dblock = bh.b_blocknr;
694  if (buffer_new(&bh))
695  *new = 1;
696  else
697  *new = 0;
698  return ret;
699 }
700 
714 static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
715  struct buffer_head *bh, __be64 *top, __be64 *bottom,
716  unsigned int height, struct strip_mine *sm)
717 {
718  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
719  struct gfs2_rgrp_list rlist;
720  u64 bn, bstart;
721  u32 blen, btotal;
722  __be64 *p;
723  unsigned int rg_blocks = 0;
724  int metadata;
725  unsigned int revokes = 0;
726  int x;
727  int error;
728 
729  error = gfs2_rindex_update(sdp);
730  if (error)
731  return error;
732 
733  if (!*top)
734  sm->sm_first = 0;
735 
736  if (height != sm->sm_height)
737  return 0;
738 
739  if (sm->sm_first) {
740  top++;
741  sm->sm_first = 0;
742  }
743 
744  metadata = (height != ip->i_height - 1);
745  if (metadata)
746  revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
747  else if (ip->i_depth)
748  revokes = sdp->sd_inptrs;
749 
750  memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
751  bstart = 0;
752  blen = 0;
753 
754  for (p = top; p < bottom; p++) {
755  if (!*p)
756  continue;
757 
758  bn = be64_to_cpu(*p);
759 
760  if (bstart + blen == bn)
761  blen++;
762  else {
763  if (bstart)
764  gfs2_rlist_add(ip, &rlist, bstart);
765 
766  bstart = bn;
767  blen = 1;
768  }
769  }
770 
771  if (bstart)
772  gfs2_rlist_add(ip, &rlist, bstart);
773  else
774  goto out; /* Nothing to do */
775 
777 
778  for (x = 0; x < rlist.rl_rgrps; x++) {
779  struct gfs2_rgrpd *rgd;
780  rgd = rlist.rl_ghs[x].gh_gl->gl_object;
781  rg_blocks += rgd->rd_length;
782  }
783 
784  error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
785  if (error)
786  goto out_rlist;
787 
788  if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
789  gfs2_rs_deltree(ip, ip->i_res);
790 
791  error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
793  revokes);
794  if (error)
795  goto out_rg_gunlock;
796 
797  down_write(&ip->i_rw_mutex);
798 
799  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
800  gfs2_trans_add_bh(ip->i_gl, bh, 1);
801 
802  bstart = 0;
803  blen = 0;
804  btotal = 0;
805 
806  for (p = top; p < bottom; p++) {
807  if (!*p)
808  continue;
809 
810  bn = be64_to_cpu(*p);
811 
812  if (bstart + blen == bn)
813  blen++;
814  else {
815  if (bstart) {
816  __gfs2_free_blocks(ip, bstart, blen, metadata);
817  btotal += blen;
818  }
819 
820  bstart = bn;
821  blen = 1;
822  }
823 
824  *p = 0;
825  gfs2_add_inode_blocks(&ip->i_inode, -1);
826  }
827  if (bstart) {
828  __gfs2_free_blocks(ip, bstart, blen, metadata);
829  btotal += blen;
830  }
831 
832  gfs2_statfs_change(sdp, 0, +btotal, 0);
833  gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
834  ip->i_inode.i_gid);
835 
836  ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
837 
838  gfs2_dinode_out(ip, dibh->b_data);
839 
840  up_write(&ip->i_rw_mutex);
841 
842  gfs2_trans_end(sdp);
843 
844 out_rg_gunlock:
845  gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
846 out_rlist:
848 out:
849  return error;
850 }
851 
/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer (NULL at height 0; filled in here)
 * @mp: the path through the metadata to the point to start taking things off
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @sm: data opaque to this function to pass to @bc
 *
 * Walks the metadata tree depth-first, calling do_strip() on each
 * indirect block (and on the dinode at height 0) so that one level,
 * selected by sm->sm_height, is deallocated per top-level call.
 *
 * Returns: errno
 */
static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		/* Height 0: the pointers live in the dinode itself */
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
		if (error)
			return error;

		/* Only the first block at each level starts mid-way */
		top = (__be64 *)(bh->b_data + mh_size) +
				  (first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
	if (error)
		goto out;

	if (height < ip->i_height - 1) {

		gfs2_metapath_ra(ip->i_gl, bh, top);

		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, sm);
			if (error)
				break;
		}
	}
out:
	brelse(bh);
	return error;
}
922 
923 
/**
 * gfs2_block_truncate_page - zero the tail of the partial block at @from
 * @mapping: the address space of the inode being truncated
 * @from: the new file size
 *
 * Zeroes the byte range from @from to the end of its block so that a
 * later extension of the file does not expose stale data. Holes are
 * left untouched.
 *
 * Returns: errno
 */
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
993 
994 static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
995 {
996  struct gfs2_inode *ip = GFS2_I(inode);
997  struct gfs2_sbd *sdp = GFS2_SB(inode);
998  struct address_space *mapping = inode->i_mapping;
999  struct buffer_head *dibh;
1000  int journaled = gfs2_is_jdata(ip);
1001  int error;
1002 
1003  error = gfs2_trans_begin(sdp,
1004  RES_DINODE + (journaled ? RES_JDATA : 0), 0);
1005  if (error)
1006  return error;
1007 
1008  error = gfs2_meta_inode_buffer(ip, &dibh);
1009  if (error)
1010  goto out;
1011 
1012  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1013 
1014  if (gfs2_is_stuffed(ip)) {
1015  gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1016  } else {
1017  if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
1018  error = gfs2_block_truncate_page(mapping, newsize);
1019  if (error)
1020  goto out_brelse;
1021  }
1023  }
1024 
1025  i_size_write(inode, newsize);
1026  ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1027  gfs2_dinode_out(ip, dibh->b_data);
1028 
1029  truncate_pagecache(inode, oldsize, newsize);
1030 out_brelse:
1031  brelse(dibh);
1032 out:
1033  gfs2_trans_end(sdp);
1034  return error;
1035 }
1036 
1037 static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
1038 {
1039  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1040  unsigned int height = ip->i_height;
1041  u64 lblock;
1042  struct metapath mp;
1043  int error;
1044 
1045  if (!size)
1046  lblock = 0;
1047  else
1048  lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
1049 
1050  find_metapath(sdp, lblock, &mp, ip->i_height);
1051  error = gfs2_rindex_update(sdp);
1052  if (error)
1053  return error;
1054 
1056  if (error)
1057  return error;
1058 
1059  while (height--) {
1060  struct strip_mine sm;
1061  sm.sm_first = !!size;
1062  sm.sm_height = height;
1063 
1064  error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
1065  if (error)
1066  break;
1067  }
1068 
1069  gfs2_quota_unhold(ip);
1070 
1071  return error;
1072 }
1073 
1074 static int trunc_end(struct gfs2_inode *ip)
1075 {
1076  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1077  struct buffer_head *dibh;
1078  int error;
1079 
1080  error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1081  if (error)
1082  return error;
1083 
1084  down_write(&ip->i_rw_mutex);
1085 
1086  error = gfs2_meta_inode_buffer(ip, &dibh);
1087  if (error)
1088  goto out;
1089 
1090  if (!i_size_read(&ip->i_inode)) {
1091  ip->i_height = 0;
1092  ip->i_goal = ip->i_no_addr;
1093  gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1094  }
1095  ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1097 
1098  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1099  gfs2_dinode_out(ip, dibh->b_data);
1100  brelse(dibh);
1101 
1102 out:
1103  up_write(&ip->i_rw_mutex);
1104  gfs2_trans_end(sdp);
1105  return error;
1106 }
1107 
1120 static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1121 {
1122  struct gfs2_inode *ip = GFS2_I(inode);
1123  int error;
1124 
1125  error = trunc_start(inode, oldsize, newsize);
1126  if (error < 0)
1127  return error;
1128  if (gfs2_is_stuffed(ip))
1129  return 0;
1130 
1131  error = trunc_dealloc(ip, newsize);
1132  if (error == 0)
1133  error = trunc_end(ip);
1134 
1135  return error;
1136 }
1137 
1138 void gfs2_trim_blocks(struct inode *inode)
1139 {
1140  u64 size = inode->i_size;
1141  int ret;
1142 
1143  ret = do_shrink(inode, size, size);
1144  WARN_ON(ret != 0);
1145 }
1146 
1167 static int do_grow(struct inode *inode, u64 size)
1168 {
1169  struct gfs2_inode *ip = GFS2_I(inode);
1170  struct gfs2_sbd *sdp = GFS2_SB(inode);
1171  struct buffer_head *dibh;
1172  int error;
1173  int unstuff = 0;
1174 
1175  if (gfs2_is_stuffed(ip) &&
1176  (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
1177  error = gfs2_quota_lock_check(ip);
1178  if (error)
1179  return error;
1180 
1181  error = gfs2_inplace_reserve(ip, 1);
1182  if (error)
1183  goto do_grow_qunlock;
1184  unstuff = 1;
1185  }
1186 
1187  error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
1188  if (error)
1189  goto do_grow_release;
1190 
1191  if (unstuff) {
1192  error = gfs2_unstuff_dinode(ip, NULL);
1193  if (error)
1194  goto do_end_trans;
1195  }
1196 
1197  error = gfs2_meta_inode_buffer(ip, &dibh);
1198  if (error)
1199  goto do_end_trans;
1200 
1201  i_size_write(inode, size);
1202  ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
1203  gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1204  gfs2_dinode_out(ip, dibh->b_data);
1205  brelse(dibh);
1206 
1207 do_end_trans:
1208  gfs2_trans_end(sdp);
1209 do_grow_release:
1210  if (unstuff) {
1212 do_grow_qunlock:
1213  gfs2_quota_unlock(ip);
1214  }
1215  return error;
1216 }
1217 
1230 int gfs2_setattr_size(struct inode *inode, u64 newsize)
1231 {
1232  int ret;
1233  u64 oldsize;
1234 
1235  BUG_ON(!S_ISREG(inode->i_mode));
1236 
1237  ret = inode_newsize_ok(inode, newsize);
1238  if (ret)
1239  return ret;
1240 
1241  inode_dio_wait(inode);
1242 
1243  oldsize = inode->i_size;
1244  if (newsize >= oldsize)
1245  return do_grow(inode, newsize);
1246 
1247  return do_shrink(inode, oldsize, newsize);
1248 }
1249 
1251 {
1252  int error;
1253  error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1254  if (!error)
1255  error = trunc_end(ip);
1256  return error;
1257 }
1258 
/* Free every data and metadata block of the file (truncate to zero
 * without the i_size/dinode phases). Signature restored — it was lost
 * from the scrape. */
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}
1263 
1274  unsigned int len)
1275 {
1276  struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1277  struct buffer_head bh;
1278  unsigned int shift;
1279  u64 lblock, lblock_stop, size;
1280  u64 end_of_file;
1281 
1282  if (!len)
1283  return 0;
1284 
1285  if (gfs2_is_stuffed(ip)) {
1286  if (offset + len >
1287  sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
1288  return 1;
1289  return 0;
1290  }
1291 
1292  shift = sdp->sd_sb.sb_bsize_shift;
1293  BUG_ON(gfs2_is_dir(ip));
1294  end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
1295  lblock = offset >> shift;
1296  lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1297  if (lblock_stop > end_of_file)
1298  return 1;
1299 
1300  size = (lblock_stop - lblock) << shift;
1301  do {
1302  bh.b_state = 0;
1303  bh.b_size = size;
1304  gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
1305  if (!buffer_mapped(&bh))
1306  return 1;
1307  size -= bh.b_size;
1308  lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1309  } while(size > 0);
1310 
1311  return 0;
1312 }
1313