Linux Kernel  3.7.1
indirect.c
1 /*
2  * linux/fs/ext4/indirect.c
3  *
4  * from
5  *
6  * linux/fs/ext4/inode.c
7  *
8  * Copyright (C) 1992, 1993, 1994, 1995
9  * Remy Card ([email protected])
10  * Laboratoire MASI - Institut Blaise Pascal
11  * Universite Pierre et Marie Curie (Paris VI)
12  *
13  * from
14  *
15  * linux/fs/minix/inode.c
16  *
17  * Copyright (C) 1991, 1992 Linus Torvalds
18  *
19  * Goal-directed block allocation by Stephen Tweedie
20  * ([email protected]), 1993, 1998
21  */
22 
23 #include "ext4_jbd2.h"
24 #include "truncate.h"
25 
26 #include <trace/events/ext4.h>
27 
28 typedef struct {
29  __le32 *p;
30  __le32 key;
31  struct buffer_head *bh;
32 } Indirect;
33 
34 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
35 {
36  p->key = *(p->p = v);
37  p->bh = bh;
38 }
39 
61 /*
62  * Portability note: the last comparison (check that we fit into triple
63  * indirect block) is spelled differently, because otherwise on an
64  * architecture with 32-bit longs and 8Kb pages we might get into trouble
65  * if our filesystem had 8Kb blocks. We might use long long, but that would
66  * kill us on x86. Oh, well, at least the sign propagation does not matter -
67  * i_block would have to be negative in the very beginning, so we would not
68  * get there at all.
69  */
70 
71 static int ext4_block_to_path(struct inode *inode,
72  ext4_lblk_t i_block,
73  ext4_lblk_t offsets[4], int *boundary)
74 {
75  int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
76  int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
77  const long direct_blocks = EXT4_NDIR_BLOCKS,
78  indirect_blocks = ptrs,
79  double_blocks = (1 << (ptrs_bits * 2));
80  int n = 0;
81  int final = 0;
82 
83  if (i_block < direct_blocks) {
84  offsets[n++] = i_block;
85  final = direct_blocks;
86  } else if ((i_block -= direct_blocks) < indirect_blocks) {
87  offsets[n++] = EXT4_IND_BLOCK;
88  offsets[n++] = i_block;
89  final = ptrs;
90  } else if ((i_block -= indirect_blocks) < double_blocks) {
91  offsets[n++] = EXT4_DIND_BLOCK;
92  offsets[n++] = i_block >> ptrs_bits;
93  offsets[n++] = i_block & (ptrs - 1);
94  final = ptrs;
95  } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
96  offsets[n++] = EXT4_TIND_BLOCK;
97  offsets[n++] = i_block >> (ptrs_bits * 2);
98  offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
99  offsets[n++] = i_block & (ptrs - 1);
100  final = ptrs;
101  } else {
102  ext4_warning(inode->i_sb, "block %lu > max in inode %lu",
103  i_block + direct_blocks +
104  indirect_blocks + double_blocks, inode->i_ino);
105  }
106  if (boundary)
107  *boundary = final - 1 - (i_block & (ptrs - 1));
108  return n;
109 }
110 
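To make the mapping above concrete, here is a small worked example (not part of the original file), assuming a 4 KiB block size so that EXT4_ADDR_PER_BLOCK == 1024 and ptrs_bits == 10:

/*
 * i_block = 5000:
 *   5000 >= 12 direct blocks        -> i_block becomes 4988
 *   4988 >= 1024 (single indirect)  -> i_block becomes 3964
 *   3964 <  1024 * 1024             -> double-indirect path
 *
 * offsets[] = { EXT4_DIND_BLOCK, 3964 >> 10, 3964 & 1023 }
 *           = { 13, 3, 892 }, and the returned depth is 3.
 */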
141 static Indirect *ext4_get_branch(struct inode *inode, int depth,
142  ext4_lblk_t *offsets,
143  Indirect chain[4], int *err)
144 {
145  struct super_block *sb = inode->i_sb;
146  Indirect *p = chain;
147  struct buffer_head *bh;
148 
149  *err = 0;
150  /* i_data is not going away, no lock needed */
151  add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
152  if (!p->key)
153  goto no_block;
154  while (--depth) {
155  bh = sb_getblk(sb, le32_to_cpu(p->key));
156  if (unlikely(!bh))
157  goto failure;
158 
159  if (!bh_uptodate_or_lock(bh)) {
160  if (bh_submit_read(bh) < 0) {
161  put_bh(bh);
162  goto failure;
163  }
164  /* validate block references */
165  if (ext4_check_indirect_blockref(inode, bh)) {
166  put_bh(bh);
167  goto failure;
168  }
169  }
170 
171  add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
172  /* Reader: end */
173  if (!p->key)
174  goto no_block;
175  }
176  return NULL;
177 
178 failure:
179  *err = -EIO;
180 no_block:
181  return p;
182 }
183 
204 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
205 {
206  struct ext4_inode_info *ei = EXT4_I(inode);
207  __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
208  __le32 *p;
209 
210  /* Try to find previous block */
211  for (p = ind->p - 1; p >= start; p--) {
212  if (*p)
213  return le32_to_cpu(*p);
214  }
215 
216  /* No such thing, so let's try location of indirect block */
217  if (ind->bh)
218  return ind->bh->b_blocknr;
219 
220  /*
221  * Is it going to be referred to from the inode itself? OK, just put it
222  * into the same cylinder group then.
223  */
224  return ext4_inode_to_goal_block(inode);
225 }
226 
238 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
239  Indirect *partial)
240 {
241  ext4_fsblk_t goal;
242 
243  /*
244  * XXX need to get goal block from mballoc's data structures
245  */
246 
247  goal = ext4_find_near(inode, partial);
248  goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
249  return goal;
250 }
251 
264 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
265  int blocks_to_boundary)
266 {
267  unsigned int count = 0;
268 
269  /*
270  * Simple case: the [t,d]indirect block(s) have not been allocated yet,
271  * so clearly the blocks on that path have not been allocated either.
272  */
273  if (k > 0) {
274  /* right now we don't handle cross boundary allocation */
275  if (blks < blocks_to_boundary + 1)
276  count += blks;
277  else
278  count += blocks_to_boundary + 1;
279  return count;
280  }
281 
282  count++;
283  while (count < blks && count <= blocks_to_boundary &&
284  le32_to_cpu(*(branch[0].p + count)) == 0) {
285  count++;
286  }
287  return count;
288 }
289 
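For a rough sense of the two branches above, consider a request of blks = 8 with blocks_to_boundary = 5 (illustrative numbers only):

/*
 * k > 0  (some [d,t]indirect blocks are still missing):
 *     count = min(blks, blocks_to_boundary + 1) = min(8, 6) = 6,
 *     because allocation never crosses an indirect-block boundary.
 *
 * k == 0 (the indirect path already exists):
 *     count starts at 1 and grows while the next pointer in branch[0]
 *     is zero and count <= blocks_to_boundary, so again at most 6
 *     direct blocks are counted for this pass.
 */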
306 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
307  ext4_lblk_t iblock, ext4_fsblk_t goal,
308  int indirect_blks, int blks,
309  ext4_fsblk_t new_blocks[4], int *err)
310 {
311  struct ext4_allocation_request ar;
312  int target, i;
313  unsigned long count = 0, blk_allocated = 0;
314  int index = 0;
315  ext4_fsblk_t current_block = 0;
316  int ret = 0;
317 
318  /*
319  * Here we try to allocate the requested multiple blocks at once,
320  * on a best-effort basis.
321  * To build a branch, we need to allocate blocks for
322  * the indirect blocks (if not allocated yet) and at least
323  * the first direct block of this branch. That is the
324  * minimum number of blocks we are required to allocate.
325  */
326  /* first we try to allocate the indirect blocks */
327  target = indirect_blks;
328  while (target > 0) {
329  count = target;
330  /* allocating blocks for indirect blocks and direct blocks */
331  current_block = ext4_new_meta_blocks(handle, inode, goal,
332  0, &count, err);
333  if (*err)
334  goto failed_out;
335 
336  if (unlikely(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS)) {
337  EXT4_ERROR_INODE(inode,
338  "current_block %llu + count %lu > %d!",
339  current_block, count,
340  EXT4_MAX_BLOCK_FILE_PHYS);
341  *err = -EIO;
342  goto failed_out;
343  }
344 
345  target -= count;
346  /* allocate blocks for indirect blocks */
347  while (index < indirect_blks && count) {
348  new_blocks[index++] = current_block++;
349  count--;
350  }
351  if (count > 0) {
352  /*
353  * save the new block number
354  * for the first direct block
355  */
356  new_blocks[index] = current_block;
357  printk(KERN_INFO "%s returned more blocks than "
358  "requested\n", __func__);
359  WARN_ON(1);
360  break;
361  }
362  }
363 
364  target = blks - count ;
365  blk_allocated = count;
366  if (!target)
367  goto allocated;
368  /* Now allocate data blocks */
369  memset(&ar, 0, sizeof(ar));
370  ar.inode = inode;
371  ar.goal = goal;
372  ar.len = target;
373  ar.logical = iblock;
374  if (S_ISREG(inode->i_mode))
375  /* enable in-core preallocation only for regular files */
376  ar.flags = EXT4_MB_HINT_DATA;
377 
378  current_block = ext4_mb_new_blocks(handle, &ar, err);
379  if (unlikely(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS)) {
380  EXT4_ERROR_INODE(inode,
381  "current_block %llu + ar.len %d > %d!",
382  current_block, ar.len,
383  EXT4_MAX_BLOCK_FILE_PHYS);
384  *err = -EIO;
385  goto failed_out;
386  }
387 
388  if (*err && (target == blks)) {
389  /*
390  * if the allocation failed and we didn't allocate
391  * any blocks before
392  */
393  goto failed_out;
394  }
395  if (!*err) {
396  if (target == blks) {
397  /*
398  * save the new block number
399  * for the first direct block
400  */
401  new_blocks[index] = current_block;
402  }
403  blk_allocated += ar.len;
404  }
405 allocated:
406  /* total number of blocks allocated for direct blocks */
407  ret = blk_allocated;
408  *err = 0;
409  return ret;
410 failed_out:
411  for (i = 0; i < index; i++)
412  ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
413  return ret;
414 }
415 
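A sketch of the flow above with hypothetical numbers, indirect_blks = 2 and blks = 4:

/*
 *   1) ext4_new_meta_blocks() is called until the two missing
 *      [d]indirect blocks are obtained; they go into new_blocks[0..1].
 *   2) An ext4_allocation_request is filled (goal, len = 4,
 *      logical = iblock) and ext4_mb_new_blocks() allocates the data
 *      blocks; new_blocks[2] records the first of them.
 *   3) The return value is the number of *data* blocks obtained
 *      (blk_allocated); the indirect blocks are not counted in it.
 */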
443 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
444  ext4_lblk_t iblock, int indirect_blks,
445  int *blks, ext4_fsblk_t goal,
446  ext4_lblk_t *offsets, Indirect *branch)
447 {
448  int blocksize = inode->i_sb->s_blocksize;
449  int i, n = 0;
450  int err = 0;
451  struct buffer_head *bh;
452  int num;
453  ext4_fsblk_t new_blocks[4];
454  ext4_fsblk_t current_block;
455 
456  num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
457  *blks, new_blocks, &err);
458  if (err)
459  return err;
460 
461  branch[0].key = cpu_to_le32(new_blocks[0]);
462  /*
463  * metadata blocks and data blocks are allocated.
464  */
465  for (n = 1; n <= indirect_blks; n++) {
466  /*
467  * Get buffer_head for parent block, zero it out
468  * and set the pointer to new one, then send
469  * parent to disk.
470  */
471  bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
472  if (unlikely(!bh)) {
473  err = -EIO;
474  goto failed;
475  }
476 
477  branch[n].bh = bh;
478  lock_buffer(bh);
479  BUFFER_TRACE(bh, "call get_create_access");
480  err = ext4_journal_get_create_access(handle, bh);
481  if (err) {
482  /* Don't brelse(bh) here; it's done in
483  * ext4_journal_forget() below */
484  unlock_buffer(bh);
485  goto failed;
486  }
487 
488  memset(bh->b_data, 0, blocksize);
489  branch[n].p = (__le32 *) bh->b_data + offsets[n];
490  branch[n].key = cpu_to_le32(new_blocks[n]);
491  *branch[n].p = branch[n].key;
492  if (n == indirect_blks) {
493  current_block = new_blocks[n];
494  /*
495  * End of chain, update the last new metablock of
496  * the chain to point to the new allocated
497  * data blocks numbers
498  */
499  for (i = 1; i < num; i++)
500  *(branch[n].p + i) = cpu_to_le32(++current_block);
501  }
502  BUFFER_TRACE(bh, "marking uptodate");
503  set_buffer_uptodate(bh);
504  unlock_buffer(bh);
505 
506  BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
507  err = ext4_handle_dirty_metadata(handle, inode, bh);
508  if (err)
509  goto failed;
510  }
511  *blks = num;
512  return err;
513 failed:
514  /* Allocation failed, free what we already allocated */
515  ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
516  for (i = 1; i <= n ; i++) {
517  /*
518  * branch[i].bh is newly allocated, so there is no
519  * need to revoke the block, which is why we don't
520  * need to set EXT4_FREE_BLOCKS_METADATA.
521  */
522  ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
523  EXT4_FREE_BLOCKS_FORGET);
524  }
525  for (i = n+1; i < indirect_blks; i++)
526  ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
527 
528  ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
529 
530  return err;
531 }
532 
548 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
549  ext4_lblk_t block, Indirect *where, int num,
550  int blks)
551 {
552  int i;
553  int err = 0;
554  ext4_fsblk_t current_block;
555 
556  /*
557  * If we're splicing into a [td]indirect block (as opposed to the
558  * inode) then we need to get write access to the [td]indirect block
559  * before the splice.
560  */
561  if (where->bh) {
562  BUFFER_TRACE(where->bh, "get_write_access");
563  err = ext4_journal_get_write_access(handle, where->bh);
564  if (err)
565  goto err_out;
566  }
567  /* That's it */
568 
569  *where->p = where->key;
570 
571  /*
572  * Update the host buffer_head or inode to point to the remaining
573  * just-allocated direct blocks.
574  */
575  if (num == 0 && blks > 1) {
576  current_block = le32_to_cpu(where->key) + 1;
577  for (i = 1; i < blks; i++)
578  *(where->p + i) = cpu_to_le32(current_block++);
579  }
580 
581  /* We are done with atomic stuff, now do the rest of housekeeping */
582  /* had we spliced it onto indirect block? */
583  if (where->bh) {
584  /*
585  * If we spliced it onto an indirect block, we haven't
586  * altered the inode. Note however that if it is being spliced
587  * onto an indirect block at the very end of the file (the
588  * file is growing) then we *will* alter the inode to reflect
589  * the new i_size. But that is not done here - it is done in
590  * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
591  */
592  jbd_debug(5, "splicing indirect only\n");
593  BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
594  err = ext4_handle_dirty_metadata(handle, inode, where->bh);
595  if (err)
596  goto err_out;
597  } else {
598  /*
599  * OK, we spliced it into the inode itself on a direct block.
600  */
601  ext4_mark_inode_dirty(handle, inode);
602  jbd_debug(5, "splicing direct\n");
603  }
604  return err;
605 
606 err_out:
607  for (i = 1; i <= num; i++) {
608  /*
609  * branch[i].bh is newly allocated, so there is no
610  * need to revoke the block, which is why we don't
611  * need to set EXT4_FREE_BLOCKS_METADATA.
612  */
613  ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
614  EXT4_FREE_BLOCKS_FORGET);
615  }
616  ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
617  blks, 0);
618 
619  return err;
620 }
621 
622 /*
623  * The ext4_ind_map_blocks() function handles non-extent inodes
624  * (i.e., using the traditional indirect/double-indirect i_blocks
625  * scheme) for ext4_map_blocks().
626  *
627  * Allocation strategy is simple: if we have to allocate something, we will
628  * have to go the whole way to leaf. So let's do it before attaching anything
629  * to tree, set linkage between the newborn blocks, write them if sync is
630  * required, recheck the path, free and repeat if check fails, otherwise
631  * set the last missing link (that will protect us from any truncate-generated
632  * removals - all blocks on the path are immune now) and possibly force the
633  * write on the parent block.
634  * That has a nice additional property: no special recovery from the failed
635  * allocations is needed - we simply release blocks and do not touch anything
636  * reachable from inode.
637  *
638  * `handle' can be NULL if create == 0.
639  *
640  * return > 0, # of blocks mapped or allocated.
641  * return = 0, if plain lookup failed.
642  * return < 0, error case.
643  *
644  * The ext4_ind_map_blocks() function should be called with
645  * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
646  * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
647  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
648  * blocks.
649  */
650 int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
651  struct ext4_map_blocks *map,
652  int flags)
653 {
654  int err = -EIO;
655  ext4_lblk_t offsets[4];
656  Indirect chain[4];
657  Indirect *partial;
658  ext4_fsblk_t goal;
659  int indirect_blks;
660  int blocks_to_boundary = 0;
661  int depth;
662  int count = 0;
663  ext4_fsblk_t first_block = 0;
664 
665  trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
666  J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
667  J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
668  depth = ext4_block_to_path(inode, map->m_lblk, offsets,
669  &blocks_to_boundary);
670 
671  if (depth == 0)
672  goto out;
673 
674  partial = ext4_get_branch(inode, depth, offsets, chain, &err);
675 
676  /* Simplest case - block found, no allocation needed */
677  if (!partial) {
678  first_block = le32_to_cpu(chain[depth - 1].key);
679  count++;
680  /*map more blocks*/
681  while (count < map->m_len && count <= blocks_to_boundary) {
682  ext4_fsblk_t blk;
683 
684  blk = le32_to_cpu(*(chain[depth-1].p + count));
685 
686  if (blk == first_block + count)
687  count++;
688  else
689  break;
690  }
691  goto got_it;
692  }
693 
694  /* Next simple case - plain lookup or failed read of indirect block */
695  if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
696  goto cleanup;
697 
698  /*
699  * Okay, we need to do block allocation.
700  */
701  if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
702  EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
703  EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
704  "non-extent mapped inodes with bigalloc");
705  return -ENOSPC;
706  }
707 
708  goal = ext4_find_goal(inode, map->m_lblk, partial);
709 
710  /* the number of blocks we need to allocate for [d,t]indirect blocks */
711  indirect_blks = (chain + depth) - partial - 1;
712 
713  /*
714  * Next look up the indirect map to count the total number of
715  * direct blocks to allocate for this branch.
716  */
717  count = ext4_blks_to_allocate(partial, indirect_blks,
718  map->m_len, blocks_to_boundary);
719  /*
720  * Block out ext4_truncate while we alter the tree
721  */
722  err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
723  &count, goal,
724  offsets + (partial - chain), partial);
725 
726  /*
727  * The ext4_splice_branch call will free and forget any buffers
728  * on the new chain if there is a failure, but that risks using
729  * up transaction credits, especially for bitmaps where the
730  * credits cannot be returned. Can we handle this somehow? We
731  * may need to return -EAGAIN upwards in the worst case. --sct
732  */
733  if (!err)
734  err = ext4_splice_branch(handle, inode, map->m_lblk,
735  partial, indirect_blks, count);
736  if (err)
737  goto cleanup;
738 
739  map->m_flags |= EXT4_MAP_NEW;
740 
741  ext4_update_inode_fsync_trans(handle, inode, 1);
742 got_it:
743  map->m_flags |= EXT4_MAP_MAPPED;
744  map->m_pblk = le32_to_cpu(chain[depth-1].key);
745  map->m_len = count;
746  if (count > blocks_to_boundary)
747  map->m_flags |= EXT4_MAP_BOUNDARY;
748  err = count;
749  /* Clean up and exit */
750  partial = chain + depth - 1; /* the whole chain */
751 cleanup:
752  while (partial > chain) {
753  BUFFER_TRACE(partial->bh, "call brelse");
754  brelse(partial->bh);
755  partial--;
756  }
757 out:
758  trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
759  map->m_pblk, map->m_len, err);
760  return err;
761 }
762 
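A minimal sketch of a lookup-only caller, following the locking rule stated in the comment above (hypothetical code: `inode` is assumed to be an indirect-mapped ext4 inode and `lblk` the logical block of interest; no handle and no EXT4_GET_BLOCKS_CREATE, so i_data_sem is taken for read):

struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
int ret;

down_read(&EXT4_I(inode)->i_data_sem);
ret = ext4_ind_map_blocks(NULL, inode, &map, 0);
up_read(&EXT4_I(inode)->i_data_sem);

if (ret > 0)		/* mapped: map.m_pblk .. map.m_pblk + map.m_len - 1 */
	pr_debug("lblk %u -> pblk %llu (%u blocks)\n", map.m_lblk,
		 (unsigned long long) map.m_pblk, map.m_len);
else if (ret == 0)	/* hole at map.m_lblk */
	pr_debug("lblk %u is unmapped\n", map.m_lblk);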
763 /*
764  * O_DIRECT for ext3 (or indirect map) based files
765  *
766  * If the O_DIRECT write will extend the file then add this inode to the
767  * orphan list. So recovery will truncate it back to the original size
768  * if the machine crashes during the write.
769  *
770  * If the O_DIRECT write is instantiating holes inside i_size and the machine
771  * crashes then stale disk data _may_ be exposed inside the file. But current
772  * VFS code falls back into buffered path in that case so we are safe.
773  */
774 ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
775  const struct iovec *iov, loff_t offset,
776  unsigned long nr_segs)
777 {
778  struct file *file = iocb->ki_filp;
779  struct inode *inode = file->f_mapping->host;
780  struct ext4_inode_info *ei = EXT4_I(inode);
781  handle_t *handle;
782  ssize_t ret;
783  int orphan = 0;
784  size_t count = iov_length(iov, nr_segs);
785  int retries = 0;
786 
787  if (rw == WRITE) {
788  loff_t final_size = offset + count;
789 
790  if (final_size > inode->i_size) {
791  /* Credits for sb + inode write */
792  handle = ext4_journal_start(inode, 2);
793  if (IS_ERR(handle)) {
794  ret = PTR_ERR(handle);
795  goto out;
796  }
797  ret = ext4_orphan_add(handle, inode);
798  if (ret) {
799  ext4_journal_stop(handle);
800  goto out;
801  }
802  orphan = 1;
803  ei->i_disksize = inode->i_size;
804  ext4_journal_stop(handle);
805  }
806  }
807 
808 retry:
809  if (rw == READ && ext4_should_dioread_nolock(inode)) {
810  if (unlikely(atomic_read(&EXT4_I(inode)->i_unwritten))) {
811  mutex_lock(&inode->i_mutex);
812  ext4_flush_unwritten_io(inode);
813  mutex_unlock(&inode->i_mutex);
814  }
815  /*
816  * Nolock dioread optimization may be dynamically disabled
817  * via ext4_inode_block_unlocked_dio(). Check inode's state
818  * while holding extra i_dio_count ref.
819  */
820  atomic_inc(&inode->i_dio_count);
821  smp_mb();
822  if (unlikely(ext4_test_inode_state(inode,
823  EXT4_STATE_DIOREAD_LOCK))) {
824  inode_dio_done(inode);
825  goto locked;
826  }
827  ret = __blockdev_direct_IO(rw, iocb, inode,
828  inode->i_sb->s_bdev, iov,
829  offset, nr_segs,
830  ext4_get_block, NULL, NULL, 0);
831  inode_dio_done(inode);
832  } else {
833 locked:
834  ret = blockdev_direct_IO(rw, iocb, inode, iov,
835  offset, nr_segs, ext4_get_block);
836 
837  if (unlikely((rw & WRITE) && ret < 0)) {
838  loff_t isize = i_size_read(inode);
839  loff_t end = offset + iov_length(iov, nr_segs);
840 
841  if (end > isize)
842  ext4_truncate_failed_write(inode);
843  }
844  }
845  if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
846  goto retry;
847 
848  if (orphan) {
849  int err;
850 
851  /* Credits for sb + inode write */
852  handle = ext4_journal_start(inode, 2);
853  if (IS_ERR(handle)) {
854  /* This is really bad luck. We've written the data
855  * but cannot extend i_size. Bail out and pretend
856  * the write failed... */
857  ret = PTR_ERR(handle);
858  if (inode->i_nlink)
859  ext4_orphan_del(NULL, inode);
860 
861  goto out;
862  }
863  if (inode->i_nlink)
864  ext4_orphan_del(handle, inode);
865  if (ret > 0) {
866  loff_t end = offset + ret;
867  if (end > inode->i_size) {
868  ei->i_disksize = end;
869  i_size_write(inode, end);
870  /*
871  * We're going to return a positive `ret'
872  * here due to non-zero-length I/O, so there's
873  * no way of reporting error returns from
874  * ext4_mark_inode_dirty() to userspace. So
875  * ignore it.
876  */
877  ext4_mark_inode_dirty(handle, inode);
878  }
879  }
880  err = ext4_journal_stop(handle);
881  if (ret == 0)
882  ret = err;
883  }
884 out:
885  return ret;
886 }
887 
888 /*
889  * Calculate the number of metadata blocks we need to reserve
890  * in order to allocate a new block at @lblock for a non-extent-based file
891  */
892 int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock)
893 {
894  struct ext4_inode_info *ei = EXT4_I(inode);
895  sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
896  int blk_bits;
897 
898  if (lblock < EXT4_NDIR_BLOCKS)
899  return 0;
900 
901  lblock -= EXT4_NDIR_BLOCKS;
902 
903  if (ei->i_da_metadata_calc_len &&
904  (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
905  ei->i_da_metadata_calc_len++;
906  return 0;
907  }
908  ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
909  ei->i_da_metadata_calc_len = 1;
910  blk_bits = order_base_2(lblock);
911  return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
912 }
913 
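A worked example of this calculation (illustrative), assuming 4 KiB blocks so EXT4_ADDR_PER_BLOCK_BITS == 10:

/*
 * lblock = 20000: past the 12 direct blocks, so lblock -= 12 -> 19988.
 * If a previous call already covered this double-indirect range, the
 * cached i_da_metadata_calc_len is bumped and 0 is returned.
 * Otherwise order_base_2(19988) = 15 and 15 / 10 + 1 = 2, i.e. two
 * metadata blocks are reserved (an indirect plus a double-indirect).
 */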
914 int ext4_ind_trans_blocks(struct inode *inode, int nrblocks, int chunk)
915 {
916  int indirects;
917 
918  /* if nrblocks are contiguous */
919  if (chunk) {
920  /*
921  * With N contiguous data blocks, we need at most
922  * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
923  * 2 dindirect blocks, and 1 tindirect block
924  */
925  return DIV_ROUND_UP(nrblocks,
926  EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
927  }
928  /*
929  * If nrblocks are not contiguous then, in the worst case, each block
930  * touches an indirect block, each indirect block touches a double
931  * indirect block, plus a triple indirect block.
932  */
933  indirects = nrblocks * 2 + 1;
934  return indirects;
935 }
936 
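Plugging in numbers, with 1024 pointers per block (4 KiB blocks) and nrblocks = 100:

/*
 * contiguous (chunk != 0):     DIV_ROUND_UP(100, 1024) + 4 = 1 + 4 = 5
 * non-contiguous (chunk == 0): 100 * 2 + 1 = 201 metadata-block touches
 */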
937 /*
938  * Truncate transactions can be complex and absolutely huge. So we need to
939  * be able to restart the transaction at a convenient checkpoint to make
940  * sure we don't overflow the journal.
941  *
942  * start_transaction gets us a new handle for a truncate transaction,
943  * and extend_transaction tries to extend the existing one a bit. If
944  * extend fails, we need to propagate the failure up and restart the
945  * transaction in the top-level truncate loop. --sct
946  */
947 static handle_t *start_transaction(struct inode *inode)
948 {
949  handle_t *result;
950 
951  result = ext4_journal_start(inode, ext4_blocks_for_truncate(inode));
952  if (!IS_ERR(result))
953  return result;
954 
955  ext4_std_error(inode->i_sb, PTR_ERR(result));
956  return result;
957 }
958 
959 /*
960  * Try to extend this transaction for the purposes of truncation.
961  *
962  * Returns 0 if we managed to create more room. If we can't create more
963  * room, and the transaction must be restarted we return 1.
964  */
965 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
966 {
967  if (!ext4_handle_valid(handle))
968  return 0;
969  if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
970  return 0;
971  if (!ext4_journal_extend(handle, ext4_blocks_for_truncate(inode)))
972  return 0;
973  return 1;
974 }
975 
976 /*
977  * Probably it should be a library function... search for first non-zero word
978  * or memcmp with zero_page, whatever is better for particular architecture.
979  * Linus?
980  */
981 static inline int all_zeroes(__le32 *p, __le32 *q)
982 {
983  while (p < q)
984  if (*p++)
985  return 0;
986  return 1;
987 }
988 
1024 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1025  ext4_lblk_t offsets[4], Indirect chain[4],
1026  __le32 *top)
1027 {
1028  Indirect *partial, *p;
1029  int k, err;
1030 
1031  *top = 0;
1032  /* Make k index the deepest non-null offset + 1 */
1033  for (k = depth; k > 1 && !offsets[k-1]; k--)
1034  ;
1035  partial = ext4_get_branch(inode, k, offsets, chain, &err);
1036  /* Writer: pointers */
1037  if (!partial)
1038  partial = chain + k-1;
1039  /*
1040  * If the branch acquired continuation since we've looked at it -
1041  * fine, it should all survive and (new) top doesn't belong to us.
1042  */
1043  if (!partial->key && *partial->p)
1044  /* Writer: end */
1045  goto no_top;
1046  for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
1047  ;
1048  /*
1049  * OK, we've found the last block that must survive. The rest of our
1050  * branch should be detached before unlocking. However, if that rest
1051  * of branch is all ours and does not grow immediately from the inode
1052  * it's easier to cheat and just decrement partial->p.
1053  */
1054  if (p == chain + k - 1 && p > chain) {
1055  p->p--;
1056  } else {
1057  *top = *p->p;
1058  /* Nope, don't do this in ext4. Must leave the tree intact */
1059 #if 0
1060  *p->p = 0;
1061 #endif
1062  }
1063  /* Writer: end */
1064 
1065  while (partial > p) {
1066  brelse(partial->bh);
1067  partial--;
1068  }
1069 no_top:
1070  return partial;
1071 }
1072 
1073 /*
1074  * Zero a number of block pointers in either an inode or an indirect block.
1075  * If we restart the transaction we must again get write access to the
1076  * indirect block for further modification.
1077  *
1078  * We release `count' blocks on disk, but (last - first) may be greater
1079  * than `count' because there can be holes in there.
1080  *
1081  * Return 0 on success, 1 on invalid block range
1082  * and < 0 on fatal error.
1083  */
1084 static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
1085  struct buffer_head *bh,
1086  ext4_fsblk_t block_to_free,
1087  unsigned long count, __le32 *first,
1088  __le32 *last)
1089 {
1090  __le32 *p;
1091  int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
1092  int err;
1093 
1094  if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1095  flags |= EXT4_FREE_BLOCKS_METADATA;
1096 
1097  if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
1098  count)) {
1099  EXT4_ERROR_INODE(inode, "attempt to clear invalid "
1100  "blocks %llu len %lu",
1101  (unsigned long long) block_to_free, count);
1102  return 1;
1103  }
1104 
1105  if (try_to_extend_transaction(handle, inode)) {
1106  if (bh) {
1107  BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1108  err = ext4_handle_dirty_metadata(handle, inode, bh);
1109  if (unlikely(err))
1110  goto out_err;
1111  }
1112  err = ext4_mark_inode_dirty(handle, inode);
1113  if (unlikely(err))
1114  goto out_err;
1115  err = ext4_truncate_restart_trans(handle, inode,
1116  ext4_blocks_for_truncate(inode));
1117  if (unlikely(err))
1118  goto out_err;
1119  if (bh) {
1120  BUFFER_TRACE(bh, "retaking write access");
1121  err = ext4_journal_get_write_access(handle, bh);
1122  if (unlikely(err))
1123  goto out_err;
1124  }
1125  }
1126 
1127  for (p = first; p < last; p++)
1128  *p = 0;
1129 
1130  ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
1131  return 0;
1132 out_err:
1133  ext4_std_error(inode->i_sb, err);
1134  return err;
1135 }
1136 
1156 static void ext4_free_data(handle_t *handle, struct inode *inode,
1157  struct buffer_head *this_bh,
1158  __le32 *first, __le32 *last)
1159 {
1160  ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
1161  unsigned long count = 0; /* Number of blocks in the run */
1162  __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
1163  corresponding to
1164  block_to_free */
1165  ext4_fsblk_t nr; /* Current block # */
1166  __le32 *p; /* Pointer into inode/ind
1167  for current block */
1168  int err = 0;
1169 
1170  if (this_bh) { /* For indirect block */
1171  BUFFER_TRACE(this_bh, "get_write_access");
1172  err = ext4_journal_get_write_access(handle, this_bh);
1173  /* Important: if we can't update the indirect pointers
1174  * to the blocks, we can't free them. */
1175  if (err)
1176  return;
1177  }
1178 
1179  for (p = first; p < last; p++) {
1180  nr = le32_to_cpu(*p);
1181  if (nr) {
1182  /* accumulate blocks to free if they're contiguous */
1183  if (count == 0) {
1184  block_to_free = nr;
1185  block_to_free_p = p;
1186  count = 1;
1187  } else if (nr == block_to_free + count) {
1188  count++;
1189  } else {
1190  err = ext4_clear_blocks(handle, inode, this_bh,
1191  block_to_free, count,
1192  block_to_free_p, p);
1193  if (err)
1194  break;
1195  block_to_free = nr;
1196  block_to_free_p = p;
1197  count = 1;
1198  }
1199  }
1200  }
1201 
1202  if (!err && count > 0)
1203  err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
1204  count, block_to_free_p, p);
1205  if (err < 0)
1206  /* fatal error */
1207  return;
1208 
1209  if (this_bh) {
1210  BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
1211 
1212  /*
1213  * The buffer head should have an attached journal head at this
1214  * point. However, if the data is corrupted and an indirect
1215  * block pointed to itself, it would have been detached when
1216  * the block was cleared. Check for this instead of OOPSing.
1217  */
1218  if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
1219  ext4_handle_dirty_metadata(handle, inode, this_bh);
1220  else
1221  EXT4_ERROR_INODE(inode,
1222  "circular indirect block detected at "
1223  "block %llu",
1224  (unsigned long long) this_bh->b_blocknr);
1225  }
1226 }
1227 
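As an illustration of the run accumulation above (hypothetical block numbers):

/*
 * Pointer range { 100, 101, 102, 0, 200 }:
 *   - 100, 101, 102 form one contiguous run and are released by a
 *     single ext4_clear_blocks() call with count == 3;
 *   - the zero entry is a hole and is simply skipped;
 *   - 200 starts a new run and is freed by the final call after the loop.
 */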
1241 static void ext4_free_branches(handle_t *handle, struct inode *inode,
1242  struct buffer_head *parent_bh,
1243  __le32 *first, __le32 *last, int depth)
1244 {
1245  ext4_fsblk_t nr;
1246  __le32 *p;
1247 
1248  if (ext4_handle_is_aborted(handle))
1249  return;
1250 
1251  if (depth--) {
1252  struct buffer_head *bh;
1253  int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1254  p = last;
1255  while (--p >= first) {
1256  nr = le32_to_cpu(*p);
1257  if (!nr)
1258  continue; /* A hole */
1259 
1260  if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
1261  nr, 1)) {
1262  EXT4_ERROR_INODE(inode,
1263  "invalid indirect mapped "
1264  "block %lu (level %d)",
1265  (unsigned long) nr, depth);
1266  break;
1267  }
1268 
1269  /* Go read the buffer for the next level down */
1270  bh = sb_bread(inode->i_sb, nr);
1271 
1272  /*
1273  * A read failure? Report error and clear slot
1274  * (should be rare).
1275  */
1276  if (!bh) {
1277  EXT4_ERROR_INODE_BLOCK(inode, nr,
1278  "Read failure");
1279  continue;
1280  }
1281 
1282  /* This zaps the entire block. Bottom up. */
1283  BUFFER_TRACE(bh, "free child branches");
1284  ext4_free_branches(handle, inode, bh,
1285  (__le32 *) bh->b_data,
1286  (__le32 *) bh->b_data + addr_per_block,
1287  depth);
1288  brelse(bh);
1289 
1290  /*
1291  * Everything below this pointer has been
1292  * released. Now let this top-of-subtree go.
1293  *
1294  * We want the freeing of this indirect block to be
1295  * atomic in the journal with the updating of the
1296  * bitmap block which owns it. So make some room in
1297  * the journal.
1298  *
1299  * We zero the parent pointer *after* freeing its
1300  * pointee in the bitmaps, so if extend_transaction()
1301  * for some reason fails to put the bitmap changes and
1302  * the release into the same transaction, recovery
1303  * will merely complain about releasing a free block,
1304  * rather than leaking blocks.
1305  */
1306  if (ext4_handle_is_aborted(handle))
1307  return;
1308  if (try_to_extend_transaction(handle, inode)) {
1309  ext4_mark_inode_dirty(handle, inode);
1310  ext4_truncate_restart_trans(handle, inode,
1311  ext4_blocks_for_truncate(inode));
1312  }
1313 
1314  /*
1315  * The forget flag here is critical because if
1316  * we are journaling (and not doing data
1317  * journaling), we have to make sure a revoke
1318  * record is written to prevent the journal
1319  * replay from overwriting the (former)
1320  * indirect block if it gets reallocated as a
1321  * data block. This must happen in the same
1322  * transaction where the data blocks are
1323  * actually freed.
1324  */
1325  ext4_free_blocks(handle, inode, NULL, nr, 1,
1326  EXT4_FREE_BLOCKS_METADATA|
1327  EXT4_FREE_BLOCKS_FORGET);
1328 
1329  if (parent_bh) {
1330  /*
1331  * The block which we have just freed is
1332  * pointed to by an indirect block: journal it
1333  */
1334  BUFFER_TRACE(parent_bh, "get_write_access");
1335  if (!ext4_journal_get_write_access(handle,
1336  parent_bh)){
1337  *p = 0;
1338  BUFFER_TRACE(parent_bh,
1339  "call ext4_handle_dirty_metadata");
1340  ext4_handle_dirty_metadata(handle,
1341  inode,
1342  parent_bh);
1343  }
1344  }
1345  }
1346  } else {
1347  /* We have reached the bottom of the tree. */
1348  BUFFER_TRACE(parent_bh, "free data blocks");
1349  ext4_free_data(handle, inode, parent_bh, first, last);
1350  }
1351 }
1352 
1353 void ext4_ind_truncate(struct inode *inode)
1354 {
1355  handle_t *handle;
1356  struct ext4_inode_info *ei = EXT4_I(inode);
1357  __le32 *i_data = ei->i_data;
1358  int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
1359  struct address_space *mapping = inode->i_mapping;
1360  ext4_lblk_t offsets[4];
1361  Indirect chain[4];
1362  Indirect *partial;
1363  __le32 nr = 0;
1364  int n = 0;
1365  ext4_lblk_t last_block, max_block;
1366  loff_t page_len;
1367  unsigned blocksize = inode->i_sb->s_blocksize;
1368  int err;
1369 
1370  handle = start_transaction(inode);
1371  if (IS_ERR(handle))
1372  return; /* AKPM: return what? */
1373 
1374  last_block = (inode->i_size + blocksize-1)
1375  >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1376  max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
1377  >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
1378 
1379  if (inode->i_size % PAGE_CACHE_SIZE != 0) {
1380  page_len = PAGE_CACHE_SIZE -
1381  (inode->i_size & (PAGE_CACHE_SIZE - 1));
1382 
1383  err = ext4_discard_partial_page_buffers(handle,
1384  mapping, inode->i_size, page_len, 0);
1385 
1386  if (err)
1387  goto out_stop;
1388  }
1389 
1390  if (last_block != max_block) {
1391  n = ext4_block_to_path(inode, last_block, offsets, NULL);
1392  if (n == 0)
1393  goto out_stop; /* error */
1394  }
1395 
1396  /*
1397  * OK. This truncate is going to happen. We add the inode to the
1398  * orphan list, so that if this truncate spans multiple transactions,
1399  * and we crash, we will resume the truncate when the filesystem
1400  * recovers. It also marks the inode dirty, to catch the new size.
1401  *
1402  * Implication: the file must always be in a sane, consistent
1403  * truncatable state while each transaction commits.
1404  */
1405  if (ext4_orphan_add(handle, inode))
1406  goto out_stop;
1407 
1408  /*
1409  * From here we block out all ext4_get_block() callers who want to
1410  * modify the block allocation tree.
1411  */
1412  down_write(&ei->i_data_sem);
1413 
1414  ext4_discard_preallocations(inode);
1415 
1416  /*
1417  * The orphan list entry will now protect us from any crash which
1418  * occurs before the truncate completes, so it is now safe to propagate
1419  * the new, shorter inode size (held for now in i_size) into the
1420  * on-disk inode. We do this via i_disksize, which is the value which
1421  * ext4 *really* writes onto the disk inode.
1422  */
1423  ei->i_disksize = inode->i_size;
1424 
1425  if (last_block == max_block) {
1426  /*
1427  * It is unnecessary to free any data blocks if last_block is
1428  * equal to the indirect block limit.
1429  */
1430  goto out_unlock;
1431  } else if (n == 1) { /* direct blocks */
1432  ext4_free_data(handle, inode, NULL, i_data+offsets[0],
1433  i_data + EXT4_NDIR_BLOCKS);
1434  goto do_indirects;
1435  }
1436 
1437  partial = ext4_find_shared(inode, n, offsets, chain, &nr);
1438  /* Kill the top of shared branch (not detached) */
1439  if (nr) {
1440  if (partial == chain) {
1441  /* Shared branch grows from the inode */
1442  ext4_free_branches(handle, inode, NULL,
1443  &nr, &nr+1, (chain+n-1) - partial);
1444  *partial->p = 0;
1445  /*
1446  * We mark the inode dirty prior to restart,
1447  * and prior to stop. No need for it here.
1448  */
1449  } else {
1450  /* Shared branch grows from an indirect block */
1451  BUFFER_TRACE(partial->bh, "get_write_access");
1452  ext4_free_branches(handle, inode, partial->bh,
1453  partial->p,
1454  partial->p+1, (chain+n-1) - partial);
1455  }
1456  }
1457  /* Clear the ends of indirect blocks on the shared branch */
1458  while (partial > chain) {
1459  ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
1460  (__le32*)partial->bh->b_data+addr_per_block,
1461  (chain+n-1) - partial);
1462  BUFFER_TRACE(partial->bh, "call brelse");
1463  brelse(partial->bh);
1464  partial--;
1465  }
1466 do_indirects:
1467  /* Kill the remaining (whole) subtrees */
1468  switch (offsets[0]) {
1469  default:
1470  nr = i_data[EXT4_IND_BLOCK];
1471  if (nr) {
1472  ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
1473  i_data[EXT4_IND_BLOCK] = 0;
1474  }
1475  case EXT4_IND_BLOCK:
1476  nr = i_data[EXT4_DIND_BLOCK];
1477  if (nr) {
1478  ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
1479  i_data[EXT4_DIND_BLOCK] = 0;
1480  }
1481  case EXT4_DIND_BLOCK:
1482  nr = i_data[EXT4_TIND_BLOCK];
1483  if (nr) {
1484  ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
1485  i_data[EXT4_TIND_BLOCK] = 0;
1486  }
1487  case EXT4_TIND_BLOCK:
1488  ;
1489  }
1490 
1491 out_unlock:
1492  up_write(&ei->i_data_sem);
1493  inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
1494  ext4_mark_inode_dirty(handle, inode);
1495 
1496  /*
1497  * In a multi-transaction truncate, we only make the final transaction
1498  * synchronous
1499  */
1500  if (IS_SYNC(inode))
1501  ext4_handle_sync(handle);
1502 out_stop:
1503  /*
1504  * If this was a simple ftruncate(), and the file will remain alive
1505  * then we need to clear up the orphan record which we created above.
1506  * However, if this was a real unlink then we were called by
1507  * ext4_delete_inode(), and we allow that function to clean up the
1508  * orphan info for us.
1509  */
1510  if (inode->i_nlink)
1511  ext4_orphan_del(handle, inode);
1512 
1513  ext4_journal_stop(handle);
1514  trace_ext4_truncate_exit(inode);
1515 }
1516