Linux Kernel  3.7.1
inode.c
1 /*
2  * linux/fs/ext3/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card ([email protected])
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  * from
10  *
11  * linux/fs/minix/inode.c
12  *
13  * Copyright (C) 1991, 1992 Linus Torvalds
14  *
15  * Goal-directed block allocation by Stephen Tweedie
16  * ([email protected]), 1993, 1998
17  * Big-endian to little-endian byte-swapping/bitmaps by
18  * David S. Miller ([email protected]), 1995
19  * 64-bit file support on 64-bit platforms by Jakub Jelinek
20  * ([email protected])
21  *
22  * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
23  */
24 
25 #include <linux/highuid.h>
26 #include <linux/quotaops.h>
27 #include <linux/writeback.h>
28 #include <linux/mpage.h>
29 #include <linux/namei.h>
30 #include "ext3.h"
31 #include "xattr.h"
32 #include "acl.h"
33 
34 static int ext3_writepage_trans_blocks(struct inode *inode);
35 static int ext3_block_truncate_page(struct inode *inode, loff_t from);
36 
37 /*
38  * Test whether an inode is a fast symlink.
39  */
40 static int ext3_inode_is_fast_symlink(struct inode *inode)
41 {
42  int ea_blocks = EXT3_I(inode)->i_file_acl ?
43  (inode->i_sb->s_blocksize >> 9) : 0;
44 
45  return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
46 }
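/*
 * Illustrative arithmetic: i_blocks counts 512-byte sectors, so on a
 * 4K filesystem an xattr block accounts for s_blocksize >> 9 = 8
 * sectors.  A fast symlink keeps its target inside i_data and owns no
 * data blocks, so after subtracting the xattr sectors its i_blocks
 * must come out to exactly 0 - which is what the test above checks.
 */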
47 
48 /*
49  * The ext3 forget function must perform a revoke if we are freeing data
50  * which has been journaled. Metadata (eg. indirect blocks) must be
51  * revoked in all cases.
52  *
53  * "bh" may be NULL: a metadata block may have been freed from memory
54  * but there may still be a record of it in the journal, and that record
55  * still needs to be revoked.
56  */
57 int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
58  struct buffer_head *bh, ext3_fsblk_t blocknr)
59 {
60  int err;
61 
62  might_sleep();
63 
64  trace_ext3_forget(inode, is_metadata, blocknr);
65  BUFFER_TRACE(bh, "enter");
66 
67  jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
68  "data mode %lx\n",
69  bh, is_metadata, inode->i_mode,
70  test_opt(inode->i_sb, DATA_FLAGS));
71 
72  /* Never use the revoke function if we are doing full data
73  * journaling: there is no need to, and a V1 superblock won't
74  * support it. Otherwise, only skip the revoke on un-journaled
75  * data blocks. */
76 
77  if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
78  (!is_metadata && !ext3_should_journal_data(inode))) {
79  if (bh) {
80  BUFFER_TRACE(bh, "call journal_forget");
81  return ext3_journal_forget(handle, bh);
82  }
83  return 0;
84  }
85 
86  /*
87  * data!=journal && (is_metadata || should_journal_data(inode))
88  */
89  BUFFER_TRACE(bh, "call ext3_journal_revoke");
90  err = ext3_journal_revoke(handle, blocknr, bh);
91  if (err)
92  ext3_abort(inode->i_sb, __func__,
93  "error %d when attempting revoke", err);
94  BUFFER_TRACE(bh, "exit");
95  return err;
96 }
97 
98 /*
99  * Work out how many blocks we need to proceed with the next chunk of a
100  * truncate transaction.
101  */
102 static unsigned long blocks_for_truncate(struct inode *inode)
103 {
104  unsigned long needed;
105 
106  needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
107 
108  /* Give ourselves just enough room to cope with inodes in which
109  * i_blocks is corrupt: we've seen disk corruptions in the past
110  * which resulted in random data in an inode which looked enough
111  * like a regular file for ext3 to try to delete it. Things
112  * will go a bit crazy if that happens, but at least we should
113  * try not to panic the whole kernel. */
114  if (needed < 2)
115  needed = 2;
116 
117  /* But we need to bound the transaction so we don't overflow the
118  * journal. */
119  if (needed > EXT3_MAX_TRANS_DATA)
120  needed = EXT3_MAX_TRANS_DATA;
121 
122  return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
123 }
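/*
 * For illustration: on a 4K filesystem (s_blocksize_bits = 12) a 1MB
 * file has i_blocks = 2048 sectors, so needed = 2048 >> 3 = 256.  That
 * is larger than EXT3_MAX_TRANS_DATA (64), so the estimate is clamped
 * and we return EXT3_DATA_TRANS_BLOCKS(sb) + 64 credits.
 */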
124 
125 /*
126  * Truncate transactions can be complex and absolutely huge. So we need to
127  * be able to restart the transaction at a convenient checkpoint to make
128  * sure we don't overflow the journal.
129  *
130  * start_transaction gets us a new handle for a truncate transaction,
131  * and extend_transaction tries to extend the existing one a bit. If
132  * extend fails, we need to propagate the failure up and restart the
133  * transaction in the top-level truncate loop. --sct
134  */
135 static handle_t *start_transaction(struct inode *inode)
136 {
137  handle_t *result;
138 
139  result = ext3_journal_start(inode, blocks_for_truncate(inode));
140  if (!IS_ERR(result))
141  return result;
142 
143  ext3_std_error(inode->i_sb, PTR_ERR(result));
144  return result;
145 }
146 
147 /*
148  * Try to extend this transaction for the purposes of truncation.
149  *
150  * Returns 0 if we managed to create more room. If we can't create more
151  * room, the transaction must be restarted and we return 1.
152  */
153 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
154 {
155  if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
156  return 0;
157  if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
158  return 0;
159  return 1;
160 }
161 
162 /*
163  * Restart the transaction associated with *handle. This does a commit,
164  * so before we call here everything must be consistently dirtied against
165  * this transaction.
166  */
167 static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
168 {
169  int ret;
170 
171  jbd_debug(2, "restarting handle %p\n", handle);
172  /*
173  * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
174  * At this moment, get_block can be called only for blocks inside
175  * i_size since page cache has been already dropped and writes are
176  * blocked by i_mutex. So we can safely drop the truncate_mutex.
177  */
178  mutex_unlock(&EXT3_I(inode)->truncate_mutex);
179  ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
180  mutex_lock(&EXT3_I(inode)->truncate_mutex);
181  return ret;
182 }
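/*
 * A rough sketch of the calling pattern in the truncate path (not a
 * quote of ext3_truncate(), just the shape described above):
 *
 *	if (try_to_extend_transaction(handle, inode))
 *		truncate_restart_transaction(handle, inode);
 *
 * i.e. keep extending the running handle while it still has credits to
 * spare, and fall back to a commit-and-restart at a safe checkpoint
 * when ext3_journal_extend() refuses.
 */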
183 
184 /*
185  * Called at inode eviction from icache
186  */
187 void ext3_evict_inode (struct inode *inode)
188 {
189  struct ext3_inode_info *ei = EXT3_I(inode);
190  struct ext3_block_alloc_info *rsv;
191  handle_t *handle;
192  int want_delete = 0;
193 
194  trace_ext3_evict_inode(inode);
195  if (!inode->i_nlink && !is_bad_inode(inode)) {
196  dquot_initialize(inode);
197  want_delete = 1;
198  }
199 
200  /*
201  * When journalling data, dirty buffers are tracked only in the journal.
202  * So although mm thinks everything is clean and ready for reaping the
203  * inode might still have some pages to write in the running
204  * transaction or waiting to be checkpointed. Thus calling
205  * journal_invalidatepage() (via truncate_inode_pages()) to discard
206  * these buffers can cause data loss. Also even if we did not discard
207  * these buffers, we would have no way to find them after the inode
208  * is reaped and thus the user could see stale data if they try to read
209  * them before the transaction is checkpointed. So be careful and
210  * force everything to disk here... We use ei->i_datasync_tid to
211  * store the newest transaction containing inode's data.
212  *
213  * Note that directories do not have this problem because they don't
214  * use page cache.
215  *
216  * The s_journal check handles the case when ext3_get_journal() fails
217  * and puts the journal inode.
218  */
219  if (inode->i_nlink && ext3_should_journal_data(inode) &&
220  EXT3_SB(inode->i_sb)->s_journal &&
221  (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
222  tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
223  journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
224 
225  log_start_commit(journal, commit_tid);
226  log_wait_commit(journal, commit_tid);
227  filemap_write_and_wait(&inode->i_data);
228  }
229  truncate_inode_pages(&inode->i_data, 0);
230 
231  ext3_discard_reservation(inode);
232  rsv = ei->i_block_alloc_info;
233  ei->i_block_alloc_info = NULL;
234  if (unlikely(rsv))
235  kfree(rsv);
236 
237  if (!want_delete)
238  goto no_delete;
239 
240  handle = start_transaction(inode);
241  if (IS_ERR(handle)) {
242  /*
243  * If we're going to skip the normal cleanup, we still need to
244  * make sure that the in-core orphan linked list is properly
245  * cleaned up.
246  */
247  ext3_orphan_del(NULL, inode);
248  goto no_delete;
249  }
250 
251  if (IS_SYNC(inode))
252  handle->h_sync = 1;
253  inode->i_size = 0;
254  if (inode->i_blocks)
255  ext3_truncate(inode);
256  /*
257  * Kill off the orphan record created when the inode lost the last
258  * link. Note that ext3_orphan_del() has to be able to cope with the
259  * deletion of a non-existent orphan - ext3_truncate() could
260  * have removed the record.
261  */
262  ext3_orphan_del(handle, inode);
263  ei->i_dtime = get_seconds();
264 
265  /*
266  * One subtle ordering requirement: if anything has gone wrong
267  * (transaction abort, IO errors, whatever), then we can still
268  * do these next steps (the fs will already have been marked as
269  * having errors), but we can't free the inode if the mark_dirty
270  * fails.
271  */
272  if (ext3_mark_inode_dirty(handle, inode)) {
273  /* If that failed, just dquot_drop() and be done with that */
274  dquot_drop(inode);
275  clear_inode(inode);
276  } else {
277  ext3_xattr_delete_inode(handle, inode);
278  dquot_free_inode(inode);
279  dquot_drop(inode);
280  clear_inode(inode);
281  ext3_free_inode(handle, inode);
282  }
283  ext3_journal_stop(handle);
284  return;
285 no_delete:
286  clear_inode(inode);
287  dquot_drop(inode);
288 }
289 
290 typedef struct {
291  __le32 *p;
292  __le32 key;
293  struct buffer_head *bh;
294 } Indirect;
295 
296 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
297 {
298  p->key = *(p->p = v);
299  p->bh = bh;
300 }
301 
302 static int verify_chain(Indirect *from, Indirect *to)
303 {
304  while (from <= to && from->key == *from->p)
305  from++;
306  return (from > to);
307 }
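/*
 * Reading guide for the helpers above: an Indirect entry caches one
 * step of a lookup.  p points at the slot (in i_data or inside an
 * indirect block), key is the block number read from that slot, and bh
 * is the buffer the slot lives in (NULL for the inode itself).
 * verify_chain() re-checks key == *p at every step, so a concurrent
 * truncate that clears a slot shows up as a mismatch.
 */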
308 
330 /*
331  * Portability note: the last comparison (check that we fit into triple
332  * indirect block) is spelled differently, because otherwise on an
333  * architecture with 32-bit longs and 8Kb pages we might get into trouble
334  * if our filesystem had 8Kb blocks. We might use long long, but that would
335  * kill us on x86. Oh, well, at least the sign propagation does not matter -
336  * i_block would have to be negative in the very beginning, so we would not
337  * get there at all.
338  */
339 
340 static int ext3_block_to_path(struct inode *inode,
341  long i_block, int offsets[4], int *boundary)
342 {
343  int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
344  int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
345  const long direct_blocks = EXT3_NDIR_BLOCKS,
346  indirect_blocks = ptrs,
347  double_blocks = (1 << (ptrs_bits * 2));
348  int n = 0;
349  int final = 0;
350 
351  if (i_block < 0) {
352  ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
353  } else if (i_block < direct_blocks) {
354  offsets[n++] = i_block;
355  final = direct_blocks;
356  } else if ( (i_block -= direct_blocks) < indirect_blocks) {
357  offsets[n++] = EXT3_IND_BLOCK;
358  offsets[n++] = i_block;
359  final = ptrs;
360  } else if ((i_block -= indirect_blocks) < double_blocks) {
361  offsets[n++] = EXT3_DIND_BLOCK;
362  offsets[n++] = i_block >> ptrs_bits;
363  offsets[n++] = i_block & (ptrs - 1);
364  final = ptrs;
365  } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
366  offsets[n++] = EXT3_TIND_BLOCK;
367  offsets[n++] = i_block >> (ptrs_bits * 2);
368  offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
369  offsets[n++] = i_block & (ptrs - 1);
370  final = ptrs;
371  } else {
372  ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
373  }
374  if (boundary)
375  *boundary = final - 1 - (i_block & (ptrs - 1));
376  return n;
377 }
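/*
 * A worked example (numbers for illustration only): with 1K blocks
 * there are 256 pointers per block (ptrs_bits = 8).  Logical block 300
 * fails the direct test (300 >= 12) and the single-indirect test
 * (300 - 12 = 288 >= 256), then lands in the double-indirect range
 * with i_block = 32, giving depth 3 and
 * offsets[] = { EXT3_DIND_BLOCK, 32 >> 8 = 0, 32 & 255 = 32 }.
 */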
378 
408 static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
409  Indirect chain[4], int *err)
410 {
411  struct super_block *sb = inode->i_sb;
412  Indirect *p = chain;
413  struct buffer_head *bh;
414 
415  *err = 0;
416  /* i_data is not going away, no lock needed */
417  add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
418  if (!p->key)
419  goto no_block;
420  while (--depth) {
421  bh = sb_bread(sb, le32_to_cpu(p->key));
422  if (!bh)
423  goto failure;
424  /* Reader: pointers */
425  if (!verify_chain(chain, p))
426  goto changed;
427  add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
428  /* Reader: end */
429  if (!p->key)
430  goto no_block;
431  }
432  return NULL;
433 
434 changed:
435  brelse(bh);
436  *err = -EAGAIN;
437  goto no_block;
438 failure:
439  *err = -EIO;
440 no_block:
441  return p;
442 }
443 
464 static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
465 {
466  struct ext3_inode_info *ei = EXT3_I(inode);
467  __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
468  __le32 *p;
469  ext3_fsblk_t bg_start;
470  ext3_grpblk_t colour;
471 
472  /* Try to find previous block */
473  for (p = ind->p - 1; p >= start; p--) {
474  if (*p)
475  return le32_to_cpu(*p);
476  }
477 
478  /* No such thing, so let's try location of indirect block */
479  if (ind->bh)
480  return ind->bh->b_blocknr;
481 
482  /*
483  * It is going to be referred to from the inode itself? OK, just put it
484  * into the same cylinder group then.
485  */
486  bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
487  colour = (current->pid % 16) *
488  (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
489  return bg_start + colour;
490 }
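/*
 * The colour term spreads concurrent allocators across the group; the
 * numbers here are only an example.  With 8192 blocks per group, each
 * of the 16 PID-derived slots is 512 blocks wide, so two processes
 * allocating in the same group start their searches 512 blocks apart
 * instead of contending for the same free extent.
 */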
491 
502 static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
503  Indirect *partial)
504 {
505  struct ext3_block_alloc_info *block_i;
506 
507  block_i = EXT3_I(inode)->i_block_alloc_info;
508 
509  /*
510  * try the heuristic for sequential allocation,
511  * failing that at least try to get decent locality.
512  */
513  if (block_i && (block == block_i->last_alloc_logical_block + 1)
514  && (block_i->last_alloc_physical_block != 0)) {
515  return block_i->last_alloc_physical_block + 1;
516  }
517 
518  return ext3_find_near(inode, partial);
519 }
520 
533 static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
534  int blocks_to_boundary)
535 {
536  unsigned long count = 0;
537 
538  /*
539  * Simple case: the [t,d]indirect block(s) have not been allocated yet,
540  * so clearly no blocks on that path have been allocated either.
541  */
542  if (k > 0) {
543  /* right now we don't handle cross boundary allocation */
544  if (blks < blocks_to_boundary + 1)
545  count += blks;
546  else
547  count += blocks_to_boundary + 1;
548  return count;
549  }
550 
551  count++;
552  while (count < blks && count <= blocks_to_boundary &&
553  le32_to_cpu(*(branch[0].p + count)) == 0) {
554  count++;
555  }
556  return count;
557 }
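/*
 * Example (hypothetical layout): with k == 0, blks = 8 and the
 * boundary well ahead, if the three slots after branch[0].p are zero
 * and the fourth is already mapped, the loop stops with count = 4:
 * the requested block plus its three free neighbours, never crossing
 * an allocated slot or the boundary.
 */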
558 
573 static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
574  ext3_fsblk_t goal, int indirect_blks, int blks,
575  ext3_fsblk_t new_blocks[4], int *err)
576 {
577  int target, i;
578  unsigned long count = 0;
579  int index = 0;
580  ext3_fsblk_t current_block = 0;
581  int ret = 0;
582 
583  /*
584  * Here we try to allocate the requested multiple blocks at once,
585  * on a best-effort basis.
586  * To build a branch, we should allocate blocks for
587  * the indirect blocks (if not allocated yet) and at least
588  * the first direct block of this branch. That's the
589  * minimum number of blocks we need to allocate (required).
590  */
591  target = blks + indirect_blks;
592 
593  while (1) {
594  count = target;
595  /* allocating blocks for indirect blocks and direct blocks */
596  current_block = ext3_new_blocks(handle,inode,goal,&count,err);
597  if (*err)
598  goto failed_out;
599 
600  target -= count;
601  /* allocate blocks for indirect blocks */
602  while (index < indirect_blks && count) {
603  new_blocks[index++] = current_block++;
604  count--;
605  }
606 
607  if (count > 0)
608  break;
609  }
610 
611  /* save the new block number for the first direct block */
612  new_blocks[index] = current_block;
613 
614  /* total number of blocks allocated for direct blocks */
615  ret = count;
616  *err = 0;
617  return ret;
618 failed_out:
619  for (i = 0; i <index; i++)
620  ext3_free_blocks(handle, inode, new_blocks[i], 1);
621  return ret;
622 }
623 
651 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
652  int indirect_blks, int *blks, ext3_fsblk_t goal,
653  int *offsets, Indirect *branch)
654 {
655  int blocksize = inode->i_sb->s_blocksize;
656  int i, n = 0;
657  int err = 0;
658  struct buffer_head *bh;
659  int num;
660  ext3_fsblk_t new_blocks[4];
661  ext3_fsblk_t current_block;
662 
663  num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
664  *blks, new_blocks, &err);
665  if (err)
666  return err;
667 
668  branch[0].key = cpu_to_le32(new_blocks[0]);
669  /*
670  * metadata blocks and data blocks are allocated.
671  */
672  for (n = 1; n <= indirect_blks; n++) {
673  /*
674  * Get buffer_head for parent block, zero it out
675  * and set the pointer to new one, then send
676  * parent to disk.
677  */
678  bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
679  branch[n].bh = bh;
680  lock_buffer(bh);
681  BUFFER_TRACE(bh, "call get_create_access");
682  err = ext3_journal_get_create_access(handle, bh);
683  if (err) {
684  unlock_buffer(bh);
685  brelse(bh);
686  goto failed;
687  }
688 
689  memset(bh->b_data, 0, blocksize);
690  branch[n].p = (__le32 *) bh->b_data + offsets[n];
691  branch[n].key = cpu_to_le32(new_blocks[n]);
692  *branch[n].p = branch[n].key;
693  if ( n == indirect_blks) {
694  current_block = new_blocks[n];
695  /*
696  * End of chain, update the last new metablock of
697  * the chain to point to the newly allocated
698  * data block numbers
699  */
700  for (i=1; i < num; i++)
701  *(branch[n].p + i) = cpu_to_le32(++current_block);
702  }
703  BUFFER_TRACE(bh, "marking uptodate");
704  set_buffer_uptodate(bh);
705  unlock_buffer(bh);
706 
707  BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
708  err = ext3_journal_dirty_metadata(handle, bh);
709  if (err)
710  goto failed;
711  }
712  *blks = num;
713  return err;
714 failed:
715  /* Allocation failed, free what we already allocated */
716  for (i = 1; i <= n ; i++) {
717  BUFFER_TRACE(branch[i].bh, "call journal_forget");
718  ext3_journal_forget(handle, branch[i].bh);
719  }
720  for (i = 0; i <indirect_blks; i++)
721  ext3_free_blocks(handle, inode, new_blocks[i], 1);
722 
723  ext3_free_blocks(handle, inode, new_blocks[i], num);
724 
725  return err;
726 }
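/*
 * Shape of the result, for a hypothetical indirect_blks = 2 and
 * num = 3: new_blocks[] holds { meta1, meta2, data1 }; branch[1] lives
 * in the freshly zeroed block meta1 and points at meta2; branch[2]
 * lives in meta2 and receives data1 at offsets[2], with data1 + 1 and
 * data1 + 2 stored in the two following slots by the loop above.
 */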
727 
741 static int ext3_splice_branch(handle_t *handle, struct inode *inode,
742  long block, Indirect *where, int num, int blks)
743 {
744  int i;
745  int err = 0;
746  struct ext3_block_alloc_info *block_i;
747  ext3_fsblk_t current_block;
748  struct ext3_inode_info *ei = EXT3_I(inode);
749  struct timespec now;
750 
751  block_i = ei->i_block_alloc_info;
752  /*
753  * If we're splicing into a [td]indirect block (as opposed to the
754  * inode) then we need to get write access to the [td]indirect block
755  * before the splice.
756  */
757  if (where->bh) {
758  BUFFER_TRACE(where->bh, "get_write_access");
759  err = ext3_journal_get_write_access(handle, where->bh);
760  if (err)
761  goto err_out;
762  }
763  /* That's it */
764 
765  *where->p = where->key;
766 
767  /*
768  * Update the host buffer_head or inode to point to the just-allocated
769  * direct blocks
770  */
771  if (num == 0 && blks > 1) {
772  current_block = le32_to_cpu(where->key) + 1;
773  for (i = 1; i < blks; i++)
774  *(where->p + i ) = cpu_to_le32(current_block++);
775  }
776 
777  /*
778  * update the most recently allocated logical & physical block
779  * in i_block_alloc_info, to help find the proper goal block for the next
780  * allocation
781  */
782  if (block_i) {
783  block_i->last_alloc_logical_block = block + blks - 1;
784  block_i->last_alloc_physical_block =
785  le32_to_cpu(where[num].key) + blks - 1;
786  }
787 
788  /* We are done with atomic stuff, now do the rest of housekeeping */
789  now = CURRENT_TIME_SEC;
790  if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
791  inode->i_ctime = now;
792  ext3_mark_inode_dirty(handle, inode);
793  }
794  /* ext3_mark_inode_dirty already updated i_sync_tid */
795  atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
796 
797  /* had we spliced it onto indirect block? */
798  if (where->bh) {
799  /*
800  * If we spliced it onto an indirect block, we haven't
801  * altered the inode. Note however that if it is being spliced
802  * onto an indirect block at the very end of the file (the
803  * file is growing) then we *will* alter the inode to reflect
804  * the new i_size. But that is not done here - it is done in
805  * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
806  */
807  jbd_debug(5, "splicing indirect only\n");
808  BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
809  err = ext3_journal_dirty_metadata(handle, where->bh);
810  if (err)
811  goto err_out;
812  } else {
813  /*
814  * OK, we spliced it into the inode itself on a direct block.
815  * Inode was dirtied above.
816  */
817  jbd_debug(5, "splicing direct\n");
818  }
819  return err;
820 
821 err_out:
822  for (i = 1; i <= num; i++) {
823  BUFFER_TRACE(where[i].bh, "call journal_forget");
824  ext3_journal_forget(handle, where[i].bh);
825  ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
826  }
827  ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
828 
829  return err;
830 }
831 
832 /*
833  * Allocation strategy is simple: if we have to allocate something, we will
834  * have to go the whole way to leaf. So let's do it before attaching anything
835  * to tree, set linkage between the newborn blocks, write them if sync is
836  * required, recheck the path, free and repeat if check fails, otherwise
837  * set the last missing link (that will protect us from any truncate-generated
838  * removals - all blocks on the path are immune now) and possibly force the
839  * write on the parent block.
840  * That has a nice additional property: no special recovery from the failed
841  * allocations is needed - we simply release blocks and do not touch anything
842  * reachable from inode.
843  *
844  * `handle' can be NULL if create == 0.
845  *
846  * The BKL may not be held on entry here. Be sure to take it early.
847  * return > 0, # of blocks mapped or allocated.
848  * return = 0, if plain lookup failed.
849  * return < 0, error case.
850  */
851 int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
852  sector_t iblock, unsigned long maxblocks,
853  struct buffer_head *bh_result,
854  int create)
855 {
856  int err = -EIO;
857  int offsets[4];
858  Indirect chain[4];
859  Indirect *partial;
860  ext3_fsblk_t goal;
861  int indirect_blks;
862  int blocks_to_boundary = 0;
863  int depth;
864  struct ext3_inode_info *ei = EXT3_I(inode);
865  int count = 0;
866  ext3_fsblk_t first_block = 0;
867 
868 
869  trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
870  J_ASSERT(handle != NULL || create == 0);
871  depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
872 
873  if (depth == 0)
874  goto out;
875 
876  partial = ext3_get_branch(inode, depth, offsets, chain, &err);
877 
878  /* Simplest case - block found, no allocation needed */
879  if (!partial) {
880  first_block = le32_to_cpu(chain[depth - 1].key);
881  clear_buffer_new(bh_result);
882  count++;
883  /*map more blocks*/
884  while (count < maxblocks && count <= blocks_to_boundary) {
885  ext3_fsblk_t blk;
886 
887  if (!verify_chain(chain, chain + depth - 1)) {
888  /*
889  * Indirect block might be removed by
890  * truncate while we were reading it.
891  * Handling of that case: forget what we've
892  * got now. Flag the err as EAGAIN, so it
893  * will reread.
894  */
895  err = -EAGAIN;
896  count = 0;
897  break;
898  }
899  blk = le32_to_cpu(*(chain[depth-1].p + count));
900 
901  if (blk == first_block + count)
902  count++;
903  else
904  break;
905  }
906  if (err != -EAGAIN)
907  goto got_it;
908  }
909 
910  /* Next simple case - plain lookup or failed read of indirect block */
911  if (!create || err == -EIO)
912  goto cleanup;
913 
914  /*
915  * Block out ext3_truncate while we alter the tree
916  */
917  mutex_lock(&ei->truncate_mutex);
918 
919  /*
920  * If the indirect block is missing while we are reading
921  * the chain(ext3_get_branch() returns -EAGAIN err), or
922  * if the chain has been changed after we grab the semaphore,
923  * (either because another process truncated this branch, or
924  * another get_block allocated this branch) re-grab the chain to see if
925  * the request block has been allocated or not.
926  *
927  * Since we already block the truncate/other get_block
928  * at this point, we will have the current copy of the chain when we
929  * splice the branch into the tree.
930  */
931  if (err == -EAGAIN || !verify_chain(chain, partial)) {
932  while (partial > chain) {
933  brelse(partial->bh);
934  partial--;
935  }
936  partial = ext3_get_branch(inode, depth, offsets, chain, &err);
937  if (!partial) {
938  count++;
939  mutex_unlock(&ei->truncate_mutex);
940  if (err)
941  goto cleanup;
942  clear_buffer_new(bh_result);
943  goto got_it;
944  }
945  }
946 
947  /*
948  * Okay, we need to do block allocation. Lazily initialize the block
949  * allocation info here if necessary
950  */
951  if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
952  ext3_init_block_alloc_info(inode);
953 
954  goal = ext3_find_goal(inode, iblock, partial);
955 
956  /* the number of blocks we need to allocate for [d,t]indirect blocks */
957  indirect_blks = (chain + depth) - partial - 1;
958 
959  /*
960  * Next look up the indirect map to count the total number of
961  * direct blocks to allocate for this branch.
962  */
963  count = ext3_blks_to_allocate(partial, indirect_blks,
964  maxblocks, blocks_to_boundary);
965  err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
966  offsets + (partial - chain), partial);
967 
968  /*
969  * The ext3_splice_branch call will free and forget any buffers
970  * on the new chain if there is a failure, but that risks using
971  * up transaction credits, especially for bitmaps where the
972  * credits cannot be returned. Can we handle this somehow? We
973  * may need to return -EAGAIN upwards in the worst case. --sct
974  */
975  if (!err)
976  err = ext3_splice_branch(handle, inode, iblock,
977  partial, indirect_blks, count);
978  mutex_unlock(&ei->truncate_mutex);
979  if (err)
980  goto cleanup;
981 
982  set_buffer_new(bh_result);
983 got_it:
984  map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
985  if (count > blocks_to_boundary)
986  set_buffer_boundary(bh_result);
987  err = count;
988  /* Clean up and exit */
989  partial = chain + depth - 1; /* the whole chain */
990 cleanup:
991  while (partial > chain) {
992  BUFFER_TRACE(partial->bh, "call brelse");
993  brelse(partial->bh);
994  partial--;
995  }
996  BUFFER_TRACE(bh_result, "returned");
997 out:
998  trace_ext3_get_blocks_exit(inode, iblock,
999  depth ? le32_to_cpu(chain[depth-1].key) : 0,
1000  count, err);
1001  return err;
1002 }
1003 
1004 /* Maximum number of blocks we map for direct IO at once. */
1005 #define DIO_MAX_BLOCKS 4096
1006 /*
1007  * Number of credits we need for writing DIO_MAX_BLOCKS:
1008  * We need sb + group descriptor + bitmap + inode -> 4
1009  * For B blocks with A block pointers per block we need:
1010  * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
1011  * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
1012  */
1013 #define DIO_CREDITS 25
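/*
 * Checking that arithmetic: with B = 4096 and A = 256 the tree costs
 * 1 + (4096/256/256 + 2) + (4096/256 + 2) = 1 + 2 + 18 = 21 credits,
 * and the four fixed blocks (sb, group descriptor, bitmap, inode)
 * bring the total to 25.
 */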
1014 
1015 static int ext3_get_block(struct inode *inode, sector_t iblock,
1016  struct buffer_head *bh_result, int create)
1017 {
1018  handle_t *handle = ext3_journal_current_handle();
1019  int ret = 0, started = 0;
1020  unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1021 
1022  if (create && !handle) { /* Direct IO write... */
1023  if (max_blocks > DIO_MAX_BLOCKS)
1024  max_blocks = DIO_MAX_BLOCKS;
1025  handle = ext3_journal_start(inode, DIO_CREDITS +
1026  EXT3_MAX_TRANS_DATA);
1027  if (IS_ERR(handle)) {
1028  ret = PTR_ERR(handle);
1029  goto out;
1030  }
1031  started = 1;
1032  }
1033 
1034  ret = ext3_get_blocks_handle(handle, inode, iblock,
1035  max_blocks, bh_result, create);
1036  if (ret > 0) {
1037  bh_result->b_size = (ret << inode->i_blkbits);
1038  ret = 0;
1039  }
1040  if (started)
1041  ext3_journal_stop(handle);
1042 out:
1043  return ret;
1044 }
1045 
1046 int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1047  u64 start, u64 len)
1048 {
1049  return generic_block_fiemap(inode, fieinfo, start, len,
1050  ext3_get_block);
1051 }
1052 
1053 /*
1054  * `handle' can be NULL if create is zero
1055  */
1056 struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
1057  long block, int create, int *errp)
1058 {
1059  struct buffer_head dummy;
1060  int fatal = 0, err;
1061 
1062  J_ASSERT(handle != NULL || create == 0);
1063 
1064  dummy.b_state = 0;
1065  dummy.b_blocknr = -1000;
1066  buffer_trace_init(&dummy.b_history);
1067  err = ext3_get_blocks_handle(handle, inode, block, 1,
1068  &dummy, create);
1069  /*
1070  * ext3_get_blocks_handle() returns the number of blocks
1071  * mapped, or 0 in case of a hole.
1072  */
1073  if (err > 0) {
1074  if (err > 1)
1075  WARN_ON(1);
1076  err = 0;
1077  }
1078  *errp = err;
1079  if (!err && buffer_mapped(&dummy)) {
1080  struct buffer_head *bh;
1081  bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1082  if (!bh) {
1083  *errp = -EIO;
1084  goto err;
1085  }
1086  if (buffer_new(&dummy)) {
1087  J_ASSERT(create != 0);
1088  J_ASSERT(handle != NULL);
1089 
1090  /*
1091  * Now that we do not always journal data, we should
1092  * keep in mind whether this should always journal the
1093  * new buffer as metadata. For now, regular file
1094  * writes use ext3_get_block instead, so it's not a
1095  * problem.
1096  */
1097  lock_buffer(bh);
1098  BUFFER_TRACE(bh, "call get_create_access");
1099  fatal = ext3_journal_get_create_access(handle, bh);
1100  if (!fatal && !buffer_uptodate(bh)) {
1101  memset(bh->b_data,0,inode->i_sb->s_blocksize);
1102  set_buffer_uptodate(bh);
1103  }
1104  unlock_buffer(bh);
1105  BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1106  err = ext3_journal_dirty_metadata(handle, bh);
1107  if (!fatal)
1108  fatal = err;
1109  } else {
1110  BUFFER_TRACE(bh, "not a new buffer");
1111  }
1112  if (fatal) {
1113  *errp = fatal;
1114  brelse(bh);
1115  bh = NULL;
1116  }
1117  return bh;
1118  }
1119 err:
1120  return NULL;
1121 }
1122 
1123 struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
1124  int block, int create, int *err)
1125 {
1126  struct buffer_head * bh;
1127 
1128  bh = ext3_getblk(handle, inode, block, create, err);
1129  if (!bh)
1130  return bh;
1131  if (bh_uptodate_or_lock(bh))
1132  return bh;
1133  get_bh(bh);
1134  bh->b_end_io = end_buffer_read_sync;
1135  submit_bh(READ | REQ_META | REQ_PRIO, bh);
1136  wait_on_buffer(bh);
1137  if (buffer_uptodate(bh))
1138  return bh;
1139  put_bh(bh);
1140  *err = -EIO;
1141  return NULL;
1142 }
1143 
1144 static int walk_page_buffers( handle_t *handle,
1145  struct buffer_head *head,
1146  unsigned from,
1147  unsigned to,
1148  int *partial,
1149  int (*fn)( handle_t *handle,
1150  struct buffer_head *bh))
1151 {
1152  struct buffer_head *bh;
1153  unsigned block_start, block_end;
1154  unsigned blocksize = head->b_size;
1155  int err, ret = 0;
1156  struct buffer_head *next;
1157 
1158  for ( bh = head, block_start = 0;
1159  ret == 0 && (bh != head || !block_start);
1160  block_start = block_end, bh = next)
1161  {
1162  next = bh->b_this_page;
1163  block_end = block_start + blocksize;
1164  if (block_end <= from || block_start >= to) {
1165  if (partial && !buffer_uptodate(bh))
1166  *partial = 1;
1167  continue;
1168  }
1169  err = (*fn)(handle, bh);
1170  if (!ret)
1171  ret = err;
1172  }
1173  return ret;
1174 }
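/*
 * Note on the loop above: a page's buffers form a circular list, with
 * b_this_page of the last buffer pointing back at the head.  That is
 * why iteration stops when bh wraps around to head with a non-zero
 * block_start, rather than on a NULL pointer.  Typical use, as in
 * ext3_write_begin() below:
 *
 *	walk_page_buffers(handle, page_buffers(page), from, to,
 *			  NULL, do_journal_get_write_access);
 */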
1175 
1176 /*
1177  * To preserve ordering, it is essential that the hole instantiation and
1178  * the data write be encapsulated in a single transaction. We cannot
1179  * close off a transaction and start a new one between the ext3_get_block()
1180  * and the commit_write(). So doing the journal_start at the start of
1181  * prepare_write() is the right place.
1182  *
1183  * Also, this function can nest inside ext3_writepage() ->
1184  * block_write_full_page(). In that case, we *know* that ext3_writepage()
1185  * has generated enough buffer credits to do the whole page. So we won't
1186  * block on the journal in that case, which is good, because the caller may
1187  * be PF_MEMALLOC.
1188  *
1189  * By accident, ext3 can be reentered when a transaction is open via
1190  * quota file writes. If we were to commit the transaction while thus
1191  * reentered, there can be a deadlock - we would be holding a quota
1192  * lock, and the commit would never complete if another thread had a
1193  * transaction open and was blocking on the quota lock - a ranking
1194  * violation.
1195  *
1196  * So what we do is to rely on the fact that journal_stop/journal_start
1197  * will _not_ run commit under these circumstances because handle->h_ref
1198  * is elevated. We'll still have enough credits for the tiny quotafile
1199  * write.
1200  */
1201 static int do_journal_get_write_access(handle_t *handle,
1202  struct buffer_head *bh)
1203 {
1204  int dirty = buffer_dirty(bh);
1205  int ret;
1206 
1207  if (!buffer_mapped(bh) || buffer_freed(bh))
1208  return 0;
1209  /*
1210  * __block_prepare_write() could have dirtied some buffers. Clean
1211  * the dirty bit as jbd2_journal_get_write_access() could complain
1212  * otherwise about fs integrity issues. Setting of the dirty bit
1213  * by __block_prepare_write() isn't a real problem here as we clear
1214  * the bit before releasing a page lock and thus writeback cannot
1215  * ever write the buffer.
1216  */
1217  if (dirty)
1218  clear_buffer_dirty(bh);
1219  ret = ext3_journal_get_write_access(handle, bh);
1220  if (!ret && dirty)
1221  ret = ext3_journal_dirty_metadata(handle, bh);
1222  return ret;
1223 }
1224 
1225 /*
1226  * Truncate blocks that were not used by write. We have to truncate the
1227  * pagecache as well so that corresponding buffers get properly unmapped.
1228  */
1229 static void ext3_truncate_failed_write(struct inode *inode)
1230 {
1231  truncate_inode_pages(inode->i_mapping, inode->i_size);
1232  ext3_truncate(inode);
1233 }
1234 
1235 /*
1236  * Truncate blocks that were not used by direct IO write. We have to zero out
1237  * the last file block as well because direct IO might have written to it.
1238  */
1239 static void ext3_truncate_failed_direct_write(struct inode *inode)
1240 {
1241  ext3_block_truncate_page(inode, inode->i_size);
1242  ext3_truncate(inode);
1243 }
1244 
1245 static int ext3_write_begin(struct file *file, struct address_space *mapping,
1246  loff_t pos, unsigned len, unsigned flags,
1247  struct page **pagep, void **fsdata)
1248 {
1249  struct inode *inode = mapping->host;
1250  int ret;
1251  handle_t *handle;
1252  int retries = 0;
1253  struct page *page;
1254  pgoff_t index;
1255  unsigned from, to;
1256  /* Reserve one block more for addition to orphan list in case
1257  * we allocate blocks but write fails for some reason */
1258  int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
1259 
1260  trace_ext3_write_begin(inode, pos, len, flags);
1261 
1262  index = pos >> PAGE_CACHE_SHIFT;
1263  from = pos & (PAGE_CACHE_SIZE - 1);
1264  to = from + len;
1265 
1266 retry:
1267  page = grab_cache_page_write_begin(mapping, index, flags);
1268  if (!page)
1269  return -ENOMEM;
1270  *pagep = page;
1271 
1272  handle = ext3_journal_start(inode, needed_blocks);
1273  if (IS_ERR(handle)) {
1274  unlock_page(page);
1275  page_cache_release(page);
1276  ret = PTR_ERR(handle);
1277  goto out;
1278  }
1279  ret = __block_write_begin(page, pos, len, ext3_get_block);
1280  if (ret)
1281  goto write_begin_failed;
1282 
1283  if (ext3_should_journal_data(inode)) {
1284  ret = walk_page_buffers(handle, page_buffers(page),
1285  from, to, NULL, do_journal_get_write_access);
1286  }
1287 write_begin_failed:
1288  if (ret) {
1289  /*
1290  * block_write_begin may have instantiated a few blocks
1291  * outside i_size. Trim these off again. Don't need
1292  * i_size_read because we hold i_mutex.
1293  *
1294  * Add inode to orphan list in case we crash before truncate
1295  * finishes. Do this only if ext3_can_truncate() agrees so
1296  * that orphan processing code is happy.
1297  */
1298  if (pos + len > inode->i_size && ext3_can_truncate(inode))
1299  ext3_orphan_add(handle, inode);
1300  ext3_journal_stop(handle);
1301  unlock_page(page);
1302  page_cache_release(page);
1303  if (pos + len > inode->i_size)
1304  ext3_truncate_failed_write(inode);
1305  }
1306  if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1307  goto retry;
1308 out:
1309  return ret;
1310 }
1311 
1312 
1313 int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1314 {
1315  int err = journal_dirty_data(handle, bh);
1316  if (err)
1317  ext3_journal_abort_handle(__func__, __func__,
1318  bh, handle, err);
1319  return err;
1320 }
1321 
1322 /* For ordered writepage and write_end functions */
1323 static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1324 {
1325  /*
1326  * Write could have mapped the buffer but it didn't copy the data in
1327  * yet. So avoid filing such a buffer into a transaction.
1328  */
1329  if (buffer_mapped(bh) && buffer_uptodate(bh))
1330  return ext3_journal_dirty_data(handle, bh);
1331  return 0;
1332 }
1333 
1334 /* For write_end() in data=journal mode */
1335 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1336 {
1337  if (!buffer_mapped(bh) || buffer_freed(bh))
1338  return 0;
1339  set_buffer_uptodate(bh);
1340  return ext3_journal_dirty_metadata(handle, bh);
1341 }
1342 
1343 /*
1344  * This is nasty and subtle: ext3_write_begin() could have allocated blocks
1345  * for the whole page but later we failed to copy the data in. Update inode
1346  * size according to what we managed to copy. The rest is going to be
1347  * truncated in the write_end function.
1348  */
1349 static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
1350 {
1351  /* What matters to us is i_disksize. We don't write i_size anywhere */
1352  if (pos + copied > inode->i_size)
1353  i_size_write(inode, pos + copied);
1354  if (pos + copied > EXT3_I(inode)->i_disksize) {
1355  EXT3_I(inode)->i_disksize = pos + copied;
1356  mark_inode_dirty(inode);
1357  }
1358 }
1359 
1360 /*
1361  * We need to pick up the new inode size which generic_commit_write gave us.
1362  * `file' can be NULL - eg, when called from page_symlink().
1363  *
1364  * ext3 never places buffers on inode->i_mapping->private_list. Metadata
1365  * buffers are managed internally.
1366  */
1367 static int ext3_ordered_write_end(struct file *file,
1368  struct address_space *mapping,
1369  loff_t pos, unsigned len, unsigned copied,
1370  struct page *page, void *fsdata)
1371 {
1372  handle_t *handle = ext3_journal_current_handle();
1373  struct inode *inode = file->f_mapping->host;
1374  unsigned from, to;
1375  int ret = 0, ret2;
1376 
1377  trace_ext3_ordered_write_end(inode, pos, len, copied);
1378  copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1379 
1380  from = pos & (PAGE_CACHE_SIZE - 1);
1381  to = from + copied;
1382  ret = walk_page_buffers(handle, page_buffers(page),
1383  from, to, NULL, journal_dirty_data_fn);
1384 
1385  if (ret == 0)
1386  update_file_sizes(inode, pos, copied);
1387  /*
1388  * There may be allocated blocks outside of i_size because
1389  * we failed to copy some data. Prepare for truncate.
1390  */
1391  if (pos + len > inode->i_size && ext3_can_truncate(inode))
1392  ext3_orphan_add(handle, inode);
1393  ret2 = ext3_journal_stop(handle);
1394  if (!ret)
1395  ret = ret2;
1396  unlock_page(page);
1397  page_cache_release(page);
1398 
1399  if (pos + len > inode->i_size)
1400  ext3_truncate_failed_write(inode);
1401  return ret ? ret : copied;
1402 }
1403 
1404 static int ext3_writeback_write_end(struct file *file,
1405  struct address_space *mapping,
1406  loff_t pos, unsigned len, unsigned copied,
1407  struct page *page, void *fsdata)
1408 {
1409  handle_t *handle = ext3_journal_current_handle();
1410  struct inode *inode = file->f_mapping->host;
1411  int ret;
1412 
1413  trace_ext3_writeback_write_end(inode, pos, len, copied);
1414  copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1415  update_file_sizes(inode, pos, copied);
1416  /*
1417  * There may be allocated blocks outside of i_size because
1418  * we failed to copy some data. Prepare for truncate.
1419  */
1420  if (pos + len > inode->i_size && ext3_can_truncate(inode))
1421  ext3_orphan_add(handle, inode);
1422  ret = ext3_journal_stop(handle);
1423  unlock_page(page);
1424  page_cache_release(page);
1425 
1426  if (pos + len > inode->i_size)
1427  ext3_truncate_failed_write(inode);
1428  return ret ? ret : copied;
1429 }
1430 
1431 static int ext3_journalled_write_end(struct file *file,
1432  struct address_space *mapping,
1433  loff_t pos, unsigned len, unsigned copied,
1434  struct page *page, void *fsdata)
1435 {
1436  handle_t *handle = ext3_journal_current_handle();
1437  struct inode *inode = mapping->host;
1438  struct ext3_inode_info *ei = EXT3_I(inode);
1439  int ret = 0, ret2;
1440  int partial = 0;
1441  unsigned from, to;
1442 
1443  trace_ext3_journalled_write_end(inode, pos, len, copied);
1444  from = pos & (PAGE_CACHE_SIZE - 1);
1445  to = from + len;
1446 
1447  if (copied < len) {
1448  if (!PageUptodate(page))
1449  copied = 0;
1450  page_zero_new_buffers(page, from + copied, to);
1451  to = from + copied;
1452  }
1453 
1454  ret = walk_page_buffers(handle, page_buffers(page), from,
1455  to, &partial, write_end_fn);
1456  if (!partial)
1457  SetPageUptodate(page);
1458 
1459  if (pos + copied > inode->i_size)
1460  i_size_write(inode, pos + copied);
1461  /*
1462  * There may be allocated blocks outside of i_size because
1463  * we failed to copy some data. Prepare for truncate.
1464  */
1465  if (pos + len > inode->i_size && ext3_can_truncate(inode))
1466  ext3_orphan_add(handle, inode);
1467  ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1468  atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
1469  if (inode->i_size > ei->i_disksize) {
1470  ei->i_disksize = inode->i_size;
1471  ret2 = ext3_mark_inode_dirty(handle, inode);
1472  if (!ret)
1473  ret = ret2;
1474  }
1475 
1476  ret2 = ext3_journal_stop(handle);
1477  if (!ret)
1478  ret = ret2;
1479  unlock_page(page);
1480  page_cache_release(page);
1481 
1482  if (pos + len > inode->i_size)
1483  ext3_truncate_failed_write(inode);
1484  return ret ? ret : copied;
1485 }
1486 
1487 /*
1488  * bmap() is special. It gets used by applications such as lilo and by
1489  * the swapper to find the on-disk block of a specific piece of data.
1490  *
1491  * Naturally, this is dangerous if the block concerned is still in the
1492  * journal. If somebody makes a swapfile on an ext3 data-journaling
1493  * filesystem and enables swap, then they may get a nasty shock when the
1494  * data getting swapped to that swapfile suddenly gets overwritten by
1495  * the original zeros written out previously to the journal and
1496  * awaiting writeback in the kernel's buffer cache.
1497  *
1498  * So, if we see any bmap calls here on a modified, data-journaled file,
1499  * take extra steps to flush any blocks which might be in the cache.
1500  */
1501 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
1502 {
1503  struct inode *inode = mapping->host;
1504  journal_t *journal;
1505  int err;
1506 
1507  if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
1508  /*
1509  * This is a REALLY heavyweight approach, but the use of
1510  * bmap on dirty files is expected to be extremely rare:
1511  * only if we run lilo or swapon on a freshly made file
1512  * do we expect this to happen.
1513  *
1514  * (bmap requires CAP_SYS_RAWIO so this does not
1515  * represent an unprivileged user DOS attack --- we'd be
1516  * in trouble if mortal users could trigger this path at
1517  * will.)
1518  *
1519  * NB. EXT3_STATE_JDATA is not set on files other than
1520  * regular files. If somebody wants to bmap a directory
1521  * or symlink and gets confused because the buffer
1522  * hasn't yet been flushed to disk, they deserve
1523  * everything they get.
1524  */
1525 
1526  ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
1527  journal = EXT3_JOURNAL(inode);
1528  journal_lock_updates(journal);
1529  err = journal_flush(journal);
1530  journal_unlock_updates(journal);
1531 
1532  if (err)
1533  return 0;
1534  }
1535 
1536  return generic_block_bmap(mapping,block,ext3_get_block);
1537 }
1538 
1539 static int bget_one(handle_t *handle, struct buffer_head *bh)
1540 {
1541  get_bh(bh);
1542  return 0;
1543 }
1544 
1545 static int bput_one(handle_t *handle, struct buffer_head *bh)
1546 {
1547  put_bh(bh);
1548  return 0;
1549 }
1550 
1551 static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
1552 {
1553  return !buffer_mapped(bh);
1554 }
1555 
1556 /*
1557  * Note that we always start a transaction even if we're not journalling
1558  * data. This is to preserve ordering: any hole instantiation within
1559  * __block_write_full_page -> ext3_get_block() should be journalled
1560  * along with the data so we don't crash and then get metadata which
1561  * refers to old data.
1562  *
1563  * In all journalling modes block_write_full_page() will start the I/O.
1564  *
1565  * Problem:
1566  *
1567  * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1568  * ext3_writepage()
1569  *
1570  * Similar for:
1571  *
1572  * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1573  *
1574  * Same applies to ext3_get_block(). We will deadlock on various things like
1575  * lock_journal and i_truncate_mutex.
1576  *
1577  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1578  * allocations fail.
1579  *
1580  * 16May01: If we're reentered then journal_current_handle() will be
1581  * non-zero. We simply *return*.
1582  *
1583  * 1 July 2001: @@@ FIXME:
1584  * In journalled data mode, a data buffer may be metadata against the
1585  * current transaction. But the same file is part of a shared mapping
1586  * and someone does a writepage() on it.
1587  *
1588  * We will move the buffer onto the async_data list, but *after* it has
1589  * been dirtied. So there's a small window where we have dirty data on
1590  * BJ_Metadata.
1591  *
1592  * Note that this only applies to the last partial page in the file. The
1593  * bit which block_write_full_page() uses prepare/commit for. (That's
1594  * broken code anyway: it's wrong for msync()).
1595  *
1596  * It's a rare case: affects the final partial page, for journalled data
1597  * where the file is subject to both write() and writepage() in the same
1598  * transaction. To fix it we'll need a custom block_write_full_page().
1599  * We'll probably need that anyway for journalling writepage() output.
1600  *
1601  * We don't honour synchronous mounts for writepage(). That would be
1602  * disastrous. Any write() or metadata operation will sync the fs for
1603  * us.
1604  *
1605  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1606  * we don't need to open a transaction here.
1607  */
1608 static int ext3_ordered_writepage(struct page *page,
1609  struct writeback_control *wbc)
1610 {
1611  struct inode *inode = page->mapping->host;
1612  struct buffer_head *page_bufs;
1613  handle_t *handle = NULL;
1614  int ret = 0;
1615  int err;
1616 
1617  J_ASSERT(PageLocked(page));
1618  /*
1619  * We don't want to warn for emergency remount. The condition is
1620  * ordered to avoid dereferencing inode->i_sb in non-error case to
1621  * avoid slow-downs.
1622  */
1623  WARN_ON_ONCE(IS_RDONLY(inode) &&
1624  !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1625 
1626  /*
1627  * We give up here if we're reentered, because it might be for a
1628  * different filesystem.
1629  */
1630  if (ext3_journal_current_handle())
1631  goto out_fail;
1632 
1633  trace_ext3_ordered_writepage(page);
1634  if (!page_has_buffers(page)) {
1635  create_empty_buffers(page, inode->i_sb->s_blocksize,
1636  (1 << BH_Dirty)|(1 << BH_Uptodate));
1637  page_bufs = page_buffers(page);
1638  } else {
1639  page_bufs = page_buffers(page);
1640  if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
1641  NULL, buffer_unmapped)) {
1642  /* Provide NULL get_block() to catch bugs if buffers
1643  * weren't really mapped */
1644  return block_write_full_page(page, NULL, wbc);
1645  }
1646  }
1647  handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1648 
1649  if (IS_ERR(handle)) {
1650  ret = PTR_ERR(handle);
1651  goto out_fail;
1652  }
1653 
1654  walk_page_buffers(handle, page_bufs, 0,
1655  PAGE_CACHE_SIZE, NULL, bget_one);
1656 
1657  ret = block_write_full_page(page, ext3_get_block, wbc);
1658 
1659  /*
1660  * The page can become unlocked at any point now, and
1661  * truncate can then come in and change things. So we
1662  * can't touch *page from now on. But *page_bufs is
1663  * safe due to elevated refcount.
1664  */
1665 
1666  /*
1667  * And attach them to the current transaction. But only if
1668  * block_write_full_page() succeeded. Otherwise they are unmapped,
1669  * and generally junk.
1670  */
1671  if (ret == 0) {
1672  err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1673  NULL, journal_dirty_data_fn);
1674  if (!ret)
1675  ret = err;
1676  }
1677  walk_page_buffers(handle, page_bufs, 0,
1678  PAGE_CACHE_SIZE, NULL, bput_one);
1679  err = ext3_journal_stop(handle);
1680  if (!ret)
1681  ret = err;
1682  return ret;
1683 
1684 out_fail:
1685  redirty_page_for_writepage(wbc, page);
1686  unlock_page(page);
1687  return ret;
1688 }
1689 
1690 static int ext3_writeback_writepage(struct page *page,
1691  struct writeback_control *wbc)
1692 {
1693  struct inode *inode = page->mapping->host;
1694  handle_t *handle = NULL;
1695  int ret = 0;
1696  int err;
1697 
1698  J_ASSERT(PageLocked(page));
1699  /*
1700  * We don't want to warn for emergency remount. The condition is
1701  * ordered to avoid dereferencing inode->i_sb in non-error case to
1702  * avoid slow-downs.
1703  */
1704  WARN_ON_ONCE(IS_RDONLY(inode) &&
1705  !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1706 
1707  if (ext3_journal_current_handle())
1708  goto out_fail;
1709 
1710  trace_ext3_writeback_writepage(page);
1711  if (page_has_buffers(page)) {
1712  if (!walk_page_buffers(NULL, page_buffers(page), 0,
1713  PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
1714  /* Provide NULL get_block() to catch bugs if buffers
1715  * weren't really mapped */
1716  return block_write_full_page(page, NULL, wbc);
1717  }
1718  }
1719 
1720  handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1721  if (IS_ERR(handle)) {
1722  ret = PTR_ERR(handle);
1723  goto out_fail;
1724  }
1725 
1726  ret = block_write_full_page(page, ext3_get_block, wbc);
1727 
1728  err = ext3_journal_stop(handle);
1729  if (!ret)
1730  ret = err;
1731  return ret;
1732 
1733 out_fail:
1734  redirty_page_for_writepage(wbc, page);
1735  unlock_page(page);
1736  return ret;
1737 }
1738 
1739 static int ext3_journalled_writepage(struct page *page,
1740  struct writeback_control *wbc)
1741 {
1742  struct inode *inode = page->mapping->host;
1743  handle_t *handle = NULL;
1744  int ret = 0;
1745  int err;
1746 
1747  J_ASSERT(PageLocked(page));
1748  /*
1749  * We don't want to warn for emergency remount. The condition is
1750  * ordered to avoid dereferencing inode->i_sb in non-error case to
1751  * avoid slow-downs.
1752  */
1753  WARN_ON_ONCE(IS_RDONLY(inode) &&
1754  !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
1755 
1756  if (ext3_journal_current_handle())
1757  goto no_write;
1758 
1759  trace_ext3_journalled_writepage(page);
1760  handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1761  if (IS_ERR(handle)) {
1762  ret = PTR_ERR(handle);
1763  goto no_write;
1764  }
1765 
1766  if (!page_has_buffers(page) || PageChecked(page)) {
1767  /*
1768  * It's mmapped pagecache. Add buffers and journal it. There
1769  * doesn't seem much point in redirtying the page here.
1770  */
1771  ClearPageChecked(page);
1772  ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
1773  ext3_get_block);
1774  if (ret != 0) {
1775  ext3_journal_stop(handle);
1776  goto out_unlock;
1777  }
1778  ret = walk_page_buffers(handle, page_buffers(page), 0,
1779  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1780 
1781  err = walk_page_buffers(handle, page_buffers(page), 0,
1782  PAGE_CACHE_SIZE, NULL, write_end_fn);
1783  if (ret == 0)
1784  ret = err;
1785  ext3_set_inode_state(inode, EXT3_STATE_JDATA);
1786  atomic_set(&EXT3_I(inode)->i_datasync_tid,
1787  handle->h_transaction->t_tid);
1788  unlock_page(page);
1789  } else {
1790  /*
1791  * It may be a page full of checkpoint-mode buffers. We don't
1792  * really know unless we go poke around in the buffer_heads.
1793  * But block_write_full_page will do the right thing.
1794  */
1795  ret = block_write_full_page(page, ext3_get_block, wbc);
1796  }
1797  err = ext3_journal_stop(handle);
1798  if (!ret)
1799  ret = err;
1800 out:
1801  return ret;
1802 
1803 no_write:
1804  redirty_page_for_writepage(wbc, page);
1805 out_unlock:
1806  unlock_page(page);
1807  goto out;
1808 }
1809 
1810 static int ext3_readpage(struct file *file, struct page *page)
1811 {
1812  trace_ext3_readpage(page);
1813  return mpage_readpage(page, ext3_get_block);
1814 }
1815 
1816 static int
1817 ext3_readpages(struct file *file, struct address_space *mapping,
1818  struct list_head *pages, unsigned nr_pages)
1819 {
1820  return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1821 }
1822 
1823 static void ext3_invalidatepage(struct page *page, unsigned long offset)
1824 {
1825  journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1826 
1827  trace_ext3_invalidatepage(page, offset);
1828 
1829  /*
1830  * If it's a full truncate we just forget about the pending dirtying
1831  */
1832  if (offset == 0)
1833  ClearPageChecked(page);
1834 
1835  journal_invalidatepage(journal, page, offset);
1836 }
1837 
1838 static int ext3_releasepage(struct page *page, gfp_t wait)
1839 {
1840  journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1841 
1842  trace_ext3_releasepage(page);
1843  WARN_ON(PageChecked(page));
1844  if (!page_has_buffers(page))
1845  return 0;
1846  return journal_try_to_free_buffers(journal, page, wait);
1847 }
1848 
1849 /*
1850  * If the O_DIRECT write will extend the file then add this inode to the
1851  * orphan list. So recovery will truncate it back to the original size
1852  * if the machine crashes during the write.
1853  *
1854  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1855  * crashes then stale disk data _may_ be exposed inside the file. But current
1856  * VFS code falls back into buffered path in that case so we are safe.
1857  */
1858 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1859  const struct iovec *iov, loff_t offset,
1860  unsigned long nr_segs)
1861 {
1862  struct file *file = iocb->ki_filp;
1863  struct inode *inode = file->f_mapping->host;
1864  struct ext3_inode_info *ei = EXT3_I(inode);
1865  handle_t *handle;
1866  ssize_t ret;
1867  int orphan = 0;
1868  size_t count = iov_length(iov, nr_segs);
1869  int retries = 0;
1870 
1871  trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
1872 
1873  if (rw == WRITE) {
1874  loff_t final_size = offset + count;
1875 
1876  if (final_size > inode->i_size) {
1877  /* Credits for sb + inode write */
1878  handle = ext3_journal_start(inode, 2);
1879  if (IS_ERR(handle)) {
1880  ret = PTR_ERR(handle);
1881  goto out;
1882  }
1883  ret = ext3_orphan_add(handle, inode);
1884  if (ret) {
1885  ext3_journal_stop(handle);
1886  goto out;
1887  }
1888  orphan = 1;
1889  ei->i_disksize = inode->i_size;
1890  ext3_journal_stop(handle);
1891  }
1892  }
1893 
1894 retry:
1895  ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
1896  ext3_get_block);
1897  /*
1898  * In case of error extending write may have instantiated a few
1899  * blocks outside i_size. Trim these off again.
1900  */
1901  if (unlikely((rw & WRITE) && ret < 0)) {
1902  loff_t isize = i_size_read(inode);
1903  loff_t end = offset + iov_length(iov, nr_segs);
1904 
1905  if (end > isize)
1906  ext3_truncate_failed_direct_write(inode);
1907  }
1908  if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
1909  goto retry;
1910 
1911  if (orphan) {
1912  int err;
1913 
1914  /* Credits for sb + inode write */
1915  handle = ext3_journal_start(inode, 2);
1916  if (IS_ERR(handle)) {
1917  /* This is really bad luck. We've written the data
1918  * but cannot extend i_size. Truncate allocated blocks
1919  * and pretend the write failed... */
1920  ext3_truncate_failed_direct_write(inode);
1921  ret = PTR_ERR(handle);
1922  goto out;
1923  }
1924  if (inode->i_nlink)
1925  ext3_orphan_del(handle, inode);
1926  if (ret > 0) {
1927  loff_t end = offset + ret;
1928  if (end > inode->i_size) {
1929  ei->i_disksize = end;
1930  i_size_write(inode, end);
1931  /*
1932  * We're going to return a positive `ret'
1933  * here due to non-zero-length I/O, so there's
1934  * no way of reporting error returns from
1935  * ext3_mark_inode_dirty() to userspace. So
1936  * ignore it.
1937  */
1938  ext3_mark_inode_dirty(handle, inode);
1939  }
1940  }
1941  err = ext3_journal_stop(handle);
1942  if (ret == 0)
1943  ret = err;
1944  }
1945 out:
1946  trace_ext3_direct_IO_exit(inode, offset,
1947  iov_length(iov, nr_segs), rw, ret);
1948  return ret;
1949 }
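/*
 * Editor's sketch (not part of the original source): the -ENOSPC retry
 * idiom used above recurs throughout ext3. ext3_should_retry_alloc()
 * forces a journal commit so that blocks pinned by the committing
 * transaction become reusable, and the caller-supplied counter bounds
 * the number of retries. some_allocating_operation() below is a
 * hypothetical placeholder.
 */
#if 0
	int retries = 0;
	ssize_t ret;

	do {
		ret = some_allocating_operation(inode);
	} while (ret == -ENOSPC &&
		 ext3_should_retry_alloc(inode->i_sb, &retries));
#endif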
1950 
1951 /*
1952  * Pages can be marked dirty completely asynchronously from ext3's journalling
1953  * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
1954  * much here because ->set_page_dirty is called under VFS locks. The page is
1955  * not necessarily locked.
1956  *
1957  * We cannot just dirty the page and leave attached buffers clean, because the
1958  * buffers' dirty state is "definitive". We cannot just set the buffers dirty
1959  * or jbddirty because all the journalling code will explode.
1960  *
1961  * So what we do is to mark the page "pending dirty" and next time writepage
1962  * is called, propagate that into the buffers appropriately.
1963  */
1964 static int ext3_journalled_set_page_dirty(struct page *page)
1965 {
1966  SetPageChecked(page);
1967  return __set_page_dirty_nobuffers(page);
1968 }
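/*
 * Editor's sketch of the consumer side (an assumption drawn from the
 * tail of ext3_journalled_writepage() above): writepage tests the
 * "pending dirty" flag set here and, if set, clears it and journals
 * every buffer on the page instead of doing ordinary block I/O.
 */
#if 0
	if (PageChecked(page)) {
		ClearPageChecked(page);
		/* journal all buffers via walk_page_buffers(), as in
		 * ext3_journalled_writepage() */
	}
#endif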
1969 
1970 static const struct address_space_operations ext3_ordered_aops = {
1971  .readpage = ext3_readpage,
1972  .readpages = ext3_readpages,
1973  .writepage = ext3_ordered_writepage,
1974  .write_begin = ext3_write_begin,
1975  .write_end = ext3_ordered_write_end,
1976  .bmap = ext3_bmap,
1977  .invalidatepage = ext3_invalidatepage,
1978  .releasepage = ext3_releasepage,
1979  .direct_IO = ext3_direct_IO,
1980  .migratepage = buffer_migrate_page,
1981  .is_partially_uptodate = block_is_partially_uptodate,
1982  .error_remove_page = generic_error_remove_page,
1983 };
1984 
1985 static const struct address_space_operations ext3_writeback_aops = {
1986  .readpage = ext3_readpage,
1987  .readpages = ext3_readpages,
1988  .writepage = ext3_writeback_writepage,
1989  .write_begin = ext3_write_begin,
1990  .write_end = ext3_writeback_write_end,
1991  .bmap = ext3_bmap,
1992  .invalidatepage = ext3_invalidatepage,
1993  .releasepage = ext3_releasepage,
1994  .direct_IO = ext3_direct_IO,
1995  .migratepage = buffer_migrate_page,
1996  .is_partially_uptodate = block_is_partially_uptodate,
1997  .error_remove_page = generic_error_remove_page,
1998 };
1999 
2000 static const struct address_space_operations ext3_journalled_aops = {
2001  .readpage = ext3_readpage,
2002  .readpages = ext3_readpages,
2003  .writepage = ext3_journalled_writepage,
2004  .write_begin = ext3_write_begin,
2005  .write_end = ext3_journalled_write_end,
2006  .set_page_dirty = ext3_journalled_set_page_dirty,
2007  .bmap = ext3_bmap,
2008  .invalidatepage = ext3_invalidatepage,
2009  .releasepage = ext3_releasepage,
2010  .is_partially_uptodate = block_is_partially_uptodate,
2011  .error_remove_page = generic_error_remove_page,
2012 };
2013 
2014 void ext3_set_aops(struct inode *inode)
2015 {
2016  if (ext3_should_order_data(inode))
2017  inode->i_mapping->a_ops = &ext3_ordered_aops;
2018  else if (ext3_should_writeback_data(inode))
2019  inode->i_mapping->a_ops = &ext3_writeback_aops;
2020  else
2021  inode->i_mapping->a_ops = &ext3_journalled_aops;
2022 }
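/*
 * Quick reference (editor's summary of the helpers above): the data=
 * mount option selects the address_space operations as follows.
 *
 *	data=ordered	-> ext3_ordered_aops
 *	data=writeback	-> ext3_writeback_aops
 *	data=journal	-> ext3_journalled_aops
 */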
2023 
2024 /*
2025  * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
2026  * up to the end of the block which corresponds to `from'.
2027  * This is required during truncate. We need to physically zero the tail end
2028  * of that block so it doesn't yield old data if the file is later grown.
2029  */
2030 static int ext3_block_truncate_page(struct inode *inode, loff_t from)
2031 {
2032  ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
2033  unsigned offset = from & (PAGE_CACHE_SIZE - 1);
2034  unsigned blocksize, iblock, length, pos;
2035  struct page *page;
2036  handle_t *handle = NULL;
2037  struct buffer_head *bh;
2038  int err = 0;
2039 
2040  /* Truncated on block boundary - nothing to do */
2041  blocksize = inode->i_sb->s_blocksize;
2042  if ((from & (blocksize - 1)) == 0)
2043  return 0;
2044 
2045  page = grab_cache_page(inode->i_mapping, index);
2046  if (!page)
2047  return -ENOMEM;
2048  length = blocksize - (offset & (blocksize - 1));
2049  iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
2050 
2051  if (!page_has_buffers(page))
2052  create_empty_buffers(page, blocksize, 0);
2053 
2054  /* Find the buffer that contains "offset" */
2055  bh = page_buffers(page);
2056  pos = blocksize;
2057  while (offset >= pos) {
2058  bh = bh->b_this_page;
2059  iblock++;
2060  pos += blocksize;
2061  }
2062 
2063  err = 0;
2064  if (buffer_freed(bh)) {
2065  BUFFER_TRACE(bh, "freed: skip");
2066  goto unlock;
2067  }
2068 
2069  if (!buffer_mapped(bh)) {
2070  BUFFER_TRACE(bh, "unmapped");
2071  ext3_get_block(inode, iblock, bh, 0);
2072  /* unmapped? It's a hole - nothing to do */
2073  if (!buffer_mapped(bh)) {
2074  BUFFER_TRACE(bh, "still unmapped");
2075  goto unlock;
2076  }
2077  }
2078 
2079  /* Ok, it's mapped. Make sure it's up-to-date */
2080  if (PageUptodate(page))
2081  set_buffer_uptodate(bh);
2082 
2083  if (!bh_uptodate_or_lock(bh)) {
2084  err = bh_submit_read(bh);
2085  /* Uhhuh. Read error. Complain and punt. */
2086  if (err)
2087  goto unlock;
2088  }
2089 
2090  /* data=writeback mode doesn't need transaction to zero-out data */
2091  if (!ext3_should_writeback_data(inode)) {
2092  /* We journal at most one block */
2093  handle = ext3_journal_start(inode, 1);
2094  if (IS_ERR(handle)) {
2095  clear_highpage(page);
2096  flush_dcache_page(page);
2097  err = PTR_ERR(handle);
2098  goto unlock;
2099  }
2100  }
2101 
2102  if (ext3_should_journal_data(inode)) {
2103  BUFFER_TRACE(bh, "get write access");
2104  err = ext3_journal_get_write_access(handle, bh);
2105  if (err)
2106  goto stop;
2107  }
2108 
2109  zero_user(page, offset, length);
2110  BUFFER_TRACE(bh, "zeroed end of block");
2111 
2112  err = 0;
2113  if (ext3_should_journal_data(inode)) {
2114  err = ext3_journal_dirty_metadata(handle, bh);
2115  } else {
2116  if (ext3_should_order_data(inode))
2117  err = ext3_journal_dirty_data(handle, bh);
2118  mark_buffer_dirty(bh);
2119  }
2120 stop:
2121  if (handle)
2122  ext3_journal_stop(handle);
2123 
2124 unlock:
2125  unlock_page(page);
2126  page_cache_release(page);
2127  return err;
2128 }
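/*
 * Worked example (editor's illustration, assuming 4K pages and a 4K
 * block size): truncating to from = 5000 gives offset = 5000 & 4095 =
 * 904 in page index 1, so length = 4096 - 904 = 3192 tail bytes of the
 * partial block are zeroed, and a later extension of the file cannot
 * expose stale data from that block.
 */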
2129 
2130 /*
2131  * Probably it should be a library function... search for first non-zero word
2132  * or memcmp with zero_page, whatever is better for particular architecture.
2133  * Linus?
2134  */
2135 static inline int all_zeroes(__le32 *p, __le32 *q)
2136 {
2137  while (p < q)
2138  if (*p++)
2139  return 0;
2140  return 1;
2141 }
2142 
2178 static Indirect *ext3_find_shared(struct inode *inode, int depth,
2179  int offsets[4], Indirect chain[4], __le32 *top)
2180 {
2181  Indirect *partial, *p;
2182  int k, err;
2183 
2184  *top = 0;
2185  /* Make k index the deepest non-null offset + 1 */
2186  for (k = depth; k > 1 && !offsets[k-1]; k--)
2187  ;
2188  partial = ext3_get_branch(inode, k, offsets, chain, &err);
2189  /* Writer: pointers */
2190  if (!partial)
2191  partial = chain + k-1;
2192  /*
2193  * If the branch acquired continuation since we've looked at it -
2194  * fine, it should all survive and (new) top doesn't belong to us.
2195  */
2196  if (!partial->key && *partial->p)
2197  /* Writer: end */
2198  goto no_top;
2199  for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2200  ;
2201  /*
2202  * OK, we've found the last block that must survive. The rest of our
2203  * branch should be detached before unlocking. However, if that rest
2204  * of branch is all ours and does not grow immediately from the inode
2205  * it's easier to cheat and just decrement partial->p.
2206  */
2207  if (p == chain + k - 1 && p > chain) {
2208  p->p--;
2209  } else {
2210  *top = *p->p;
2211  /* Nope, don't do this in ext3. Must leave the tree intact */
2212 #if 0
2213  *p->p = 0;
2214 #endif
2215  }
2216  /* Writer: end */
2217 
2218  while(partial > p) {
2219  brelse(partial->bh);
2220  partial--;
2221  }
2222 no_top:
2223  return partial;
2224 }
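/*
 * Illustrative walk-through (editor's note): suppose the truncation
 * point falls partway through an indirect block. ext3_get_branch()
 * fills chain[] down to that block; if any entry to the left of the
 * cut is still non-zero, the block must survive, so the scan above
 * stops there and only the pointers to its right are detached. If
 * every entry before the cut is zero, the scan climbs one level and
 * the whole block becomes part of the detachable subtree rooted at
 * *top.
 */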
2225 
2226 /*
2227  * Zero a number of block pointers in either an inode or an indirect block.
2228  * If we restart the transaction we must again get write access to the
2229  * indirect block for further modification.
2230  *
2231  * We release `count' blocks on disk, but (last - first) may be greater
2232  * than `count' because there can be holes in there.
2233  */
2234 static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2235  struct buffer_head *bh, ext3_fsblk_t block_to_free,
2236  unsigned long count, __le32 *first, __le32 *last)
2237 {
2238  __le32 *p;
2239  if (try_to_extend_transaction(handle, inode)) {
2240  if (bh) {
2241  BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2242  if (ext3_journal_dirty_metadata(handle, bh))
2243  return;
2244  }
2245  ext3_mark_inode_dirty(handle, inode);
2246  truncate_restart_transaction(handle, inode);
2247  if (bh) {
2248  BUFFER_TRACE(bh, "retaking write access");
2249  if (ext3_journal_get_write_access(handle, bh))
2250  return;
2251  }
2252  }
2253 
2254  /*
2255  * Any buffers which are on the journal will be in memory. We find
2256  * them on the hash table so journal_revoke() will run journal_forget()
2257  * on them. We've already detached each block from the file, so
2258  * bforget() in journal_forget() should be safe.
2259  *
2260  * AKPM: turn on bforget in journal_forget()!!!
2261  */
2262  for (p = first; p < last; p++) {
2263  u32 nr = le32_to_cpu(*p);
2264  if (nr) {
2265  struct buffer_head *bh;
2266 
2267  *p = 0;
2268  bh = sb_find_get_block(inode->i_sb, nr);
2269  ext3_forget(handle, 0, inode, bh, nr);
2270  }
2271  }
2272 
2273  ext3_free_blocks(handle, inode, block_to_free, count);
2274 }
2275 
2295 static void ext3_free_data(handle_t *handle, struct inode *inode,
2296  struct buffer_head *this_bh,
2297  __le32 *first, __le32 *last)
2298 {
2299  ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
2300  unsigned long count = 0; /* Number of blocks in the run */
2301  __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2302  corresponding to
2303  block_to_free */
2304  ext3_fsblk_t nr; /* Current block # */
2305  __le32 *p; /* Pointer into inode/ind
2306  for current block */
2307  int err;
2308 
2309  if (this_bh) { /* For indirect block */
2310  BUFFER_TRACE(this_bh, "get_write_access");
2311  err = ext3_journal_get_write_access(handle, this_bh);
2312  /* Important: if we can't update the indirect pointers
2313  * to the blocks, we can't free them. */
2314  if (err)
2315  return;
2316  }
2317 
2318  for (p = first; p < last; p++) {
2319  nr = le32_to_cpu(*p);
2320  if (nr) {
2321  /* accumulate blocks to free if they're contiguous */
2322  if (count == 0) {
2323  block_to_free = nr;
2324  block_to_free_p = p;
2325  count = 1;
2326  } else if (nr == block_to_free + count) {
2327  count++;
2328  } else {
2329  ext3_clear_blocks(handle, inode, this_bh,
2330  block_to_free,
2331  count, block_to_free_p, p);
2332  block_to_free = nr;
2333  block_to_free_p = p;
2334  count = 1;
2335  }
2336  }
2337  }
2338 
2339  if (count > 0)
2340  ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2341  count, block_to_free_p, p);
2342 
2343  if (this_bh) {
2344  BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2345 
2346  /*
2347  * The buffer head should have an attached journal head at this
2348  * point. However, if the data is corrupted and an indirect
2349  * block pointed to itself, it would have been detached when
2350  * the block was cleared. Check for this instead of OOPSing.
2351  */
2352  if (bh2jh(this_bh))
2353  ext3_journal_dirty_metadata(handle, this_bh);
2354  else
2355  ext3_error(inode->i_sb, "ext3_free_data",
2356  "circular indirect block detected, "
2357  "inode=%lu, block=%llu",
2358  inode->i_ino,
2359  (unsigned long long)this_bh->b_blocknr);
2360  }
2361 }
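/*
 * Worked example (editor's illustration): if the pointer array holds
 * the physical blocks 100, 101, 102, 0, 200, the loop above issues
 *
 *	ext3_clear_blocks(..., block_to_free=100, count=3, ...)
 *	ext3_clear_blocks(..., block_to_free=200, count=1, ...)
 *
 * The hole (0) is simply skipped; the run of three is flushed when the
 * non-contiguous block 200 is seen, and the final run is flushed by
 * the count > 0 check after the loop.
 */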
2362 
2376 static void ext3_free_branches(handle_t *handle, struct inode *inode,
2377  struct buffer_head *parent_bh,
2378  __le32 *first, __le32 *last, int depth)
2379 {
2380  ext3_fsblk_t nr;
2381  __le32 *p;
2382 
2383  if (is_handle_aborted(handle))
2384  return;
2385 
2386  if (depth--) {
2387  struct buffer_head *bh;
2388  int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2389  p = last;
2390  while (--p >= first) {
2391  nr = le32_to_cpu(*p);
2392  if (!nr)
2393  continue; /* A hole */
2394 
2395  /* Go read the buffer for the next level down */
2396  bh = sb_bread(inode->i_sb, nr);
2397 
2398  /*
2399  * A read failure? Report error and clear slot
2400  * (should be rare).
2401  */
2402  if (!bh) {
2403  ext3_error(inode->i_sb, "ext3_free_branches",
2404  "Read failure, inode=%lu, block="E3FSBLK,
2405  inode->i_ino, nr);
2406  continue;
2407  }
2408 
2409  /* This zaps the entire block. Bottom up. */
2410  BUFFER_TRACE(bh, "free child branches");
2411  ext3_free_branches(handle, inode, bh,
2412  (__le32*)bh->b_data,
2413  (__le32*)bh->b_data + addr_per_block,
2414  depth);
2415 
2416  /*
2417  * Everything below this pointer has been
2418  * released. Now let this top-of-subtree go.
2419  *
2420  * We want the freeing of this indirect block to be
2421  * atomic in the journal with the updating of the
2422  * bitmap block which owns it. So make some room in
2423  * the journal.
2424  *
2425  * We zero the parent pointer *after* freeing its
2426  * pointee in the bitmaps, so if extend_transaction()
2427  * for some reason fails to put the bitmap changes and
2428  * the release into the same transaction, recovery
2429  * will merely complain about releasing a free block,
2430  * rather than leaking blocks.
2431  */
2432  if (is_handle_aborted(handle))
2433  return;
2434  if (try_to_extend_transaction(handle, inode)) {
2435  ext3_mark_inode_dirty(handle, inode);
2436  truncate_restart_transaction(handle, inode);
2437  }
2438 
2439  /*
2440  * We've probably journalled the indirect block several
2441  * times during the truncate. But it's no longer
2442  * needed and we now drop it from the transaction via
2443  * journal_revoke().
2444  *
2445  * That's easy if it's exclusively part of this
2446  * transaction. But if it's part of the committing
2447  * transaction then journal_forget() will simply
2448  * brelse() it. That means that if the underlying
2449  * block is reallocated in ext3_get_block(),
2450  * unmap_underlying_metadata() will find this block
2451  * and will try to get rid of it. damn, damn. Thus
2452  * we don't allow a block to be reallocated until
2453  * a transaction freeing it has fully committed.
2454  *
2455  * We also have to make sure journal replay after a
2456  * crash does not overwrite non-journaled data blocks
2457  * with old metadata when the block got reallocated for
2458  * data. Thus we have to store a revoke record for a
2459  * block in the same transaction in which we free the
2460  * block.
2461  */
2462  ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2463 
2464  ext3_free_blocks(handle, inode, nr, 1);
2465 
2466  if (parent_bh) {
2467  /*
2468  * The block which we have just freed is
2469  * pointed to by an indirect block: journal it
2470  */
2471  BUFFER_TRACE(parent_bh, "get_write_access");
2472  if (!ext3_journal_get_write_access(handle,
2473  parent_bh)){
2474  *p = 0;
2475  BUFFER_TRACE(parent_bh,
2476  "call ext3_journal_dirty_metadata");
2477  ext3_journal_dirty_metadata(handle,
2478  parent_bh);
2479  }
2480  }
2481  }
2482  } else {
2483  /* We have reached the bottom of the tree. */
2484  BUFFER_TRACE(parent_bh, "free data blocks");
2485  ext3_free_data(handle, inode, parent_bh, first, last);
2486  }
2487 }
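/*
 * Editor's note on the call pattern: for a triple-indirect subtree the
 * recursion proceeds depth 3 -> 2 -> 1 -> 0, freeing data blocks at
 * depth 0 via ext3_free_data() and then releasing each indirect block
 * on the way back up, so every committed transaction leaves the tree
 * consistent bottom-up.
 */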
2488 
2489 int ext3_can_truncate(struct inode *inode)
2490 {
2491  if (S_ISREG(inode->i_mode))
2492  return 1;
2493  if (S_ISDIR(inode->i_mode))
2494  return 1;
2495  if (S_ISLNK(inode->i_mode))
2496  return !ext3_inode_is_fast_symlink(inode);
2497  return 0;
2498 }
2499 
2500 /*
2501  * ext3_truncate()
2502  *
2503  * We block out ext3_get_block() block instantiations across the entire
2504  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2505  * simultaneously on behalf of the same inode.
2506  *
2507  * As we work through the truncate and commit bits of it to the journal there
2508  * is one core, guiding principle: the file's tree must always be consistent on
2509  * disk. We must be able to restart the truncate after a crash.
2510  *
2511  * The file's tree may be transiently inconsistent in memory (although it
2512  * probably isn't), but whenever we close off and commit a journal transaction,
2513  * the contents of (the filesystem + the journal) must be consistent and
2514  * restartable. It's pretty simple, really: bottom up, right to left (although
2515  * left-to-right works OK too).
2516  *
2517  * Note that at recovery time, journal replay occurs *before* the restart of
2518  * truncate against the orphan inode list.
2519  *
2520  * The committed inode has the new, desired i_size (which is the same as
2521  * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see
2522  * that this inode's truncate did not complete and it will again call
2523  * ext3_truncate() to have another go. So there will be instantiated blocks
2524  * to the right of the truncation point in a crashed ext3 filesystem. But
2525  * that's fine - as long as they are linked from the inode, the post-crash
2526  * ext3_truncate() run will find them and release them.
2527  */
2528 void ext3_truncate(struct inode *inode)
2529 {
2530  handle_t *handle;
2531  struct ext3_inode_info *ei = EXT3_I(inode);
2532  __le32 *i_data = ei->i_data;
2533  int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2534  int offsets[4];
2535  Indirect chain[4];
2536  Indirect *partial;
2537  __le32 nr = 0;
2538  int n;
2539  long last_block;
2540  unsigned blocksize = inode->i_sb->s_blocksize;
2541 
2542  trace_ext3_truncate_enter(inode);
2543 
2544  if (!ext3_can_truncate(inode))
2545  goto out_notrans;
2546 
2547  if (inode->i_size == 0 && ext3_should_writeback_data(inode))
2548  ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
2549 
2550  handle = start_transaction(inode);
2551  if (IS_ERR(handle))
2552  goto out_notrans;
2553 
2554  last_block = (inode->i_size + blocksize-1)
2555  >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2556  n = ext3_block_to_path(inode, last_block, offsets, NULL);
2557  if (n == 0)
2558  goto out_stop; /* error */
2559 
2560  /*
2561  * OK. This truncate is going to happen. We add the inode to the
2562  * orphan list, so that if this truncate spans multiple transactions,
2563  * and we crash, we will resume the truncate when the filesystem
2564  * recovers. It also marks the inode dirty, to catch the new size.
2565  *
2566  * Implication: the file must always be in a sane, consistent
2567  * truncatable state while each transaction commits.
2568  */
2569  if (ext3_orphan_add(handle, inode))
2570  goto out_stop;
2571 
2572  /*
2573  * The orphan list entry will now protect us from any crash which
2574  * occurs before the truncate completes, so it is now safe to propagate
2575  * the new, shorter inode size (held for now in i_size) into the
2576  * on-disk inode. We do this via i_disksize, which is the value which
2577  * ext3 *really* writes onto the disk inode.
2578  */
2579  ei->i_disksize = inode->i_size;
2580 
2581  /*
2582  * From here we block out all ext3_get_block() callers who want to
2583  * modify the block allocation tree.
2584  */
2585  mutex_lock(&ei->truncate_mutex);
2586 
2587  if (n == 1) { /* direct blocks */
2588  ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2589  i_data + EXT3_NDIR_BLOCKS);
2590  goto do_indirects;
2591  }
2592 
2593  partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2594  /* Kill the top of shared branch (not detached) */
2595  if (nr) {
2596  if (partial == chain) {
2597  /* Shared branch grows from the inode */
2598  ext3_free_branches(handle, inode, NULL,
2599  &nr, &nr+1, (chain+n-1) - partial);
2600  *partial->p = 0;
2601  /*
2602  * We mark the inode dirty prior to restart,
2603  * and prior to stop. No need for it here.
2604  */
2605  } else {
2606  /* Shared branch grows from an indirect block */
2607  ext3_free_branches(handle, inode, partial->bh,
2608  partial->p,
2609  partial->p+1, (chain+n-1) - partial);
2610  }
2611  }
2612  /* Clear the ends of indirect blocks on the shared branch */
2613  while (partial > chain) {
2614  ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2615  (__le32*)partial->bh->b_data+addr_per_block,
2616  (chain+n-1) - partial);
2617  BUFFER_TRACE(partial->bh, "call brelse");
2618  brelse (partial->bh);
2619  partial--;
2620  }
2621 do_indirects:
2622  /* Kill the remaining (whole) subtrees */
2623  switch (offsets[0]) {
2624  default:
2625  nr = i_data[EXT3_IND_BLOCK];
2626  if (nr) {
2627  ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2628  i_data[EXT3_IND_BLOCK] = 0;
2629  }
2630  case EXT3_IND_BLOCK:
2631  nr = i_data[EXT3_DIND_BLOCK];
2632  if (nr) {
2633  ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2634  i_data[EXT3_DIND_BLOCK] = 0;
2635  }
2636  case EXT3_DIND_BLOCK:
2637  nr = i_data[EXT3_TIND_BLOCK];
2638  if (nr) {
2639  ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2640  i_data[EXT3_TIND_BLOCK] = 0;
2641  }
2642  case EXT3_TIND_BLOCK:
2643  ;
2644  }
2645 
2646  ext3_discard_reservation(inode);
2647 
2648  mutex_unlock(&ei->truncate_mutex);
2649  inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2650  ext3_mark_inode_dirty(handle, inode);
2651 
2652  /*
2653  * In a multi-transaction truncate, we only make the final transaction
2654  * synchronous
2655  */
2656  if (IS_SYNC(inode))
2657  handle->h_sync = 1;
2658 out_stop:
2659  /*
2660  * If this was a simple ftruncate(), and the file will remain alive
2661  * then we need to clear up the orphan record which we created above.
2662  * However, if this was a real unlink then we were called by
2663  * ext3_evict_inode(), and we allow that function to clean up the
2664  * orphan info for us.
2665  */
2666  if (inode->i_nlink)
2667  ext3_orphan_del(handle, inode);
2668 
2669  ext3_journal_stop(handle);
2670  trace_ext3_truncate_exit(inode);
2671  return;
2672 out_notrans:
2673  /*
2674  * Delete the inode from orphan list so that it doesn't stay there
2675  * forever and trigger assertion on umount.
2676  */
2677  if (inode->i_nlink)
2678  ext3_orphan_del(NULL, inode);
2679  trace_ext3_truncate_exit(inode);
2680 }
2681 
2682 static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2683  unsigned long ino, struct ext3_iloc *iloc)
2684 {
2685  unsigned long block_group;
2686  unsigned long offset;
2687  ext3_fsblk_t block;
2688  struct ext3_group_desc *gdp;
2689 
2690  if (!ext3_valid_inum(sb, ino)) {
2691  /*
2692  * This error is already checked for in namei.c unless we are
2693  * looking at an NFS filehandle, in which case no error
2694  * report is needed
2695  */
2696  return 0;
2697  }
2698 
2699  block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2700  gdp = ext3_get_group_desc(sb, block_group, NULL);
2701  if (!gdp)
2702  return 0;
2703  /*
2704  * Figure out the offset within the block group inode table
2705  */
2706  offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2707  EXT3_INODE_SIZE(sb);
2708  block = le32_to_cpu(gdp->bg_inode_table) +
2709  (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2710 
2711  iloc->block_group = block_group;
2712  iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2713  return block;
2714 }
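/*
 * Worked example (editor's illustration, assuming 8192 inodes per
 * group, 128-byte inodes and a 4K block size): ino 10000 lands in
 * block_group (10000 - 1) / 8192 = 1, at byte offset
 * ((10000 - 1) % 8192) * 128 = 231296 into that group's inode table,
 * i.e. block bg_inode_table + (231296 >> 12) = bg_inode_table + 56,
 * with iloc->offset = 231296 & 4095 = 1920.
 */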
2715 
2716 /*
2717  * ext3_get_inode_loc returns with an extra refcount against the inode's
2718  * underlying buffer_head on success. If 'in_mem' is true, we have all
2719  * data in memory that is needed to recreate the on-disk version of this
2720  * inode.
2721  */
2722 static int __ext3_get_inode_loc(struct inode *inode,
2723  struct ext3_iloc *iloc, int in_mem)
2724 {
2725  ext3_fsblk_t block;
2726  struct buffer_head *bh;
2727 
2728  block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2729  if (!block)
2730  return -EIO;
2731 
2732  bh = sb_getblk(inode->i_sb, block);
2733  if (!bh) {
2734  ext3_error (inode->i_sb, "ext3_get_inode_loc",
2735  "unable to read inode block - "
2736  "inode=%lu, block="E3FSBLK,
2737  inode->i_ino, block);
2738  return -EIO;
2739  }
2740  if (!buffer_uptodate(bh)) {
2741  lock_buffer(bh);
2742 
2743  /*
2744  * If the buffer has the write error flag, we have failed
2745  * to write out another inode in the same block. In this
2746  * case, we don't have to read the block because we may
2747  * read the old inode data successfully.
2748  */
2749  if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
2750  set_buffer_uptodate(bh);
2751 
2752  if (buffer_uptodate(bh)) {
2753  /* someone brought it uptodate while we waited */
2754  unlock_buffer(bh);
2755  goto has_buffer;
2756  }
2757 
2758  /*
2759  * If we have all information of the inode in memory and this
2760  * is the only valid inode in the block, we need not read the
2761  * block.
2762  */
2763  if (in_mem) {
2764  struct buffer_head *bitmap_bh;
2765  struct ext3_group_desc *desc;
2766  int inodes_per_buffer;
2767  int inode_offset, i;
2768  int block_group;
2769  int start;
2770 
2771  block_group = (inode->i_ino - 1) /
2772  EXT3_INODES_PER_GROUP(inode->i_sb);
2773  inodes_per_buffer = bh->b_size /
2774  EXT3_INODE_SIZE(inode->i_sb);
2775  inode_offset = ((inode->i_ino - 1) %
2776  EXT3_INODES_PER_GROUP(inode->i_sb));
2777  start = inode_offset & ~(inodes_per_buffer - 1);
2778 
2779  /* Is the inode bitmap in cache? */
2780  desc = ext3_get_group_desc(inode->i_sb,
2781  block_group, NULL);
2782  if (!desc)
2783  goto make_io;
2784 
2785  bitmap_bh = sb_getblk(inode->i_sb,
2786  le32_to_cpu(desc->bg_inode_bitmap));
2787  if (!bitmap_bh)
2788  goto make_io;
2789 
2790  /*
2791  * If the inode bitmap isn't in cache then the
2792  * optimisation may end up performing two reads instead
2793  * of one, so skip it.
2794  */
2795  if (!buffer_uptodate(bitmap_bh)) {
2796  brelse(bitmap_bh);
2797  goto make_io;
2798  }
2799  for (i = start; i < start + inodes_per_buffer; i++) {
2800  if (i == inode_offset)
2801  continue;
2802  if (ext3_test_bit(i, bitmap_bh->b_data))
2803  break;
2804  }
2805  brelse(bitmap_bh);
2806  if (i == start + inodes_per_buffer) {
2807  /* all other inodes are free, so skip I/O */
2808  memset(bh->b_data, 0, bh->b_size);
2809  set_buffer_uptodate(bh);
2810  unlock_buffer(bh);
2811  goto has_buffer;
2812  }
2813  }
2814 
2815 make_io:
2816  /*
2817  * There are other valid inodes in the buffer, this inode
2818  * has in-inode xattrs, or we don't have this inode in memory.
2819  * Read the block from disk.
2820  */
2821  trace_ext3_load_inode(inode);
2822  get_bh(bh);
2823  bh->b_end_io = end_buffer_read_sync;
2824  submit_bh(READ | REQ_META | REQ_PRIO, bh);
2825  wait_on_buffer(bh);
2826  if (!buffer_uptodate(bh)) {
2827  ext3_error(inode->i_sb, "ext3_get_inode_loc",
2828  "unable to read inode block - "
2829  "inode=%lu, block="E3FSBLK,
2830  inode->i_ino, block);
2831  brelse(bh);
2832  return -EIO;
2833  }
2834  }
2835 has_buffer:
2836  iloc->bh = bh;
2837  return 0;
2838 }
2839 
2840 int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2841 {
2842  /* We have all inode data except xattrs in memory here. */
2843  return __ext3_get_inode_loc(inode, iloc,
2844  !ext3_test_inode_state(inode, EXT3_STATE_XATTR));
2845 }
2846 
2847 void ext3_set_inode_flags(struct inode *inode)
2848 {
2849  unsigned int flags = EXT3_I(inode)->i_flags;
2850 
2851  inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2852  if (flags & EXT3_SYNC_FL)
2853  inode->i_flags |= S_SYNC;
2854  if (flags & EXT3_APPEND_FL)
2855  inode->i_flags |= S_APPEND;
2856  if (flags & EXT3_IMMUTABLE_FL)
2857  inode->i_flags |= S_IMMUTABLE;
2858  if (flags & EXT3_NOATIME_FL)
2859  inode->i_flags |= S_NOATIME;
2860  if (flags & EXT3_DIRSYNC_FL)
2861  inode->i_flags |= S_DIRSYNC;
2862 }
2863 
2864 /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2865 void ext3_get_inode_flags(struct ext3_inode_info *ei)
2866 {
2867  unsigned int flags = ei->vfs_inode.i_flags;
2868 
2869  ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2870  EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2871  if (flags & S_SYNC)
2872  ei->i_flags |= EXT3_SYNC_FL;
2873  if (flags & S_APPEND)
2874  ei->i_flags |= EXT3_APPEND_FL;
2875  if (flags & S_IMMUTABLE)
2876  ei->i_flags |= EXT3_IMMUTABLE_FL;
2877  if (flags & S_NOATIME)
2878  ei->i_flags |= EXT3_NOATIME_FL;
2879  if (flags & S_DIRSYNC)
2880  ei->i_flags |= EXT3_DIRSYNC_FL;
2881 }
2882 
2883 struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
2884 {
2885  struct ext3_iloc iloc;
2886  struct ext3_inode *raw_inode;
2887  struct ext3_inode_info *ei;
2888  struct buffer_head *bh;
2889  struct inode *inode;
2890  journal_t *journal = EXT3_SB(sb)->s_journal;
2891  transaction_t *transaction;
2892  long ret;
2893  int block;
2894  uid_t i_uid;
2895  gid_t i_gid;
2896 
2897  inode = iget_locked(sb, ino);
2898  if (!inode)
2899  return ERR_PTR(-ENOMEM);
2900  if (!(inode->i_state & I_NEW))
2901  return inode;
2902 
2903  ei = EXT3_I(inode);
2904  ei->i_block_alloc_info = NULL;
2905 
2906  ret = __ext3_get_inode_loc(inode, &iloc, 0);
2907  if (ret < 0)
2908  goto bad_inode;
2909  bh = iloc.bh;
2910  raw_inode = ext3_raw_inode(&iloc);
2911  inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2912  i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2913  i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2914  if(!(test_opt (inode->i_sb, NO_UID32))) {
2915  i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2916  i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2917  }
2918  i_uid_write(inode, i_uid);
2919  i_gid_write(inode, i_gid);
2920  set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
2921  inode->i_size = le32_to_cpu(raw_inode->i_size);
2922  inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2923  inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2924  inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2925  inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2926 
2927  ei->i_state_flags = 0;
2928  ei->i_dir_start_lookup = 0;
2929  ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2930  /* We now have enough fields to check if the inode was active or not.
2931  * This is needed because nfsd might try to access dead inodes;
2932  * the test is the same one that e2fsck uses.
2933  * NeilBrown 1999oct15
2934  */
2935  if (inode->i_nlink == 0) {
2936  if (inode->i_mode == 0 ||
2937  !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2938  /* this inode is deleted */
2939  brelse (bh);
2940  ret = -ESTALE;
2941  goto bad_inode;
2942  }
2943  /* The only unlinked inodes we let through here have
2944  * valid i_mode and are being read by the orphan
2945  * recovery code: that's fine, we're about to complete
2946  * the process of deleting those. */
2947  }
2948  inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2949  ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2950 #ifdef EXT3_FRAGMENTS
2951  ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2952  ei->i_frag_no = raw_inode->i_frag;
2953  ei->i_frag_size = raw_inode->i_fsize;
2954 #endif
2955  ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2956  if (!S_ISREG(inode->i_mode)) {
2957  ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2958  } else {
2959  inode->i_size |=
2960  ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2961  }
2962  ei->i_disksize = inode->i_size;
2963  inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2964  ei->i_block_group = iloc.block_group;
2965  /*
2966  * NOTE! The in-memory inode i_data array is in little-endian order
2967  * even on big-endian machines: we do NOT byteswap the block numbers!
2968  */
2969  for (block = 0; block < EXT3_N_BLOCKS; block++)
2970  ei->i_data[block] = raw_inode->i_block[block];
2971  INIT_LIST_HEAD(&ei->i_orphan);
2972 
2973  /*
2974  * Set transaction id's of transactions that have to be committed
2975  * to finish f[data]sync. We set them to currently running transaction
2976  * as we cannot be sure that the inode or some of its metadata isn't
2977  * part of the transaction - the inode could have been reclaimed and
2978  * now it is reread from disk.
2979  */
2980  if (journal) {
2981  tid_t tid;
2982 
2983  spin_lock(&journal->j_state_lock);
2984  if (journal->j_running_transaction)
2985  transaction = journal->j_running_transaction;
2986  else
2987  transaction = journal->j_committing_transaction;
2988  if (transaction)
2989  tid = transaction->t_tid;
2990  else
2991  tid = journal->j_commit_sequence;
2992  spin_unlock(&journal->j_state_lock);
2993  atomic_set(&ei->i_sync_tid, tid);
2994  atomic_set(&ei->i_datasync_tid, tid);
2995  }
2996 
2997  if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2998  EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2999  /*
3000  * When mke2fs creates big inodes it does not zero out
3001  * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
3002  * so ignore those first few inodes.
3003  */
3004  ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3005  if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3006  EXT3_INODE_SIZE(inode->i_sb)) {
3007  brelse (bh);
3008  ret = -EIO;
3009  goto bad_inode;
3010  }
3011  if (ei->i_extra_isize == 0) {
3012  /* The extra space is currently unused. Use it. */
3013  ei->i_extra_isize = sizeof(struct ext3_inode) -
3014  EXT3_GOOD_OLD_INODE_SIZE;
3015  } else {
3016  __le32 *magic = (void *)raw_inode +
3017  EXT3_GOOD_OLD_INODE_SIZE +
3018  ei->i_extra_isize;
3019  if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
3020  ext3_set_inode_state(inode, EXT3_STATE_XATTR);
3021  }
3022  } else
3023  ei->i_extra_isize = 0;
3024 
3025  if (S_ISREG(inode->i_mode)) {
3026  inode->i_op = &ext3_file_inode_operations;
3027  inode->i_fop = &ext3_file_operations;
3028  ext3_set_aops(inode);
3029  } else if (S_ISDIR(inode->i_mode)) {
3030  inode->i_op = &ext3_dir_inode_operations;
3031  inode->i_fop = &ext3_dir_operations;
3032  } else if (S_ISLNK(inode->i_mode)) {
3033  if (ext3_inode_is_fast_symlink(inode)) {
3034  inode->i_op = &ext3_fast_symlink_inode_operations;
3035  nd_terminate_link(ei->i_data, inode->i_size,
3036  sizeof(ei->i_data) - 1);
3037  } else {
3038  inode->i_op = &ext3_symlink_inode_operations;
3039  ext3_set_aops(inode);
3040  }
3041  } else {
3042  inode->i_op = &ext3_special_inode_operations;
3043  if (raw_inode->i_block[0])
3044  init_special_inode(inode, inode->i_mode,
3045  old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3046  else
3047  init_special_inode(inode, inode->i_mode,
3048  new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3049  }
3050  brelse (iloc.bh);
3051  ext3_set_inode_flags(inode);
3052  unlock_new_inode(inode);
3053  return inode;
3054 
3055 bad_inode:
3056  iget_failed(inode);
3057  return ERR_PTR(ret);
3058 }
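/*
 * Typical caller pattern (editor's sketch; lookup code elsewhere in
 * ext3 follows this shape): translate an inode number into an inode
 * and handle both allocation failure and stale/deleted inodes, which
 * come back as ERR_PTR values.
 */
#if 0
	struct inode *inode = ext3_iget(dir->i_sb, ino);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
#endif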
3059 
3060 /*
3061  * Post the struct inode info into an on-disk inode location in the
3062  * buffer-cache. This gobbles the caller's reference to the
3063  * buffer_head in the inode location struct.
3064  *
3065  * The caller must have write access to iloc->bh.
3066  */
3067 static int ext3_do_update_inode(handle_t *handle,
3068  struct inode *inode,
3069  struct ext3_iloc *iloc)
3070 {
3071  struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
3072  struct ext3_inode_info *ei = EXT3_I(inode);
3073  struct buffer_head *bh = iloc->bh;
3074  int err = 0, rc, block;
3075  int need_datasync = 0;
3076  __le32 disksize;
3077  uid_t i_uid;
3078  gid_t i_gid;
3079 
3080 again:
3081  /* we can't allow multiple procs in here at once, it's a bit racy */
3082  lock_buffer(bh);
3083 
3084  /* For fields not tracked in the in-memory inode,
3085  * initialise them to zero for new inodes. */
3086  if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
3087  memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
3088 
3089  ext3_get_inode_flags(ei);
3090  raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3091  i_uid = i_uid_read(inode);
3092  i_gid = i_gid_read(inode);
3093  if(!(test_opt(inode->i_sb, NO_UID32))) {
3094  raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
3095  raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3096 /*
3097  * Fix up interoperability with old kernels. Otherwise, old inodes get
3098  * re-used with the upper 16 bits of the uid/gid intact
3099  */
3100  if(!ei->i_dtime) {
3101  raw_inode->i_uid_high =
3102  cpu_to_le16(high_16_bits(i_uid));
3103  raw_inode->i_gid_high =
3104  cpu_to_le16(high_16_bits(i_gid));
3105  } else {
3106  raw_inode->i_uid_high = 0;
3107  raw_inode->i_gid_high = 0;
3108  }
3109  } else {
3110  raw_inode->i_uid_low =
3111  cpu_to_le16(fs_high2lowuid(i_uid));
3112  raw_inode->i_gid_low =
3113  cpu_to_le16(fs_high2lowgid(i_gid));
3114  raw_inode->i_uid_high = 0;
3115  raw_inode->i_gid_high = 0;
3116  }
3117  raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3118  disksize = cpu_to_le32(ei->i_disksize);
3119  if (disksize != raw_inode->i_size) {
3120  need_datasync = 1;
3121  raw_inode->i_size = disksize;
3122  }
3123  raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
3124  raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
3125  raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
3126  raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
3127  raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3128  raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3129 #ifdef EXT3_FRAGMENTS
3130  raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
3131  raw_inode->i_frag = ei->i_frag_no;
3132  raw_inode->i_fsize = ei->i_frag_size;
3133 #endif
3134  raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
3135  if (!S_ISREG(inode->i_mode)) {
3136  raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
3137  } else {
3138  disksize = cpu_to_le32(ei->i_disksize >> 32);
3139  if (disksize != raw_inode->i_size_high) {
3140  raw_inode->i_size_high = disksize;
3141  need_datasync = 1;
3142  }
3143  if (ei->i_disksize > 0x7fffffffULL) {
3144  struct super_block *sb = inode->i_sb;
3145  if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
3146  EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
3147  EXT3_SB(sb)->s_es->s_rev_level ==
3148  cpu_to_le32(EXT3_GOOD_OLD_REV)) {
3149  /* If this is the first large file
3150  * created, add a flag to the superblock.
3151  */
3152  unlock_buffer(bh);
3153  err = ext3_journal_get_write_access(handle,
3154  EXT3_SB(sb)->s_sbh);
3155  if (err)
3156  goto out_brelse;
3157 
3158  ext3_update_dynamic_rev(sb);
3159  EXT3_SET_RO_COMPAT_FEATURE(sb,
3160  EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
3161  handle->h_sync = 1;
3162  err = ext3_journal_dirty_metadata(handle,
3163  EXT3_SB(sb)->s_sbh);
3164  /* get our lock and start over */
3165  goto again;
3166  }
3167  }
3168  }
3169  raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3170  if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3171  if (old_valid_dev(inode->i_rdev)) {
3172  raw_inode->i_block[0] =
3173  cpu_to_le32(old_encode_dev(inode->i_rdev));
3174  raw_inode->i_block[1] = 0;
3175  } else {
3176  raw_inode->i_block[0] = 0;
3177  raw_inode->i_block[1] =
3178  cpu_to_le32(new_encode_dev(inode->i_rdev));
3179  raw_inode->i_block[2] = 0;
3180  }
3181  } else for (block = 0; block < EXT3_N_BLOCKS; block++)
3182  raw_inode->i_block[block] = ei->i_data[block];
3183 
3184  if (ei->i_extra_isize)
3185  raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3186 
3187  BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
3188  unlock_buffer(bh);
3189  rc = ext3_journal_dirty_metadata(handle, bh);
3190  if (!err)
3191  err = rc;
3192  ext3_clear_inode_state(inode, EXT3_STATE_NEW);
3193 
3194  atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
3195  if (need_datasync)
3196  atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
3197 out_brelse:
3198  brelse (bh);
3199  ext3_std_error(inode->i_sb, err);
3200  return err;
3201 }
3202 
3203 /*
3204  * ext3_write_inode()
3205  *
3206  * We are called from a few places:
3207  *
3208  * - Within generic_file_write() for O_SYNC files.
3209  * Here, there will be no transaction running. We wait for any running
3210  * transaction to commit.
3211  *
3212  * - Within sys_sync(), kupdate and such.
3213  * We wait on commit, if told to.
3214  *
3215  * - Within prune_icache() (PF_MEMALLOC == true)
3216  * Here we simply return. We can't afford to block kswapd on the
3217  * journal commit.
3218  *
3219  * In all cases it is actually safe for us to return without doing anything,
3220  * because the inode has been copied into a raw inode buffer in
3221  * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
3222  * knfsd.
3223  *
3224  * Note that we are absolutely dependent upon all inode dirtiers doing the
3225  * right thing: they *must* call mark_inode_dirty() after dirtying info in
3226  * which we are interested.
3227  *
3228  * It would be a bug for them to not do this. The code:
3229  *
3230  * mark_inode_dirty(inode)
3231  * stuff();
3232  * inode->i_size = expr;
3233  *
3234  * is in error because a kswapd-driven write_inode() could occur while
3235  * `stuff()' is running, and the new i_size will be lost. Plus the inode
3236  * will no longer be on the superblock's dirty inode list.
3237  */
3238 int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
3239 {
3240  if (current->flags & PF_MEMALLOC)
3241  return 0;
3242 
3243  if (ext3_journal_current_handle()) {
3244  jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3245  dump_stack();
3246  return -EIO;
3247  }
3248 
3249  if (wbc->sync_mode != WB_SYNC_ALL)
3250  return 0;
3251 
3252  return ext3_force_commit(inode->i_sb);
3253 }
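/*
 * Editor's sketch: the safe ordering implied by the comment above is
 * to finish all updates before marking the inode dirty.
 */
#if 0
	inode->i_size = expr;
	stuff();
	mark_inode_dirty(inode);
#endif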
3254 
3255 /*
3256  * ext3_setattr()
3257  *
3258  * Called from notify_change.
3259  *
3260  * We want to trap VFS attempts to truncate the file as soon as
3261  * possible. In particular, we want to make sure that when the VFS
3262  * shrinks i_size, we put the inode on the orphan list and modify
3263  * i_disksize immediately, so that during the subsequent flushing of
3264  * dirty pages and freeing of disk blocks, we can guarantee that any
3265  * commit will leave the blocks being flushed in an unused state on
3266  * disk. (On recovery, the inode will get truncated and the blocks will
3267  * be freed, so we have a strong guarantee that no future commit will
3268  * leave these blocks visible to the user.)
3269  *
3270  * Called with inode->sem down.
3271  */
3272 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
3273 {
3274  struct inode *inode = dentry->d_inode;
3275  int error, rc = 0;
3276  const unsigned int ia_valid = attr->ia_valid;
3277 
3278  error = inode_change_ok(inode, attr);
3279  if (error)
3280  return error;
3281 
3282  if (is_quota_modification(inode, attr))
3283  dquot_initialize(inode);
3284  if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
3285  (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
3286  handle_t *handle;
3287 
3288  /* (user+group)*(old+new) structure, inode write (sb,
3289  * inode block, ? - but truncate inode update has it) */
3290  handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
3291  EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
3292  if (IS_ERR(handle)) {
3293  error = PTR_ERR(handle);
3294  goto err_out;
3295  }
3296  error = dquot_transfer(inode, attr);
3297  if (error) {
3298  ext3_journal_stop(handle);
3299  return error;
3300  }
3301  /* Update corresponding info in inode so that everything is in
3302  * one transaction */
3303  if (attr->ia_valid & ATTR_UID)
3304  inode->i_uid = attr->ia_uid;
3305  if (attr->ia_valid & ATTR_GID)
3306  inode->i_gid = attr->ia_gid;
3307  error = ext3_mark_inode_dirty(handle, inode);
3308  ext3_journal_stop(handle);
3309  }
3310 
3311  if (attr->ia_valid & ATTR_SIZE)
3312  inode_dio_wait(inode);
3313 
3314  if (S_ISREG(inode->i_mode) &&
3315  attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3316  handle_t *handle;
3317 
3318  handle = ext3_journal_start(inode, 3);
3319  if (IS_ERR(handle)) {
3320  error = PTR_ERR(handle);
3321  goto err_out;
3322  }
3323 
3324  error = ext3_orphan_add(handle, inode);
3325  if (error) {
3326  ext3_journal_stop(handle);
3327  goto err_out;
3328  }
3329  EXT3_I(inode)->i_disksize = attr->ia_size;
3330  error = ext3_mark_inode_dirty(handle, inode);
3331  ext3_journal_stop(handle);
3332  if (error) {
3333  /* Some hard fs error must have happened. Bail out. */
3334  ext3_orphan_del(NULL, inode);
3335  goto err_out;
3336  }
3337  rc = ext3_block_truncate_page(inode, attr->ia_size);
3338  if (rc) {
3339  /* Cleanup orphan list and exit */
3340  handle = ext3_journal_start(inode, 3);
3341  if (IS_ERR(handle)) {
3342  ext3_orphan_del(NULL, inode);
3343  goto err_out;
3344  }
3345  ext3_orphan_del(handle, inode);
3346  ext3_journal_stop(handle);
3347  goto err_out;
3348  }
3349  }
3350 
3351  if ((attr->ia_valid & ATTR_SIZE) &&
3352  attr->ia_size != i_size_read(inode)) {
3353  truncate_setsize(inode, attr->ia_size);
3354  ext3_truncate(inode);
3355  }
3356 
3357  setattr_copy(inode, attr);
3358  mark_inode_dirty(inode);
3359 
3360  if (ia_valid & ATTR_MODE)
3361  rc = ext3_acl_chmod(inode);
3362 
3363 err_out:
3364  ext3_std_error(inode->i_sb, error);
3365  if (!error)
3366  error = rc;
3367  return error;
3368 }
3369 
3370 
3371 /*
3372  * How many blocks doth make a writepage()?
3373  *
3374  * With N blocks per page, it may be:
3375  * N data blocks
3376  * 2 indirect block
3377  * 2 dindirect
3378  * 1 tindirect
3379  * N+5 bitmap blocks (from the above)
3380  * N+5 group descriptor summary blocks
3381  * 1 inode block
3382  * 1 superblock.
3383  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3384  *
3385  * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3386  *
3387  * With ordered or writeback data it's the same, less the N data blocks.
3388  *
3389  * If the inode's direct blocks can hold an integral number of pages then a
3390  * page cannot straddle two indirect blocks, and we can only touch one indirect
3391  * and dindirect block, and the "5" above becomes "3".
3392  *
3393  * This still overestimates under most circumstances. If we were to pass the
3394  * start and end offsets in here as well we could do block_to_path() on each
3395  * block and work out the exact number of indirects which are touched. Pah.
3396  */
3397 
3398 static int ext3_writepage_trans_blocks(struct inode *inode)
3399 {
3400  int bpp = ext3_journal_blocks_per_page(inode);
3401  int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3402  int ret;
3403 
3404  if (ext3_should_journal_data(inode))
3405  ret = 3 * (bpp + indirects) + 2;
3406  else
3407  ret = 2 * (bpp + indirects) + indirects + 2;
3408 
3409 #ifdef CONFIG_QUOTA
3410  /* We know that structure was already allocated during dquot_initialize so
3411  * we will be updating only the data blocks + inodes */
3412  ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
3413 #endif
3414 
3415  return ret;
3416 }
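/*
 * Worked example (editor's illustration): with 4K pages and 1K blocks,
 * bpp = 4 and EXT3_NDIR_BLOCKS (12) % 4 == 0, so indirects = 3. The
 * data=journal case then reserves 3 * (4 + 3) + 2 = 23 credits per
 * page, the other modes 2 * (4 + 3) + 3 + 2 = 19, plus the quota
 * blocks when CONFIG_QUOTA is enabled.
 */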
3417 
3418 /*
3419  * The caller must have previously called ext3_reserve_inode_write().
3420  * Given this, we know that the caller already has write access to iloc->bh.
3421  */
3422 int ext3_mark_iloc_dirty(handle_t *handle,
3423  struct inode *inode, struct ext3_iloc *iloc)
3424 {
3425  int err = 0;
3426 
3427  /* the do_update_inode consumes one bh->b_count */
3428  get_bh(iloc->bh);
3429 
3430  /* ext3_do_update_inode() does journal_dirty_metadata */
3431  err = ext3_do_update_inode(handle, inode, iloc);
3432  put_bh(iloc->bh);
3433  return err;
3434 }
3435 
3436 /*
3437  * On success, we end up with an outstanding reference count against
3438  * iloc->bh. This _must_ be cleaned up later.
3439  */
3440 
3441 int
3442 ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3443  struct ext3_iloc *iloc)
3444 {
3445  int err = 0;
3446  if (handle) {
3447  err = ext3_get_inode_loc(inode, iloc);
3448  if (!err) {
3449  BUFFER_TRACE(iloc->bh, "get_write_access");
3450  err = ext3_journal_get_write_access(handle, iloc->bh);
3451  if (err) {
3452  brelse(iloc->bh);
3453  iloc->bh = NULL;
3454  }
3455  }
3456  }
3457  ext3_std_error(inode->i_sb, err);
3458  return err;
3459 }
3460 
3461 /*
3462  * What we do here is to mark the in-core inode as clean with respect to inode
3463  * dirtiness (it may still be data-dirty).
3464  * This means that the in-core inode may be reaped by prune_icache
3465  * without having to perform any I/O. This is a very good thing,
3466  * because *any* task may call prune_icache - even ones which
3467  * have a transaction open against a different journal.
3468  *
3469  * Is this cheating? Not really. Sure, we haven't written the
3470  * inode out, but prune_icache isn't a user-visible syncing function.
3471  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3472  * we start and wait on commits.
3473  */
3474 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3475 {
3476  struct ext3_iloc iloc;
3477  int err;
3478 
3479  might_sleep();
3480  trace_ext3_mark_inode_dirty(inode, _RET_IP_);
3481  err = ext3_reserve_inode_write(handle, inode, &iloc);
3482  if (!err)
3483  err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3484  return err;
3485 }
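/*
 * Typical usage (editor's sketch mirroring ext3_dirty_inode() below):
 * reserve credits, mutate the in-core inode, then log it before the
 * handle is dropped.
 */
#if 0
	handle_t *handle = ext3_journal_start(inode, 2);

	if (!IS_ERR(handle)) {
		inode->i_ctime = CURRENT_TIME_SEC;
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_stop(handle);
	}
#endif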
3486 
3487 /*
3488  * ext3_dirty_inode() is called from __mark_inode_dirty()
3489  *
3490  * We're really interested in the case where a file is being extended.
3491  * i_size has been changed by generic_commit_write() and we thus need
3492  * to include the updated inode in the current transaction.
3493  *
3494  * Also, dquot_alloc_space() will always dirty the inode when blocks
3495  * are allocated to the file.
3496  *
3497  * If the inode is marked synchronous, we don't honour that here - doing
3498  * so would cause a commit on atime updates, which we don't bother doing.
3499  * We handle synchronous inodes at the highest possible level.
3500  */
3501 void ext3_dirty_inode(struct inode *inode, int flags)
3502 {
3503  handle_t *current_handle = ext3_journal_current_handle();
3504  handle_t *handle;
3505 
3506  handle = ext3_journal_start(inode, 2);
3507  if (IS_ERR(handle))
3508  goto out;
3509  if (current_handle &&
3510  current_handle->h_transaction != handle->h_transaction) {
3511  /* This task has a transaction open against a different fs */
3512  printk(KERN_EMERG "%s: transactions do not match!\n",
3513  __func__);
3514  } else {
3515  jbd_debug(5, "marking dirty. outer handle=%p\n",
3516  current_handle);
3517  ext3_mark_inode_dirty(handle, inode);
3518  }
3519  ext3_journal_stop(handle);
3520 out:
3521  return;
3522 }
3523 
3524 #if 0
3525 /*
3526  * Bind an inode's backing buffer_head into this transaction, to prevent
3527  * it from being flushed to disk early. Unlike
3528  * ext3_reserve_inode_write, this leaves behind no bh reference and
3529  * returns no iloc structure, so the caller needs to repeat the iloc
3530  * lookup to mark the inode dirty later.
3531  */
3532 static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3533 {
3534  struct ext3_iloc iloc;
3535 
3536  int err = 0;
3537  if (handle) {
3538  err = ext3_get_inode_loc(inode, &iloc);
3539  if (!err) {
3540  BUFFER_TRACE(iloc.bh, "get_write_access");
3541  err = journal_get_write_access(handle, iloc.bh);
3542  if (!err)
3543  err = ext3_journal_dirty_metadata(handle,
3544  iloc.bh);
3545  brelse(iloc.bh);
3546  }
3547  }
3548  ext3_std_error(inode->i_sb, err);
3549  return err;
3550 }
3551 #endif
3552 
3553 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3554 {
3555  journal_t *journal;
3556  handle_t *handle;
3557  int err;
3558 
3559  /*
3560  * We have to be very careful here: changing a data block's
3561  * journaling status dynamically is dangerous. If we write a
3562  * data block to the journal, change the status and then delete
3563  * that block, we risk forgetting to revoke the old log record
3564  * from the journal and so a subsequent replay can corrupt data.
3565  * So, first we make sure that the journal is empty and that
3566  * nobody is changing anything.
3567  */
3568 
3569  journal = EXT3_JOURNAL(inode);
3570  if (is_journal_aborted(journal))
3571  return -EROFS;
3572 
3573  journal_lock_updates(journal);
3574  journal_flush(journal);
3575 
3576  /*
3577  * OK, there are no updates running now, and all cached data is
3578  * synced to disk. We are now in a completely consistent state
3579  * which doesn't have anything in the journal, and we know that
3580  * no filesystem updates are running, so it is safe to modify
3581  * the inode's in-core data-journaling state flag now.
3582  */
3583 
3584  if (val)
3585  EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3586  else
3587  EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3588  ext3_set_aops(inode);
3589 
3590  journal_unlock_updates(journal);
3591 
3592  /* Finally we can mark the inode as dirty. */
3593 
3594  handle = ext3_journal_start(inode, 1);
3595  if (IS_ERR(handle))
3596  return PTR_ERR(handle);
3597 
3598  err = ext3_mark_inode_dirty(handle, inode);
3599  handle->h_sync = 1;
3600  ext3_journal_stop(handle);
3601  ext3_std_error(inode->i_sb, err);
3602 
3603  return err;
3604 }