/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <[email protected]>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}

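/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */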
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

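/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */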
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

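/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */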
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

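/*
 * nilfs_new_inode() allocates a fresh inode within the checkpoint tree
 * (nilfs_root) of @dir: it reserves an entry in the ifile, initializes
 * owner, timestamps, inherited flags and the bmap, and assigns a new
 * generation number before hashing the inode.
 */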
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	/* the on-disk inode carries no atime field; mirror mtime instead */
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

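/*
 * nilfs_iget() returns the inode for (@root, @ino), reading it in from
 * the ifile with __nilfs_read_inode() if it is not already cached.
 */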
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

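/*
 * nilfs_iget_for_gc() looks up or allocates a GC inode, which carries a
 * checkpoint number (@cno) instead of a nilfs_root and is kept distinct
 * from regular inodes by nilfs_iget_test().
 */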
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

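/*
 * nilfs_truncate_bmap() drops the block mappings at and beyond @from,
 * working backwards in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS per
 * pass and retrying a pass once on -ENOMEM, so that a huge truncation is
 * split into bounded steps.
 */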
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

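/*
 * nilfs_set_file_dirty() accounts @nr_dirty freshly dirtied blocks and,
 * unless the inode is already queued or in use by the segment
 * constructor, moves it onto the ns_dirty_files list.
 */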
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

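/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is executed asynchronously
 * and finally attached to the current segment to be written to disk.
 */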
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

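/*
 * nilfs_fiemap() walks the block mapping of @inode from @start for @len
 * bytes and reports contiguous extents through @fieinfo, emitting delayed
 * allocation (uncommitted) block runs as their own extents.
 */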
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}