Linux Kernel 3.7.1
libfs.c
1 /*
2  * fs/libfs.c
3  * Library for filesystems writers.
4  */
5 
6 #include <linux/export.h>
7 #include <linux/pagemap.h>
8 #include <linux/slab.h>
9 #include <linux/mount.h>
10 #include <linux/vfs.h>
11 #include <linux/quotaops.h>
12 #include <linux/mutex.h>
13 #include <linux/exportfs.h>
14 #include <linux/writeback.h>
15 #include <linux/buffer_head.h> /* sync_mapping_buffers */
16 
17 #include <asm/uaccess.h>
18 
19 #include "internal.h"
20 
21 static inline int simple_positive(struct dentry *dentry)
22 {
23  return dentry->d_inode && !d_unhashed(dentry);
24 }
25 
26 int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
27  struct kstat *stat)
28 {
29  struct inode *inode = dentry->d_inode;
30  generic_fillattr(inode, stat);
31  stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
32  return 0;
33 }
34 
35 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
36 {
37  buf->f_type = dentry->d_sb->s_magic;
38  buf->f_bsize = PAGE_CACHE_SIZE;
39  buf->f_namelen = NAME_MAX;
40  return 0;
41 }
42 
43 /*
44  * Retaining negative dentries for an in-memory filesystem just wastes
45  * memory and lookup time: arrange for them to be deleted immediately.
46  */
47 static int simple_delete_dentry(const struct dentry *dentry)
48 {
49  return 1;
50 }
51 
52 /*
53  * Lookup the data. This is trivial - if the dentry didn't already
54  * exist, we know it is negative. Set d_op to delete negative dentries.
55  */
56 struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
57 {
58  static const struct dentry_operations simple_dentry_operations = {
59  .d_delete = simple_delete_dentry,
60  };
61 
62  if (dentry->d_name.len > NAME_MAX)
63  return ERR_PTR(-ENAMETOOLONG);
64  d_set_d_op(dentry, &simple_dentry_operations);
65  d_add(dentry, NULL);
66  return NULL;
67 }
68 
69 int dcache_dir_open(struct inode *inode, struct file *file)
70 {
71  static struct qstr cursor_name = QSTR_INIT(".", 1);
72 
73  file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
74 
75  return file->private_data ? 0 : -ENOMEM;
76 }
77 
78 int dcache_dir_close(struct inode *inode, struct file *file)
79 {
80  dput(file->private_data);
81  return 0;
82 }
83 
84 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
85 {
86  struct dentry *dentry = file->f_path.dentry;
87  mutex_lock(&dentry->d_inode->i_mutex);
88  switch (origin) {
89  case 1:
90  offset += file->f_pos;
91  case 0:
92  if (offset >= 0)
93  break;
94  default:
95  mutex_unlock(&dentry->d_inode->i_mutex);
96  return -EINVAL;
97  }
98  if (offset != file->f_pos) {
99  file->f_pos = offset;
100  if (file->f_pos >= 2) {
101  struct list_head *p;
102  struct dentry *cursor = file->private_data;
103  loff_t n = file->f_pos - 2;
104 
105  spin_lock(&dentry->d_lock);
106  /* d_lock not required for cursor */
107  list_del(&cursor->d_u.d_child);
108  p = dentry->d_subdirs.next;
109  while (n && p != &dentry->d_subdirs) {
110  struct dentry *next;
111  next = list_entry(p, struct dentry, d_u.d_child);
112  spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
113  if (simple_positive(next))
114  n--;
115  spin_unlock(&next->d_lock);
116  p = p->next;
117  }
118  list_add_tail(&cursor->d_u.d_child, p);
119  spin_unlock(&dentry->d_lock);
120  }
121  }
122  mutex_unlock(&dentry->d_inode->i_mutex);
123  return offset;
124 }
125 
126 /* Relationship between i_mode and the DT_xxx types */
127 static inline unsigned char dt_type(struct inode *inode)
128 {
129  return (inode->i_mode >> 12) & 15;
130 }
131 
132 /*
133  * Directory is locked and all positive dentries in it are safe, since
134  * for ramfs-type trees they can't go away without unlink() or rmdir(),
135  * both impossible due to the lock on directory.
136  */
137 
138 int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
139 {
140  struct dentry *dentry = filp->f_path.dentry;
141  struct dentry *cursor = filp->private_data;
142  struct list_head *p, *q = &cursor->d_u.d_child;
143  ino_t ino;
144  int i = filp->f_pos;
145 
146  switch (i) {
147  case 0:
148  ino = dentry->d_inode->i_ino;
149  if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
150  break;
151  filp->f_pos++;
152  i++;
153  /* fallthrough */
154  case 1:
155  ino = parent_ino(dentry);
156  if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
157  break;
158  filp->f_pos++;
159  i++;
160  /* fallthrough */
161  default:
162  spin_lock(&dentry->d_lock);
163  if (filp->f_pos == 2)
164  list_move(q, &dentry->d_subdirs);
165 
166  for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
167  struct dentry *next;
168  next = list_entry(p, struct dentry, d_u.d_child);
169  spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
170  if (!simple_positive(next)) {
171  spin_unlock(&next->d_lock);
172  continue;
173  }
174 
175  spin_unlock(&next->d_lock);
176  spin_unlock(&dentry->d_lock);
177  if (filldir(dirent, next->d_name.name,
178  next->d_name.len, filp->f_pos,
179  next->d_inode->i_ino,
180  dt_type(next->d_inode)) < 0)
181  return 0;
182  spin_lock(&dentry->d_lock);
183  spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
184  /* next is still alive */
185  list_move(q, p);
186  spin_unlock(&next->d_lock);
187  p = q;
188  filp->f_pos++;
189  }
190  spin_unlock(&dentry->d_lock);
191  }
192  return 0;
193 }
194 
195 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
196 {
197  return -EISDIR;
198 }
199 
200 const struct file_operations simple_dir_operations = {
201  .open = dcache_dir_open,
202  .release = dcache_dir_close,
203  .llseek = dcache_dir_lseek,
204  .read = generic_read_dir,
205  .readdir = dcache_readdir,
206  .fsync = noop_fsync,
207 };
208 
209 const struct inode_operations simple_dir_inode_operations = {
210  .lookup = simple_lookup,
211 };
212 
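/*
 * Example (not part of libfs.c, added for illustration): a ramfs-style
 * filesystem typically wires the dcache-backed directory helpers above into
 * a freshly allocated directory inode roughly as sketched below, assuming
 * the usual <linux/fs.h> includes.  The function name and mode handling are
 * assumptions, not kernel API.
 */
static struct inode *examplefs_make_dir_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode = new_inode(sb);

        if (!inode)
                return NULL;
        inode->i_ino = get_next_ino();
        inode->i_mode = S_IFDIR | mode;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_op = &simple_dir_inode_operations;     /* lookup via simple_lookup */
        inode->i_fop = &simple_dir_operations;          /* readdir via dcache_readdir */
        inc_nlink(inode);       /* directories start out with i_nlink == 2 */
        return inode;
}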
213 static const struct super_operations simple_super_operations = {
214  .statfs = simple_statfs,
215 };
216 
217 /*
218  * Common helper for pseudo-filesystems (sockfs, pipefs, bdev - stuff that
219  * will never be mountable)
220  */
221 struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
222  const struct super_operations *ops,
223  const struct dentry_operations *dops, unsigned long magic)
224 {
225  struct super_block *s;
226  struct dentry *dentry;
227  struct inode *root;
228  struct qstr d_name = QSTR_INIT(name, strlen(name));
229 
230  s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
231  if (IS_ERR(s))
232  return ERR_CAST(s);
233 
234  s->s_maxbytes = MAX_LFS_FILESIZE;
235  s->s_blocksize = PAGE_SIZE;
236  s->s_blocksize_bits = PAGE_SHIFT;
237  s->s_magic = magic;
238  s->s_op = ops ? ops : &simple_super_operations;
239  s->s_time_gran = 1;
240  root = new_inode(s);
241  if (!root)
242  goto Enomem;
243  /*
244  * since this is the first inode, make it number 1. New inodes created
245  * after this must take care not to collide with it (by passing
246  * max_reserved of 1 to iunique).
247  */
248  root->i_ino = 1;
249  root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
250  root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
251  dentry = __d_alloc(s, &d_name);
252  if (!dentry) {
253  iput(root);
254  goto Enomem;
255  }
256  d_instantiate(dentry, root);
257  s->s_root = dentry;
258  s->s_d_op = dops;
259  s->s_flags |= MS_ACTIVE;
260  return dget(s->s_root);
261 
262 Enomem:
263  deactivate_locked_super(s);
264  return ERR_PTR(-ENOMEM);
265 }
266 
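/*
 * Example (not part of libfs.c, added for illustration): a kernel-internal
 * pseudo filesystem in the style of sockfs or pipefs calls mount_pseudo()
 * from its ->mount method.  The names and the magic value are made up here.
 */
#define EXAMPLEFS_MAGIC 0x2bad1dea      /* arbitrary, illustrative magic */

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "examplefs:", NULL, NULL, EXAMPLEFS_MAGIC);
}

static struct file_system_type examplefs_fs_type = {
        .name           = "examplefs",
        .mount          = examplefs_mount,
        .kill_sb        = kill_anon_super,
};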
267 int simple_open(struct inode *inode, struct file *file)
268 {
269  if (inode->i_private)
270  file->private_data = inode->i_private;
271  return 0;
272 }
273 
274 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
275 {
276  struct inode *inode = old_dentry->d_inode;
277 
278  inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
279  inc_nlink(inode);
280  ihold(inode);
281  dget(dentry);
282  d_instantiate(dentry, inode);
283  return 0;
284 }
285 
286 int simple_empty(struct dentry *dentry)
287 {
288  struct dentry *child;
289  int ret = 0;
290 
291  spin_lock(&dentry->d_lock);
292  list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
293  spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
294  if (simple_positive(child)) {
295  spin_unlock(&child->d_lock);
296  goto out;
297  }
298  spin_unlock(&child->d_lock);
299  }
300  ret = 1;
301 out:
302  spin_unlock(&dentry->d_lock);
303  return ret;
304 }
305 
306 int simple_unlink(struct inode *dir, struct dentry *dentry)
307 {
308  struct inode *inode = dentry->d_inode;
309 
310  inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
311  drop_nlink(inode);
312  dput(dentry);
313  return 0;
314 }
315 
316 int simple_rmdir(struct inode *dir, struct dentry *dentry)
317 {
318  if (!simple_empty(dentry))
319  return -ENOTEMPTY;
320 
321  drop_nlink(dentry->d_inode);
322  simple_unlink(dir, dentry);
323  drop_nlink(dir);
324  return 0;
325 }
326 
327 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
328  struct inode *new_dir, struct dentry *new_dentry)
329 {
330  struct inode *inode = old_dentry->d_inode;
331  int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
332 
333  if (!simple_empty(new_dentry))
334  return -ENOTEMPTY;
335 
336  if (new_dentry->d_inode) {
337  simple_unlink(new_dir, new_dentry);
338  if (they_are_dirs) {
339  drop_nlink(new_dentry->d_inode);
340  drop_nlink(old_dir);
341  }
342  } else if (they_are_dirs) {
343  drop_nlink(old_dir);
344  inc_nlink(new_dir);
345  }
346 
347  old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
348  new_dir->i_mtime = inode->i_ctime = CURRENT_TIME;
349 
350  return 0;
351 }
352 
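/*
 * Example (not part of libfs.c, added for illustration): an in-memory
 * filesystem usually collects the helpers above into the inode_operations of
 * its directories, much like ramfs does; only inode allocation (create,
 * mkdir, mknod, symlink) needs filesystem-specific code.  The table name is
 * illustrative.
 */
static const struct inode_operations examplefs_dir_inode_operations = {
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .rmdir          = simple_rmdir,
        .rename         = simple_rename,
        /* .create/.mkdir/.mknod/.symlink would be filesystem-specific */
};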
367 int simple_setattr(struct dentry *dentry, struct iattr *iattr)
368 {
369  struct inode *inode = dentry->d_inode;
370  int error;
371 
372  WARN_ON_ONCE(inode->i_op->truncate);
373 
374  error = inode_change_ok(inode, iattr);
375  if (error)
376  return error;
377 
378  if (iattr->ia_valid & ATTR_SIZE)
379  truncate_setsize(inode, iattr->ia_size);
380  setattr_copy(inode, iattr);
381  mark_inode_dirty(inode);
382  return 0;
383 }
385 
386 int simple_readpage(struct file *file, struct page *page)
387 {
388  clear_highpage(page);
389  flush_dcache_page(page);
390  SetPageUptodate(page);
391  unlock_page(page);
392  return 0;
393 }
394 
395 int simple_write_begin(struct file *file, struct address_space *mapping,
396  loff_t pos, unsigned len, unsigned flags,
397  struct page **pagep, void **fsdata)
398 {
399  struct page *page;
400  pgoff_t index;
401 
402  index = pos >> PAGE_CACHE_SHIFT;
403 
404  page = grab_cache_page_write_begin(mapping, index, flags);
405  if (!page)
406  return -ENOMEM;
407 
408  *pagep = page;
409 
410  if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
411  unsigned from = pos & (PAGE_CACHE_SIZE - 1);
412 
413  zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE);
414  }
415  return 0;
416 }
417 
439 int simple_write_end(struct file *file, struct address_space *mapping,
440  loff_t pos, unsigned len, unsigned copied,
441  struct page *page, void *fsdata)
442 {
443  struct inode *inode = page->mapping->host;
444  loff_t last_pos = pos + copied;
445 
446  /* zero the stale part of the page if we did a short copy */
447  if (copied < len) {
448  unsigned from = pos & (PAGE_CACHE_SIZE - 1);
449 
450  zero_user(page, from + copied, len - copied);
451  }
452 
453  if (!PageUptodate(page))
454  SetPageUptodate(page);
455  /*
456  * No need to use i_size_read() here, the i_size
457  * cannot change under us because we hold the i_mutex.
458  */
459  if (last_pos > inode->i_size)
460  i_size_write(inode, last_pos);
461 
462  set_page_dirty(page);
463  unlock_page(page);
464  page_cache_release(page);
465 
466  return copied;
467 }
468 
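/*
 * Example (not part of libfs.c, added for illustration): for a filesystem
 * whose file data lives entirely in the page cache, the three helpers above
 * can serve as the whole address_space_operations table, as ramfs does.  The
 * table name is illustrative; the set_page_dirty hook marks pages dirty
 * without ever scheduling writeback.
 */
static const struct address_space_operations examplefs_aops = {
        .readpage       = simple_readpage,
        .write_begin    = simple_write_begin,
        .write_end      = simple_write_end,
        .set_page_dirty = __set_page_dirty_no_writeback,
};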
469 /*
470  * the inodes created here are not hashed. If you use iunique to generate
471  * unique inode values later for this filesystem, then you must take care
472  * to pass it an appropriate max_reserved value to avoid collisions.
473  */
474 int simple_fill_super(struct super_block *s, unsigned long magic,
475  struct tree_descr *files)
476 {
477  struct inode *inode;
478  struct dentry *root;
479  struct dentry *dentry;
480  int i;
481 
482  s->s_blocksize = PAGE_CACHE_SIZE;
483  s->s_blocksize_bits = PAGE_CACHE_SHIFT;
484  s->s_magic = magic;
485  s->s_op = &simple_super_operations;
486  s->s_time_gran = 1;
487 
488  inode = new_inode(s);
489  if (!inode)
490  return -ENOMEM;
491  /*
492  * because the root inode is 1, the files array must not contain an
493  * entry at index 1
494  */
495  inode->i_ino = 1;
496  inode->i_mode = S_IFDIR | 0755;
497  inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
498  inode->i_op = &simple_dir_inode_operations;
499  inode->i_fop = &simple_dir_operations;
500  set_nlink(inode, 2);
501  root = d_make_root(inode);
502  if (!root)
503  return -ENOMEM;
504  for (i = 0; !files->name || files->name[0]; i++, files++) {
505  if (!files->name)
506  continue;
507 
508  /* warn if it tries to conflict with the root inode */
509  if (unlikely(i == 1))
510  printk(KERN_WARNING "%s: %s passed in a files array"
511  "with an index of 1!\n", __func__,
512  s->s_type->name);
513 
514  dentry = d_alloc_name(root, files->name);
515  if (!dentry)
516  goto out;
517  inode = new_inode(s);
518  if (!inode) {
519  dput(dentry);
520  goto out;
521  }
522  inode->i_mode = S_IFREG | files->mode;
523  inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
524  inode->i_fop = files->ops;
525  inode->i_ino = i;
526  d_add(dentry, inode);
527  }
528  s->s_root = root;
529  return 0;
530 out:
531  d_genocide(root);
532  shrink_dcache_parent(root);
533  dput(root);
534  return -ENOMEM;
535 }
536 
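/*
 * Example (not part of libfs.c, added for illustration): single-instance
 * control filesystems such as binfmt_misc describe their static files with a
 * tree_descr array and call simple_fill_super() from a fill_super callback.
 * All names, modes and the magic value below are illustrative; the file
 * operations are a placeholder.
 */
static const struct file_operations examplefs_file_ops = {
        .llseek = noop_llseek,          /* placeholder fops for the sketch */
};

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        static struct tree_descr examplefs_files[] = {
                /* index 1 is reserved for the root inode, so start at 2 */
                [2] = { "status",  &examplefs_file_ops, S_IRUGO },
                [3] = { "control", &examplefs_file_ops, S_IWUSR },
                { "" }  /* empty name terminates the array */
        };

        return simple_fill_super(sb, 0xda7aba5e /* arbitrary magic */, examplefs_files);
}

static struct dentry *examplefs_single_mount(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
{
        return mount_single(fs_type, flags, data, examplefs_fill_super);
}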
537 static DEFINE_SPINLOCK(pin_fs_lock);
538 
539 int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *count)
540 {
541  struct vfsmount *mnt = NULL;
542  spin_lock(&pin_fs_lock);
543  if (unlikely(!*mount)) {
544  spin_unlock(&pin_fs_lock);
545  mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
546  if (IS_ERR(mnt))
547  return PTR_ERR(mnt);
548  spin_lock(&pin_fs_lock);
549  if (!*mount)
550  *mount = mnt;
551  }
552  mntget(*mount);
553  ++*count;
554  spin_unlock(&pin_fs_lock);
555  mntput(mnt);
556  return 0;
557 }
558 
559 void simple_release_fs(struct vfsmount **mount, int *count)
560 {
561  struct vfsmount *mnt;
562  spin_lock(&pin_fs_lock);
563  mnt = *mount;
564  if (!--*count)
565  *mount = NULL;
566  spin_unlock(&pin_fs_lock);
567  mntput(mnt);
568 }
569 
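/*
 * Example (not part of libfs.c, added for illustration): callers such as
 * debugfs and securityfs keep their internal mount pinned for as long as
 * they have objects to expose.  A sketch of that pattern with illustrative
 * names; examplefs_fs_type is the file_system_type from the earlier sketch.
 */
static struct vfsmount *examplefs_mount_pin;
static int examplefs_mount_count;

static int examplefs_publish_object(void)
{
        int err;

        err = simple_pin_fs(&examplefs_fs_type, &examplefs_mount_pin,
                            &examplefs_mount_count);
        if (err)
                return err;
        /* ... create the dentry/inode under examplefs_mount_pin->mnt_root ... */
        return 0;
}

static void examplefs_remove_object(void)
{
        /* ... drop the dentry ... */
        simple_release_fs(&examplefs_mount_pin, &examplefs_mount_count);
}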
584 ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
585  const void *from, size_t available)
586 {
587  loff_t pos = *ppos;
588  size_t ret;
589 
590  if (pos < 0)
591  return -EINVAL;
592  if (pos >= available || !count)
593  return 0;
594  if (count > available - pos)
595  count = available - pos;
596  ret = copy_to_user(to, from + pos, count);
597  if (ret == count)
598  return -EFAULT;
599  count -= ret;
600  *ppos = pos + count;
601  return count;
602 }
603 
618 ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
619  const void __user *from, size_t count)
620 {
621  loff_t pos = *ppos;
622  size_t res;
623 
624  if (pos < 0)
625  return -EINVAL;
626  if (pos >= available || !count)
627  return 0;
628  if (count > available - pos)
629  count = available - pos;
630  res = copy_from_user(to + pos, from, count);
631  if (res == count)
632  return -EFAULT;
633  count -= res;
634  *ppos = pos + count;
635  return count;
636 }
637 
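/*
 * Example (not part of libfs.c, added for illustration): together the two
 * helpers above cover the common debugfs-style case of exposing a small
 * fixed-size kernel buffer as a file.  Names and the 64-byte size are
 * arbitrary choices for the sketch.
 */
static char examplefs_msg[64] = "hello\n";

static ssize_t examplefs_msg_read(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        return simple_read_from_buffer(buf, count, ppos, examplefs_msg,
                                       strlen(examplefs_msg));
}

static ssize_t examplefs_msg_write(struct file *file, const char __user *buf,
                        size_t count, loff_t *ppos)
{
        /* leave the last byte as a terminating NUL */
        return simple_write_to_buffer(examplefs_msg, sizeof(examplefs_msg) - 1,
                                      ppos, buf, count);
}

static const struct file_operations examplefs_msg_fops = {
        .read   = examplefs_msg_read,
        .write  = examplefs_msg_write,
        .llseek = default_llseek,
};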
652 ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
653  const void *from, size_t available)
654 {
655  loff_t pos = *ppos;
656 
657  if (pos < 0)
658  return -EINVAL;
659  if (pos >= available)
660  return 0;
661  if (count > available - pos)
662  count = available - pos;
663  memcpy(to, from + pos, count);
664  *ppos = pos + count;
665 
666  return count;
667 }
668 
669 /*
670  * Transaction based IO.
671  * The file expects a single write which triggers the transaction, and then
672  * possibly a read which collects the result - which is stored in a
673  * file-local buffer.
674  */
675 
676 void simple_transaction_set(struct file *file, size_t n)
677 {
678  struct simple_transaction_argresp *ar = file->private_data;
679 
680  BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
681 
682  /*
683  * The barrier ensures that ar->size will really remain zero until
684  * ar->data is ready for reading.
685  */
686  smp_mb();
687  ar->size = n;
688 }
689 
690 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
691 {
692  struct simple_transaction_argresp *ar;
693  static DEFINE_SPINLOCK(simple_transaction_lock);
694 
695  if (size > SIMPLE_TRANSACTION_LIMIT - 1)
696  return ERR_PTR(-EFBIG);
697 
698  ar = (struct simple_transaction_argresp *)get_zeroed_page(GFP_KERNEL);
699  if (!ar)
700  return ERR_PTR(-ENOMEM);
701 
702  spin_lock(&simple_transaction_lock);
703 
704  /* only one write allowed per open */
705  if (file->private_data) {
706  spin_unlock(&simple_transaction_lock);
707  free_page((unsigned long)ar);
708  return ERR_PTR(-EBUSY);
709  }
710 
711  file->private_data = ar;
712 
713  spin_unlock(&simple_transaction_lock);
714 
715  if (copy_from_user(ar->data, buf, size))
716  return ERR_PTR(-EFAULT);
717 
718  return ar->data;
719 }
720 
721 ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
722 {
723  struct simple_transaction_argresp *ar = file->private_data;
724 
725  if (!ar)
726  return 0;
727  return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
728 }
729 
730 int simple_transaction_release(struct inode *inode, struct file *file)
731 {
732  free_page((unsigned long)file->private_data);
733  return 0;
734 }
735 
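/*
 * Example (not part of libfs.c, added for illustration): the transaction
 * helpers above back "write a command, read the reply" control files such as
 * the ones under nfsd's nfsctl.  examplefs_handle_command() is an assumed
 * helper that processes the NUL-terminated request in place and returns the
 * reply length or a negative errno.
 */
ssize_t examplefs_handle_command(char *buf, size_t size);       /* assumed to exist elsewhere */

static ssize_t examplefs_ctl_write(struct file *file, const char __user *buf,
                        size_t size, loff_t *pos)
{
        ssize_t rv;
        char *data = simple_transaction_get(file, buf, size);

        if (IS_ERR(data))
                return PTR_ERR(data);

        rv = examplefs_handle_command(data, size);
        if (rv < 0)
                return rv;

        simple_transaction_set(file, rv);       /* publish the reply for read() */
        return size;
}

static const struct file_operations examplefs_ctl_fops = {
        .write          = examplefs_ctl_write,
        .read           = simple_transaction_read,
        .release        = simple_transaction_release,
        .llseek         = default_llseek,
};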
736 /* Simple attribute files */
737 
738 struct simple_attr {
739  int (*get)(void *, u64 *);
740  int (*set)(void *, u64);
741  char get_buf[24]; /* enough to store a u64 and "\n\0" */
742  char set_buf[24];
743  void *data;
744  const char *fmt; /* format for read operation */
745  struct mutex mutex; /* protects access to these buffers */
746 };
747 
748 /* simple_attr_open is called by an actual attribute open file operation
749  * to set the attribute specific access operations. */
750 int simple_attr_open(struct inode *inode, struct file *file,
751  int (*get)(void *, u64 *), int (*set)(void *, u64),
752  const char *fmt)
753 {
754  struct simple_attr *attr;
755 
756  attr = kmalloc(sizeof(*attr), GFP_KERNEL);
757  if (!attr)
758  return -ENOMEM;
759 
760  attr->get = get;
761  attr->set = set;
762  attr->data = inode->i_private;
763  attr->fmt = fmt;
764  mutex_init(&attr->mutex);
765 
766  file->private_data = attr;
767 
768  return nonseekable_open(inode, file);
769 }
770 
771 int simple_attr_release(struct inode *inode, struct file *file)
772 {
773  kfree(file->private_data);
774  return 0;
775 }
776 
777 /* read from the buffer that is filled with the get function */
778 ssize_t simple_attr_read(struct file *file, char __user *buf,
779  size_t len, loff_t *ppos)
780 {
781  struct simple_attr *attr;
782  size_t size;
783  ssize_t ret;
784 
785  attr = file->private_data;
786 
787  if (!attr->get)
788  return -EACCES;
789 
790  ret = mutex_lock_interruptible(&attr->mutex);
791  if (ret)
792  return ret;
793 
794  if (*ppos) { /* continued read */
795  size = strlen(attr->get_buf);
796  } else { /* first read */
797  u64 val;
798  ret = attr->get(attr->data, &val);
799  if (ret)
800  goto out;
801 
802  size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
803  attr->fmt, (unsigned long long)val);
804  }
805 
806  ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
807 out:
808  mutex_unlock(&attr->mutex);
809  return ret;
810 }
811 
812 /* interpret the buffer as a number to call the set function with */
813 ssize_t simple_attr_write(struct file *file, const char __user *buf,
814  size_t len, loff_t *ppos)
815 {
816  struct simple_attr *attr;
817  u64 val;
818  size_t size;
819  ssize_t ret;
820 
821  attr = file->private_data;
822  if (!attr->set)
823  return -EACCES;
824 
825  ret = mutex_lock_interruptible(&attr->mutex);
826  if (ret)
827  return ret;
828 
829  ret = -EFAULT;
830  size = min(sizeof(attr->set_buf) - 1, len);
831  if (copy_from_user(attr->set_buf, buf, size))
832  goto out;
833 
834  attr->set_buf[size] = '\0';
835  val = simple_strtoll(attr->set_buf, NULL, 0);
836  ret = attr->set(attr->data, val);
837  if (ret == 0)
838  ret = len; /* on success, claim we got the whole input */
839 out:
840  mutex_unlock(&attr->mutex);
841  return ret;
842 }
843 
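/*
 * Example (not part of libfs.c, added for illustration): the simple_attr_*
 * helpers are normally reached through the DEFINE_SIMPLE_ATTRIBUTE() macro
 * from <linux/fs.h>, typically for debugfs files exposing a single u64.  The
 * variable and table names here are illustrative.
 */
static u64 examplefs_threshold;

static int examplefs_threshold_get(void *data, u64 *val)
{
        *val = *(u64 *)data;
        return 0;
}

static int examplefs_threshold_set(void *data, u64 val)
{
        *(u64 *)data = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(examplefs_threshold_fops, examplefs_threshold_get,
                        examplefs_threshold_set, "%llu\n");

/*
 * The data pointer handed to debugfs_create_file() ends up in i_private and
 * from there in attr->data, e.g.:
 *      debugfs_create_file("threshold", 0644, NULL, &examplefs_threshold,
 *                          &examplefs_threshold_fops);
 */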
856 struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid,
857  int fh_len, int fh_type, struct inode *(*get_inode)
858  (struct super_block *sb, u64 ino, u32 gen))
859 {
860  struct inode *inode = NULL;
861 
862  if (fh_len < 2)
863  return NULL;
864 
865  switch (fh_type) {
866  case FILEID_INO32_GEN:
867  case FILEID_INO32_GEN_PARENT:
868  inode = get_inode(sb, fid->i32.ino, fid->i32.gen);
869  break;
870  }
871 
872  return d_obtain_alias(inode);
873 }
875 
889 struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
890  int fh_len, int fh_type, struct inode *(*get_inode)
891  (struct super_block *sb, u64 ino, u32 gen))
892 {
893  struct inode *inode = NULL;
894 
895  if (fh_len <= 2)
896  return NULL;
897 
898  switch (fh_type) {
899  case FILEID_INO32_GEN_PARENT:
900  inode = get_inode(sb, fid->i32.parent_ino,
901  (fh_len > 3 ? fid->i32.parent_gen : 0));
902  break;
903  }
904 
905  return d_obtain_alias(inode);
906 }
908 
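/*
 * Example (not part of libfs.c, added for illustration): a filesystem makes
 * itself NFS-exportable by wrapping the two helpers above with its own inode
 * lookup routine, as ext2 and friends do.  examplefs_iget() is an assumed
 * helper returning the inode for an inode number/generation pair.
 */
struct inode *examplefs_iget(struct super_block *sb, u64 ino, u32 gen);        /* assumed to exist elsewhere */

static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
                        struct fid *fid, int fh_len, int fh_type)
{
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type, examplefs_iget);
}

static struct dentry *examplefs_fh_to_parent(struct super_block *sb,
                        struct fid *fid, int fh_len, int fh_type)
{
        return generic_fh_to_parent(sb, fid, fh_len, fh_type, examplefs_iget);
}

static const struct export_operations examplefs_export_ops = {
        .fh_to_dentry   = examplefs_fh_to_dentry,
        .fh_to_parent   = examplefs_fh_to_parent,
};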
918 int generic_file_fsync(struct file *file, loff_t start, loff_t end,
919  int datasync)
920 {
921  struct inode *inode = file->f_mapping->host;
922  int err;
923  int ret;
924 
925  err = filemap_write_and_wait_range(inode->i_mapping, start, end);
926  if (err)
927  return err;
928 
929  mutex_lock(&inode->i_mutex);
930  ret = sync_mapping_buffers(inode->i_mapping);
931  if (!(inode->i_state & I_DIRTY))
932  goto out;
933  if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
934  goto out;
935 
936  err = sync_inode_metadata(inode, 1);
937  if (ret == 0)
938  ret = err;
939 out:
940  mutex_unlock(&inode->i_mutex);
941  return ret;
942 }
944 
954 int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks)
955 {
956  u64 last_fs_block = num_blocks - 1;
957  u64 last_fs_page =
958  last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits);
959 
960  if (unlikely(num_blocks == 0))
961  return 0;
962 
963  if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT))
964  return -EINVAL;
965 
966  if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) ||
967  (last_fs_page > (pgoff_t)(~0ULL))) {
968  return -EFBIG;
969  }
970  return 0;
971 }
973 
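/*
 * Example (not part of libfs.c, added for illustration): block-based
 * filesystems call this from fill_super once the block size and block count
 * are known, refusing mounts the page cache could not index.  The wrapper
 * name and error message are illustrative.
 */
static int examplefs_check_size(struct super_block *sb, u64 blocks_count)
{
        int err = generic_check_addressable(sb->s_blocksize_bits, blocks_count);

        if (err)
                printk(KERN_ERR "examplefs: filesystem too large to mount safely on this system\n");
        return err;
}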
974 /*
975  * No-op implementation of ->fsync for in-memory filesystems.
976  */
977 int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
978 {
979  return 0;
980 }
981 
990 EXPORT_SYMBOL(simple_dir_inode_operations);
991 EXPORT_SYMBOL(simple_dir_operations);