/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <[email protected]> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
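
/*
 * A minimal sketch of what this ordering implies for a walker of the
 * per-sb inode list (see evict_inodes() below for the real thing):
 * inode_sb_list_lock is taken first and i_lock is nested inside it:
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...inspect or update inode->i_state...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 *
 * Code that must acquire a pair "backwards" (e.g. prune_icache_sb(),
 * which holds s_inode_lru_lock and then wants i_lock) uses
 * spin_trylock() to avoid an ABBA deadlock.
 */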

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}
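
/*
 * A minimal sketch of the filesystem side of the ->alloc_inode()
 * contract above, assuming a hypothetical filesystem "foo" with its own
 * inode cache: the VFS inode is embedded in the per-inode structure and
 * recovered elsewhere with container_of():
 *
 *	struct foo_inode_info {
 *		unsigned long	private_state;	(hypothetical fs state)
 *		struct inode	vfs_inode;
 *	};
 *
 *	static struct inode *foo_alloc_inode(struct super_block *sb)
 *	{
 *		struct foo_inode_info *fi;
 *
 *		fi = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
 *		if (!fi)
 *			return NULL;
 *		return &fi->vfs_inode;
 *	}
 */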

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (WARN_ON(inode->i_nlink == 0))
		atomic_long_dec(&inode->i_sb->s_remove_count);

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * Get an additional reference to an inode; the caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
		inode->i_sb->s_nr_inodes_unused++;
		this_cpu_inc(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}


static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inode->i_sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.  If there were dirty
 * inodes and @kill_dirty is false, those inodes are reported as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;

		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move_tail(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			sb->s_nr_inodes_unused--;
			this_cpu_dec(nr_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &sb->s_inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&sb->s_inode_lru_lock);

			if (inode != list_entry(sb->s_inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&sb->s_inode_lru_lock);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += reap;

	dispose_list(&freeable);
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

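/*
 * Typical use, sketched: pseudo filesystems that create purely in-memory
 * inodes pair this with new_inode() below, roughly:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */
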
/**
 *	new_inode_pseudo	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.  The inode won't be
 *	chained into the superblock's s_inodes list, which means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify and writeback can't work on it
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.  The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);

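/*
 * Sketch of a caller, assuming a hypothetical filesystem that keys its
 * inodes by a 64-bit value rather than i_ino (FOO_I(), foo_test, foo_set
 * and key are all illustrative names).  Both callbacks run under
 * inode_hash_lock, so they must not sleep:
 *
 *	static int foo_test(struct inode *inode, void *data)
 *	{
 *		return FOO_I(inode)->key == *(u64 *)data;
 *	}
 *
 *	static int foo_set(struct inode *inode, void *data)
 *	{
 *		FOO_I(inode)->key = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)key, foo_test, foo_set, &key);
 */
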
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);

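/*
 * The canonical lookup pattern, sketched: a freshly allocated inode comes
 * back with I_NEW set and must be filled in and then released with
 * unlock_new_inode():
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		(cache hit, ready to use)
 *	...read the on-disk inode and fill in *inode...
 *	unlock_new_inode(inode);
 *	return inode;
 */
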
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);

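/*
 * Usage note (sketch): a caller typically reserves a handful of fixed
 * inode numbers (e.g. a root inode) and lets iunique() hand out the rest,
 * where FOO_ROOT_INO is a hypothetical reserved number:
 *
 *	inode->i_ino = iunique(sb, FOO_ROOT_INO);
 */
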
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);

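/*
 * Sketch of the creation-side counterpart to iget_locked(): a filesystem
 * allocating a brand-new on-disk inode hashes it itself and must cope
 * with a racing lookup of the same inode number, roughly:
 *
 *	inode = new_inode(sb);
 *	...
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		(somebody else hashed a live inode with this ino first;
 *		 error handling is filesystem-specific)
 *		iput(inode);
 *		return ERR_PTR(-EBUSY);
 *	}
 *	...initialise the rest, write it out...
 *	unlock_new_inode(inode);
 */
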
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		inode_add_lru(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	if (!list_empty(&inode->i_lru))
		inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero, the inode is then freed and may also be destroyed.
 *
 *	Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file to map
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode
 *	that is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/*
 * This does the actual work of updating an inode's time or version.  The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
	if (inode->i_op->update_time)
		return inode->i_op->update_time(inode, time, flags);

	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;
	mark_inode_dirty_sync(inode);
	return 0;
}

/**
 *	touch_atime	-	update the access time
 *	@path: the &struct path to update
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct path *path)
{
	struct vfsmount *mnt = path->mnt;
	struct inode *inode = path->dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (!sb_start_write_trylock(inode->i_sb))
		return;

	if (__mnt_want_write(mnt))
		goto skip_update;
	/*
	 * File systems can error out when updating inodes if they need to
	 * allocate new space to modify an inode (such is the case for
	 * Btrfs), but since we touch atime while walking down the path we
	 * really don't care if we failed to update the atime of the file,
	 * so just ignore the return value.
	 * We may also fail on filesystems that have the ability to make parts
	 * of the fs read only, e.g. subvolumes in Btrfs.
	 */
	update_time(inode, &now, S_ATIME);
	__mnt_drop_write(mnt);
skip_update:
	sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	umode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

static int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

int file_remove_suid(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int killsuid;
	int killpriv;
	int error = 0;

	/* Fast path for nothing security related */
	if (IS_NOSEC(inode))
		return 0;

	killsuid = should_remove_suid(dentry);
	killpriv = security_inode_need_killpriv(dentry);

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
		inode->i_flags |= S_NOSEC;

	return error;
}
EXPORT_SYMBOL(file_remove_suid);

/**
 *	file_update_time	-	update mtime and ctime time
 *	@file: file accessed
 *
 *	Update the mtime and ctime members of an inode and mark the inode
 *	for writeback.  Note that this function is meant exclusively for
 *	usage in the file write path of filesystems, and filesystems may
 *	choose to explicitly ignore updates via this function with the
 *	S_NOCMTIME inode flag, e.g. for network filesystems where these
 *	timestamps are handled by the server.  This can return an error for
 *	file systems who need to allocate space in order to update an inode.
 */
int file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int sync_it = 0;
	int ret;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return 0;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return 0;

	/* Finally allowed to write? Takes lock. */
	if (__mnt_want_write_file(file))
		return 0;

	ret = update_time(inode, &now, sync_it);
	__mnt_drop_write_file(file);

	return ret;
}
EXPORT_SYMBOL(file_update_time);

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	unsigned int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

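/*
 * Sketch of a typical caller: a filesystem's ->mknod() fills in the
 * device-special bits after creating the inode, roughly (foo_mknod and
 * foo_new_inode are hypothetical):
 *
 *	static int foo_mknod(struct inode *dir, struct dentry *dentry,
 *			     umode_t mode, dev_t rdev)
 *	{
 *		struct inode *inode = foo_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */
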
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			umode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

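/*
 * Usage note (sketch): filesystems call this right after allocating an
 * inode for a create-style operation, before making it visible:
 *
 *	inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode_init_owner(inode, dir, mode);
 *
 * The S_ISGID handling above means subdirectories created in a setgid
 * directory inherit its group and setgid bit automatically.
 */
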
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	if (uid_eq(current_fsuid(), inode->i_uid))
		return true;
	if (inode_capable(inode, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);

/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
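
/*
 * Sketch of how the i_dio_count pieces pair up: the increment is done
 * open-coded by the direct I/O code (e.g. fs/direct-io.c) before bios
 * are submitted, and the completion path drops it:
 *
 *	atomic_inc(&inode->i_dio_count);	(before submitting bios)
 *	...
 *	inode_dio_done(inode);			(from I/O completion)
 *
 * while truncate and friends, holding i_mutex, call inode_dio_wait()
 * to drain all outstanding requests before changing the file layout.
 */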