Linux Kernel 3.7.1
xfs_iget.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"


/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}


	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into a usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
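
/*
 * Illustrative sketch (editor's example, not part of xfs_iget.c): a
 * typical out-of-transaction lookup through xfs_iget().  The helper
 * name xfs_example_lookup is hypothetical; the calls it makes are the
 * ones defined or documented in this file.  Note that errors are
 * returned as positive errnos here.
 */
STATIC int
xfs_example_lookup(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_inode_t	*ip;
	int		error;

	/* no transaction, no special flags, take the ILOCK shared */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... inspect the inode under the shared inode lock ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);		/* drop the reference the lookup took */
	return 0;
}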

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
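
/*
 * Illustrative sketch (editor's example, not part of xfs_iget.c): the
 * lock mode returned by xfs_ilock_map_shared() must be handed back to
 * xfs_iunlock_map_shared(), since the caller cannot know in advance
 * whether the shared or the exclusive mode was actually taken.  The
 * helper name xfs_example_read_extents is hypothetical.
 */
STATIC void
xfs_example_read_extents(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... the in-core extent list is now present and stable ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}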

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
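
/*
 * Illustrative sketch (editor's example, not part of xfs_iget.c):
 * requesting both locks in a single xfs_ilock() call preserves the
 * IO-lock-before-inode-lock ordering described above, so callers never
 * have to sequence the two acquisitions by hand.  The helper name
 * xfs_example_lock_both is hypothetical.
 */
STATIC void
xfs_example_lock_both(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... both locks are now held exclusive ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}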

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
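
/*
 * Illustrative sketch (editor's example, not part of xfs_iget.c): a
 * caller that must not sleep tries for the locks and backs off on
 * failure, relying on xfs_ilock_nowait() having already dropped any IO
 * lock it managed to take.  The helper name xfs_example_trylock is
 * hypothetical.
 */
STATIC int
xfs_example_trylock(
	xfs_inode_t	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED))
		return EAGAIN;	/* nothing is held; caller retries later */

	/* ... non-blocking work under both shared locks ... */

	xfs_iunlock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);
	return 0;
}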

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
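
/*
 * Illustrative sketch (editor's example, not part of xfs_iget.c): a
 * writer that finishes its exclusive-phase work can demote to a shared
 * hold, letting readers in without ever fully dropping the lock.  The
 * helper name xfs_example_demote is hypothetical.
 */
STATIC void
xfs_example_demote(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... modifications requiring exclusive access ... */
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	/* ... read-only continuation under the shared hold ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}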

#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
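
/*
 * Illustrative note (editor's example, not part of xfs_iget.c):
 * xfs_isilocked() exists for DEBUG-build assertions rather than for
 * making locking decisions, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */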

void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
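
/*
 * Illustrative note (editor's example, not part of xfs_iget.c):
 * callers do not invoke __xfs_iflock() directly.  They take the flush
 * lock through the xfs_iflock() inline wrapper (see xfs_inode.h),
 * which falls back to this sleeping slow path only when the
 * non-blocking attempt fails:
 *
 *	xfs_iflock(ip);
 *	... flush the inode ...
 *	xfs_ifunlock(ip);
 */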