	/*
	 * Inode allocation/initialisation: the xfs_inode is recycled through a
	 * slab cache, so assert that its previous life left it clean before
	 * re-initialising the iolock and per-inode counters.
	 */
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	ip->i_delayed_blks = 0;
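
/*
 * Inode teardown (the RCU free callback and, presumably, xfs_inode_free()):
 * dispose of type-specific state based on the file mode, assert the inode is
 * clean, and mark it XFS_IRECLAIM under i_flags_lock so that racing RCU-walk
 * lookups treat it as being reclaimed.
 */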
	struct xfs_inode	*ip = XFS_I(inode);

	switch (ip->i_d.di_mode & S_IFMT) {

	ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));

	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));

	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	spin_unlock(&ip->i_flags_lock);
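
/*
 * xfs_iget_cache_hit(): revalidate a cached inode under i_flags_lock, skip
 * inodes that are being created, freed or reclaimed, and recycle inodes
 * found in XFS_IRECLAIMABLE state back into use.
 */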
	struct xfs_inode	*ip,

	struct xfs_mount	*mp = ip->i_mount;

	/* revalidate the inode under i_flags_lock before using it */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);

	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);

	/* recycle an inode that has already been set up for reclaim */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);

			/* recycling failed: drop INEW/IRECLAIM, leave it reclaimable */
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/* clear per-lifetime state; the inode is effectively new again */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);

		trace_xfs_iget_skip(ip);

		/* found a live inode */
		spin_unlock(&ip->i_flags_lock);

		trace_xfs_iget_hit(ip);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);

	spin_unlock(&ip->i_flags_lock);
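
/*
 * xfs_iget_cache_miss(): an inode not found in cache is flagged
 * (XFS_IDONTCACHE when requested) before being inserted into the per-AG
 * inode radix tree under pag_ici_lock, with a radix tree preload bracketing
 * the insert.
 */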
	struct xfs_mount	*mp,
	struct xfs_inode	**ipp,

	struct xfs_inode	*ip;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {

		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode into the per-AG radix tree */
	spin_lock(&pag->pag_ici_lock);

		goto out_preload_end;

	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	/* error path: undo the radix tree preload and drop pag_ici_lock */
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
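
/*
 * Top-level lookup (evidently xfs_iget()): callers may not ask for the
 * iolock here; the work is handed to the cache-hit or cache-miss helper,
 * with errors funnelled through out_error_or_again.
 */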
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
	if (error)
		goto out_error_or_again;

	error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
						flags, lock_flags);
	if (error)
		goto out_error_or_again;

	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
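
/*
 * Lock-mode selection for mapping reads (likely xfs_ilock_map_shared()):
 * take XFS_ILOCK_EXCL when the extent list may still have to be read in,
 * otherwise XFS_ILOCK_SHARED is sufficient.
 */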
		lock_mode = XFS_ILOCK_EXCL;

		lock_mode = XFS_ILOCK_SHARED;
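
/*
 * The matching unlock helper (presumably xfs_iunlock_map_shared()) takes
 * back the lock_mode chosen above:
 */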
	unsigned int		lock_mode)
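
/*
 * xfs_ilock() (per the trace point below): sanity-check the flag
 * combination, then take the iolock before the ilock, passing the
 * XFS_*_DEP() subclass bits down for lock ordering annotations.
 */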
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
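
/*
 * xfs_ilock_nowait(): the same flag checks, but using trylock variants; if
 * the ilock cannot be obtained, any iolock taken above is dropped again on
 * the out_undo_iolock path.
 */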
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}

	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
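
/*
 * xfs_iunlock(): release the iolock and/or ilock, shared or exclusive as
 * described by lock_flags.
 */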
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
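
/*
 * Lock-state predicate (presumably xfs_isilocked()): for an exclusive query,
 * report whether the mrlock currently has a writer.
 */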
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
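
/*
 * Flush-lock wait loop (presumably __xfs_iflock()): sleep while the inode is
 * flush locked and retry until xfs_iflock_nowait() succeeds.
 */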
	struct xfs_inode	*ip)

		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));