#define XFS_LOOKUP_BATCH 32
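/*
 * Note on provenance: these fragments appear to be from fs/xfs/xfs_sync.c
 * circa Linux 3.6 (s_writers.frozen exists, and this code moved into
 * fs/xfs/xfs_icache.c in 3.8); function names and one-line bodies lost in
 * extraction are restored from that context and should be read as inferred.
 * Cache walks pull inodes out of the per-AG radix trees in batches of
 * XFS_LOOKUP_BATCH so that rcu_read_lock() is held across a bounded amount
 * of work per gang lookup. The first fragment below is the body of
 * xfs_inode_ag_walk_grab(), which pins one inode for the walk.
 */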
    ASSERT(rcu_read_lock_held());

    /* check for stale RCU freed inode */
    spin_lock(&ip->i_flags_lock);
    if (!ip->i_ino)
        goto out_unlock_noent;
    /* avoid new or reclaimable inodes; leave them for reclaim to flush */
    if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
        goto out_unlock_noent;
    spin_unlock(&ip->i_flags_lock);
    /* nothing to sync during shutdown */
    if (XFS_FORCED_SHUTDOWN(ip->i_mount))
        return EFSCORRUPTED;
out_unlock_noent:
    spin_unlock(&ip->i_flags_lock);
    return ENOENT;
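/*
 * xfs_inode_ag_walk_grab() runs under rcu_read_lock(), so everything it
 * reads may be stale: i_ino == 0 means the inode was freed but its RCU
 * grace period has not yet expired. New and reclaimable inodes are skipped
 * here; reclaim owns them. XFS of this vintage returns positive errno
 * values internally, hence the bare EFSCORRUPTED/ENOENT returns.
 */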
STATIC int
xfs_inode_ag_walk(
    struct xfs_mount    *mp,
    struct xfs_perag    *pag,
    int                 (*execute)(struct xfs_inode *ip,
                                   struct xfs_perag *pag, int flags),
    int                 flags)
{
    /* ... */
        rcu_read_lock();
        nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                    (void **)batch, first_index,
                    XFS_LOOKUP_BATCH);
        for (i = 0; i < nr_found; i++) {
            struct xfs_inode *ip = batch[i];

            if (done || xfs_inode_ag_walk_grab(ip))
                batch[i] = NULL;
        for (i = 0; i < nr_found; i++) {
            if (!batch[i])
                continue;
            error = execute(batch[i], pag, flags);
            IRELE(batch[i]);
        }
    } while (nr_found && !done);
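/*
 * Note the two-phase structure of xfs_inode_ag_walk(): a whole batch is
 * grabbed in one pass under rcu_read_lock(), then the lock is dropped and
 * execute() runs on the survivors, since the callback may block. Slots
 * that fail the grab are NULLed out and skipped in the second loop.
 */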
int
xfs_inode_ag_iterator(
    struct xfs_mount    *mp,
    int                 (*execute)(struct xfs_inode *ip,
                                   struct xfs_perag *pag, int flags),
    int                 flags)
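/*
 * xfs_inode_ag_iterator() loops over all AGs with xfs_perag_get()/
 * xfs_perag_put(), applying xfs_inode_ag_walk() to each; an EFSCORRUPTED
 * return aborts the whole iteration.
 */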
xfs_sync_inode_data(
    struct xfs_inode    *ip,
    struct xfs_perag    *pag,
    int                 flags)
xfs_sync_data(
    struct xfs_mount    *mp,
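/*
 * xfs_sync_inode_data() is an execute() callback that writes back one
 * inode's dirty data; xfs_sync_data() drives it over the whole filesystem
 * via the AG iterator above.
 */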
xfs_sync_fsdata(
    struct xfs_mount    *mp)
    if (xfs_buf_ispinned(bp))
        xfs_log_force(mp, 0);
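/*
 * The pinned check above forces the log so that the superblock buffer
 * write which follows it (xfs_bwrite(), elided here) does not block
 * waiting for log I/O to complete.
 */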
xfs_quiesce_data(
    struct xfs_mount    *mp)
{
    int error, error2 = 0;
    /* ... */
    return error ? error : error2;
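/*
 * xfs_quiesce_data() forces the log, syncs the block device and, if the
 * log needs covering, writes a dummy transaction; "error ? error : error2"
 * simply reports the first failure of the two steps.
 */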
xfs_quiesce_attr(
    struct xfs_mount    *mp)

    /* Push the superblock and write an unmount record */
    error = xfs_log_sbcount(mp);
    if (error)
        xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
                "Frozen image may not be consistent.");
xfs_syncd_queue_sync(
    struct xfs_mount    *mp)
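/*
 * xfs_syncd_queue_sync() (re)arms m_sync_work on the xfs_syncd_wq
 * workqueue with a delay derived from the xfs_syncd_centisecs tunable.
 * The handler, xfs_sync_worker(), follows.
 */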
    struct xfs_mount *mp = container_of(to_delayed_work(work),
                    struct xfs_mount, m_sync_work);
    int error;

    if (!(mp->m_super->s_flags & MS_ACTIVE) &&
        !(mp->m_flags & XFS_MOUNT_RDONLY)) {
        /* dgc: errors ignored here */
        if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
            xfs_log_need_covered(mp))
            error = xfs_fs_log_dummy(mp);
        else
            xfs_log_force(mp, 0);
    }

    /* queue us up again */
    xfs_syncd_queue_sync(mp);
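/*
 * xfs_sync_worker() does its log work only while the superblock is not
 * frozen: an idle log gets "covered" with a dummy transaction, otherwise
 * it is forced out. Either way the worker requeues itself, because the
 * workqueue is also needed for inode reclaim around mount and unmount.
 */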
xfs_syncd_queue_reclaim(
    struct xfs_mount    *mp)
    struct xfs_mount *mp = container_of(to_delayed_work(work),
                    struct xfs_mount, m_reclaim_work);

    xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
    xfs_syncd_queue_reclaim(mp);
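/*
 * Background reclaim is a fast, non-blocking pass: xfs_reclaim_worker()
 * scans with SYNC_TRYLOCK and then calls xfs_syncd_queue_reclaim(), which
 * only requeues m_reclaim_work (at roughly a sixth of the syncd period)
 * while some perag is still tagged XFS_ICI_RECLAIM_TAG.
 */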
xfs_flush_inodes(
    struct xfs_inode    *ip)
{
    struct xfs_mount    *mp = ip->i_mount;
    struct xfs_mount *mp = container_of(work,
                    struct xfs_mount, m_flush_work);
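/*
 * xfs_flush_inodes() is the ENOSPC path: it queues m_flush_work and waits
 * for it, and the worker (whose container_of() tail is shown above) runs
 * xfs_sync_data() twice, first try-lock then blocking, to flush delayed
 * allocations and free up reserved space.
 */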
xfs_syncd_init(
    struct xfs_mount    *mp)
{
    xfs_syncd_queue_sync(mp);
xfs_syncd_stop(
    struct xfs_mount    *mp)
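/*
 * xfs_syncd_init() initialises m_flush_work, m_sync_work and
 * m_reclaim_work and kicks the first periodic sync; xfs_syncd_stop()
 * cancels them synchronously before unmount tears down the AIL and log.
 */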
__xfs_inode_set_reclaim_tag(
    struct xfs_perag    *pag,
    struct xfs_inode    *ip)

    if (!pag->pag_ici_reclaimable) {
        /* propagate the reclaim tag up into the perag radix tree */
        spin_lock(&ip->i_mount->m_perag_lock);
        radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                XFS_ICI_RECLAIM_TAG);
        spin_unlock(&ip->i_mount->m_perag_lock);
        /* schedule periodic background inode reclaim */
        xfs_syncd_queue_reclaim(ip->i_mount);
        trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                        -1, _RET_IP_);
    }
    pag->pag_ici_reclaimable++;
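/*
 * The reclaim tag lives at two levels: on the inode in the per-AG tree
 * and, mirrored by __xfs_inode_set_reclaim_tag() above, on the AG in the
 * per-mount m_perag_tree. Reclaim can therefore find work without
 * scanning every AG, and the first tagged inode in an AG also kicks the
 * background reclaim worker.
 */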
    struct xfs_mount *mp = ip->i_mount;
    struct xfs_perag *pag;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    spin_lock(&pag->pag_ici_lock);
    spin_lock(&ip->i_flags_lock);
    __xfs_inode_set_reclaim_tag(pag, ip);
    __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
    spin_unlock(&ip->i_flags_lock);
    spin_unlock(&pag->pag_ici_lock);
    xfs_perag_put(pag);
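/*
 * xfs_inode_set_reclaim_tag() sets the radix tree tag and the
 * XFS_IRECLAIMABLE flag together, under both pag_ici_lock and
 * i_flags_lock, so lookup and reclaim always see a consistent pair.
 */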
    pag->pag_ici_reclaimable--;
    if (!pag->pag_ici_reclaimable) {
        /* clear the reclaim tag from the perag radix tree */
        spin_lock(&ip->i_mount->m_perag_lock);
        radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                XFS_ICI_RECLAIM_TAG);
        spin_unlock(&ip->i_mount->m_perag_lock);
        trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
                        -1, _RET_IP_);
    }
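/*
 * __xfs_inode_clear_reclaim() is the mirror image: when the last
 * reclaimable inode in an AG goes away, the AG-level tag is cleared and
 * the background worker stops being rescheduled for that AG.
 */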
xfs_reclaim_inode_grab(
    struct xfs_inode    *ip,
    int                 flags)

    ASSERT(rcu_read_lock_held());

    /* do unlocked checks to avoid lock traffic in the trylock case */
    if ((flags & SYNC_TRYLOCK) &&
        __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
        return 1;
    spin_lock(&ip->i_flags_lock);
    if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
        __xfs_iflags_test(ip, XFS_IRECLAIM)) {
        /* not a reclaim candidate */
        spin_unlock(&ip->i_flags_lock);
        return 1;
    }
    __xfs_iflags_set(ip, XFS_IRECLAIM);
    spin_unlock(&ip->i_flags_lock);
    return 0;
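/*
 * XFS_IRECLAIM is the exclusive claim: once xfs_reclaim_inode_grab() sets
 * it under i_flags_lock, no other reclaim thread (background worker,
 * shrinker, unmount) will touch this inode. The unlocked SYNC_TRYLOCK
 * pre-checks above are only an optimisation to avoid lock traffic.
 */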
    struct xfs_inode    *ip,
    struct xfs_perag    *pag,
    int                 sync_mode)
    if (!xfs_iflock_nowait(ip)) {
        if (!(sync_mode & SYNC_WAIT))
            goto out;
        xfs_iflock(ip);
    }
    if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
        xfs_iunpin_wait(ip);
        goto reclaim;
    }
    if (xfs_iflags_test(ip, XFS_ISTALE))
        goto reclaim;
    if (xfs_inode_clean(ip))
        goto reclaim;
reclaim:
    spin_lock(&pag->pag_ici_lock);
    spin_unlock(&pag->pag_ici_lock);
out:
    xfs_iflags_clear(ip, XFS_IRECLAIM);
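/*
 * This excerpt of xfs_reclaim_inode() elides the flush of a dirty inode
 * before the reclaim: label and the radix_tree_delete() between the
 * pag_ici_lock pair, which removes the inode from the per-AG tree. The
 * out: path clears XFS_IRECLAIM again so a failed, still-dirty inode can
 * be retried by a later pass.
 */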
xfs_reclaim_inodes_ag(
    struct xfs_mount    *mp,
        unsigned long first_index = 0;
        if (trylock)
            first_index = pag->pag_ici_reclaim_cursor;
            nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
                    (void **)batch, first_index,
                    XFS_LOOKUP_BATCH, XFS_ICI_RECLAIM_TAG);
            for (i = 0; i < nr_found; i++) {
                struct xfs_inode *ip = batch[i];

                if (done || xfs_reclaim_inode_grab(ip, flags))
                    batch[i] = NULL;
            for (i = 0; i < nr_found; i++) {
                if (!batch[i])
                    continue;
                error = xfs_reclaim_inode(batch[i], pag, flags);
            }
        } while (nr_found && !done && *nr_to_scan > 0);
        if (trylock && !done)
            pag->pag_ici_reclaim_cursor = first_index;
        else
            pag->pag_ici_reclaim_cursor = 0;
    if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
        trylock = 0;
        goto restart;
    }
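/*
 * xfs_reclaim_inodes_ag() mirrors the sync walk but uses the tagged gang
 * lookup so only reclaimable inodes are visited. The per-AG cursor lets a
 * non-blocking (trylock) pass resume where it left off; if AGs were
 * skipped and the caller asked for SYNC_WAIT, the scan restarts in
 * blocking mode.
 */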
xfs_reclaim_inodes_nr(
    struct xfs_mount    *mp,
    int                 nr_to_scan)
    xfs_syncd_queue_reclaim(mp);
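/*
 * xfs_reclaim_inodes_nr() is the shrinker entry point: it kicks
 * background reclaim and the AIL first, then does a bounded
 * SYNC_TRYLOCK | SYNC_WAIT scan of nr_to_scan inodes.
 */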
xfs_reclaim_inodes_count(
    struct xfs_mount    *mp)
        reclaimable += pag->pag_ici_reclaimable;
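/*
 * xfs_reclaim_inodes_count() just sums the per-AG pag_ici_reclaimable
 * counters over the AGs that carry XFS_ICI_RECLAIM_TAG, giving the
 * shrinker a cheap estimate of how much there is to reclaim.
 */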