Commit ffc18582 authored by Darrick J. Wong

Merge tag 'inode-walk-cleanups-5.14_2021-06-03' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-5.14-merge2

xfs: clean up incore inode walk functions

This ambitious series aims to clean up redundant inode walk code in
xfs_icache.c, hide the implementation details of the quotaoff dquot
release code, and eliminate indirect function calls from incore inode
walks.

The first thing it does is to move all the code that quotaoff calls to
release dquots from all incore inodes into xfs_icache.c.  Next, it
separates the goal of an inode walk from the actual radix tree tags that
may or may not be involved and drops the kludgy XFS_ICI_NO_TAG thing.
Finally, we split the speculative preallocation (blockgc) and quotaoff
dquot release code paths into separate functions so that we can keep the
implementations cohesive.
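Concretely, the goal/tag split from the patch ends up looking like this
in xfs_icache.c; goals that have no backing radix tree tag (such as the
quotaoff dqrele walk) are negative:

    /* Radix tree tags for the incore inode tree. */
    #define XFS_ICI_RECLAIM_TAG 0   /* inode is to be reclaimed */
    #define XFS_ICI_BLOCKGC_TAG 1   /* has speculative preallocations */

    enum xfs_icwalk_goal {
        /* Goals that are not related to tags; these must be < 0. */
        XFS_ICWALK_DQRELE  = -1,
        /* Goals directly associated with tagged inodes. */
        XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
        XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
    };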

Christoph suggested last cycle that we 'simply' change quotaoff not to
allow deactivating quota entirely, but as these cleanups are to enable
one major change in behavior (deferred inode inactivation) I do not want
to add a second behavior change (quotaoff) as a dependency.

To be blunt: Additional cleanups are not in scope for this series.

Next, I made two observations about incore inode radix tree walks --
since there's a 1:1 mapping between the walk goal and the per-inode
processing function passed in, we can use the goal to make a direct call
to the processing function.  Furthermore, the only caller to supply a
nonzero iter_flags argument is quotaoff, and there's only one INEW flag.

From those observations, I concluded that it's quite possible to remove
two parameters from the xfs_inode_walk* function signatures -- the
iter_flags, and the execute function pointer.  The middle of the series
moves the INEW functionality into the one piece (quotaoff) that wants
it, and removes the indirect calls.
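In other words, the indirect @execute call becomes a direct dispatch on
the walk goal, as in xfs_icwalk_igrab() from the patch:

    /*
     * Decide if we want to grab this inode in anticipation of doing
     * work towards the goal.
     */
    static inline bool
    xfs_icwalk_igrab(
        enum xfs_icwalk_goal goal,
        struct xfs_inode *ip)
    {
        switch (goal) {
        case XFS_ICWALK_DQRELE:
            return xfs_dqrele_igrab(ip);
        case XFS_ICWALK_BLOCKGC:
            return xfs_blockgc_igrab(ip);
        case XFS_ICWALK_RECLAIM:
            return xfs_reclaim_igrab(ip);
        default:
            return false;
        }
    }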

The final observation is that the inode reclaim walk loop is now almost
the same as xfs_inode_walk, so it's silly to maintain two copies.  Merge
the reclaim loop code into xfs_inode_walk.
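Once merged, the reclaim-only behaviors shrink to a couple of goal
checks inside the common loop. A condensed sketch (not the verbatim
patch; the real logic lives inline in xfs_icwalk_ag() in the diff
below):

    /* Reclaim resumes from a per-AG cursor; other goals start at 0. */
    if (goal == XFS_ICWALK_RECLAIM)
        first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
    else
        first_index = 0;

    /* ... batched lookup, igrab, and per-inode processing ... */

    /* Reclaim also remembers where it stopped for the next scan. */
    if (goal == XFS_ICWALK_RECLAIM) {
        if (done)
            first_index = 0;
        WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
    }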

Lastly, refactor the per-ag radix tagging functions since there's
duplicated code that can be consolidated.
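The consolidated pair takes the tag as an argument, replacing the four
reclaim- and blockgc-specific helpers; their signatures, as they appear
in the diff below:

    /* Set a tag on both the AG incore inode tree and the AG radix tree. */
    static void xfs_perag_set_inode_tag(struct xfs_perag *pag,
            xfs_agino_t agino, unsigned int tag);

    /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
    static void xfs_perag_clear_inode_tag(struct xfs_perag *pag,
            xfs_agino_t agino, unsigned int tag);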

This series is a prerequisite for the next two patchsets, since deferred
inode inactivation will add another inode radix tree tag and iterator
function to xfs_inode_walk.

v2: walk the vfs inode list when running quotaoff instead of the radix
    tree, then rework the (now completely internal) inode walk function
    to take the tag as the main parameter.
v3: merge the reclaim loop into xfs_inode_walk, then consolidate the
    radix tree tagging functions
v4: rebase to 5.13-rc4
v5: combine with the quotaoff patchset, reorder functions to minimize
    forward declarations, split inode walk goals from radix tree tags
    to reduce conceptual confusion
v6: start moving the inode cache code towards the xfs_icwalk prefix

* tag 'inode-walk-cleanups-5.14_2021-06-03' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: refactor per-AG inode tagging functions
  xfs: merge xfs_reclaim_inodes_ag into xfs_inode_walk_ag
  xfs: pass struct xfs_eofblocks to the inode scan callback
  xfs: fix radix tree tag signs
  xfs: make the icwalk processing functions clean up the grab state
  xfs: clean up inode state flag tests in xfs_blockgc_igrab
  xfs: remove indirect calls from xfs_inode_walk{,_ag}
  xfs: remove iter_flags parameter from xfs_inode_walk_*
  xfs: move xfs_inew_wait call into xfs_dqrele_inode
  xfs: separate the dqrele_all inode grab logic from xfs_inode_walk_ag_grab
  xfs: pass the goal of the incore inode walk to xfs_inode_walk()
  xfs: rename xfs_inode_walk functions to xfs_icwalk
  xfs: move the inode walk functions further down
  xfs: detach inode dquots at the end of inactivation
  xfs: move the quotaoff dqrele inode walk into xfs_icache.c

[djwong: added variable names to function declarations while fixing
merge conflicts]
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parents 8b943d21 c076ae7a
@@ -64,7 +64,7 @@ struct xfs_perag *
 xfs_perag_get_tag(
     struct xfs_mount *mp,
     xfs_agnumber_t first,
-    int tag)
+    unsigned int tag)
 {
     struct xfs_perag *pag;
     int found;
...
@@ -106,12 +106,12 @@ struct xfs_perag {
 int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
             xfs_agnumber_t *maxagi);
-int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
+int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno);
 void xfs_free_perag(struct xfs_mount *mp);
 
-struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
-struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
-        int tag);
+struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *mp, xfs_agnumber_t agno,
+        unsigned int tag);
 void xfs_perag_put(struct xfs_perag *pag);
 
 /*
...
@@ -26,6 +26,56 @@
 #include <linux/iversion.h>
 
+/* Radix tree tags for incore inode tree. */
+
+/* inode is to be reclaimed */
+#define XFS_ICI_RECLAIM_TAG 0
+/* Inode has speculative preallocations (posteof or cow) to clean. */
+#define XFS_ICI_BLOCKGC_TAG 1
+
+/*
+ * The goal for walking incore inodes.  These can correspond with incore inode
+ * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
+ */
+enum xfs_icwalk_goal {
+    /* Goals that are not related to tags; these must be < 0. */
+    XFS_ICWALK_DQRELE = -1,
+    /* Goals directly associated with tagged inodes. */
+    XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
+    XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
+};
+
+#define XFS_ICWALK_NULL_TAG (-1U)
+
+/* Compute the inode radix tree tag for this goal. */
+static inline unsigned int
+xfs_icwalk_tag(enum xfs_icwalk_goal goal)
+{
+    return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
+}
+
+static int xfs_icwalk(struct xfs_mount *mp,
+        enum xfs_icwalk_goal goal, struct xfs_eofblocks *eofb);
+static int xfs_icwalk_ag(struct xfs_perag *pag,
+        enum xfs_icwalk_goal goal, struct xfs_eofblocks *eofb);
+
+/*
+ * Private inode cache walk flags for struct xfs_eofblocks.  Must not coincide
+ * with XFS_EOF_FLAGS_*.
+ */
+#define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31)
+#define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30)
+#define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29)
+
+/* Stop scanning after icw_scan_limit inodes. */
+#define XFS_ICWALK_FLAG_SCAN_LIMIT  (1U << 28)
+
+#define XFS_ICWALK_PRIVATE_FLAGS    (XFS_ICWALK_FLAG_DROP_UDQUOT | \
+                     XFS_ICWALK_FLAG_DROP_GDQUOT | \
+                     XFS_ICWALK_FLAG_DROP_PDQUOT | \
+                     XFS_ICWALK_FLAG_SCAN_LIMIT)
+
 /*
  * Allocate and initialise an xfs_inode.
  */
@@ -157,46 +207,94 @@ xfs_reclaim_work_queue(
     rcu_read_unlock();
 }
 
-static void
-xfs_perag_set_reclaim_tag(
-    struct xfs_perag *pag)
+/*
+ * Background scanning to trim preallocated space. This is queued based on the
+ * 'speculative_prealloc_lifetime' tunable (5m by default).
+ */
+static inline void
+xfs_blockgc_queue(
+    struct xfs_perag *pag)
+{
+    rcu_read_lock();
+    if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
+        queue_delayed_work(pag->pag_mount->m_gc_workqueue,
+                   &pag->pag_blockgc_work,
+                   msecs_to_jiffies(xfs_blockgc_secs * 1000));
+    rcu_read_unlock();
+}
+
+/* Set a tag on both the AG incore inode tree and the AG radix tree. */
+static void
+xfs_perag_set_inode_tag(
+    struct xfs_perag *pag,
+    xfs_agino_t agino,
+    unsigned int tag)
 {
     struct xfs_mount *mp = pag->pag_mount;
+    bool was_tagged;
 
     lockdep_assert_held(&pag->pag_ici_lock);
-    if (pag->pag_ici_reclaimable++)
+
+    was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
+    radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
+
+    if (tag == XFS_ICI_RECLAIM_TAG)
+        pag->pag_ici_reclaimable++;
+
+    if (was_tagged)
         return;
 
-    /* propagate the reclaim tag up into the perag radix tree */
+    /* propagate the tag up into the perag radix tree */
     spin_lock(&mp->m_perag_lock);
-    radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
-               XFS_ICI_RECLAIM_TAG);
+    radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
     spin_unlock(&mp->m_perag_lock);
 
-    /* schedule periodic background inode reclaim */
-    xfs_reclaim_work_queue(mp);
+    /* start background work */
+    switch (tag) {
+    case XFS_ICI_RECLAIM_TAG:
+        xfs_reclaim_work_queue(mp);
+        break;
+    case XFS_ICI_BLOCKGC_TAG:
+        xfs_blockgc_queue(pag);
+        break;
+    }
 
-    trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+    trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
 }
 
+/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
 static void
-xfs_perag_clear_reclaim_tag(
-    struct xfs_perag *pag)
+xfs_perag_clear_inode_tag(
+    struct xfs_perag *pag,
+    xfs_agino_t agino,
+    unsigned int tag)
 {
     struct xfs_mount *mp = pag->pag_mount;
 
     lockdep_assert_held(&pag->pag_ici_lock);
-    if (--pag->pag_ici_reclaimable)
+
+    /*
+     * Reclaim can signal (with a null agino) that it cleared its own tag
+     * by removing the inode from the radix tree.
+     */
+    if (agino != NULLAGINO)
+        radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
+    else
+        ASSERT(tag == XFS_ICI_RECLAIM_TAG);
+
+    if (tag == XFS_ICI_RECLAIM_TAG)
+        pag->pag_ici_reclaimable--;
+
+    if (radix_tree_tagged(&pag->pag_ici_root, tag))
         return;
 
-    /* clear the reclaim tag from the perag radix tree */
+    /* clear the tag from the perag radix tree */
     spin_lock(&mp->m_perag_lock);
-    radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
-                 XFS_ICI_RECLAIM_TAG);
+    radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
     spin_unlock(&mp->m_perag_lock);
-    trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
-}
 
+    trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
+}
 
 /*
  * We set the inode flag atomically with the radix tree tag.
@@ -204,7 +302,7 @@ xfs_perag_clear_reclaim_tag(
  * can go away.
  */
 void
-xfs_inode_set_reclaim_tag(
+xfs_inode_mark_reclaimable(
     struct xfs_inode *ip)
 {
     struct xfs_mount *mp = ip->i_mount;
@@ -214,9 +312,8 @@ xfs_inode_set_reclaim_tag(
     spin_lock(&pag->pag_ici_lock);
     spin_lock(&ip->i_flags_lock);
 
-    radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
-               XFS_ICI_RECLAIM_TAG);
-    xfs_perag_set_reclaim_tag(pag);
+    xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+            XFS_ICI_RECLAIM_TAG);
     __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 
     spin_unlock(&ip->i_flags_lock);
@@ -224,18 +321,7 @@ xfs_inode_set_reclaim_tag(
     xfs_perag_put(pag);
 }
 
-STATIC void
-xfs_inode_clear_reclaim_tag(
-    struct xfs_perag *pag,
-    xfs_ino_t ino)
-{
-    radix_tree_tag_clear(&pag->pag_ici_root,
-                 XFS_INO_TO_AGINO(pag->pag_mount, ino),
-                 XFS_ICI_RECLAIM_TAG);
-    xfs_perag_clear_reclaim_tag(pag);
-}
-
-static void
+static inline void
 xfs_inew_wait(
     struct xfs_inode *ip)
 {
@@ -433,7 +519,9 @@ xfs_iget_cache_hit(
      */
     ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
     ip->i_flags |= XFS_INEW;
-    xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
+    xfs_perag_clear_inode_tag(pag,
+            XFS_INO_TO_AGINO(pag->pag_mount, ino),
+            XFS_ICI_RECLAIM_TAG);
     inode->i_state = I_NEW;
     ip->i_sick = 0;
     ip->i_checked = 0;
@@ -715,207 +803,96 @@ xfs_icache_inode_is_allocated(
     return 0;
 }
 
-/*
- * The inode lookup is done in batches to keep the amount of lock traffic and
- * radix tree lookups to a minimum. The batch size is a trade off between
- * lookup reduction and stack usage. This is in the reclaim path, so we can't
- * be too greedy.
- */
-#define XFS_LOOKUP_BATCH 32
-
-/*
- * Decide if the given @ip is eligible to be a part of the inode walk, and
- * grab it if so.  Returns true if it's ready to go or false if we should just
- * ignore it.
- */
-STATIC bool
-xfs_inode_walk_ag_grab(
-    struct xfs_inode *ip,
-    int flags)
+#ifdef CONFIG_XFS_QUOTA
+/* Decide if we want to grab this inode to drop its dquots. */
+static bool
+xfs_dqrele_igrab(
+    struct xfs_inode *ip)
 {
-    struct inode *inode = VFS_I(ip);
-    bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
+    bool ret = false;
 
     ASSERT(rcu_read_lock_held());
 
     /* Check for stale RCU freed inode */
     spin_lock(&ip->i_flags_lock);
     if (!ip->i_ino)
-        goto out_unlock_noent;
-
-    /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-    if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
-        __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
-        goto out_unlock_noent;
-    spin_unlock(&ip->i_flags_lock);
-
-    /* nothing to sync during shutdown */
-    if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-        return false;
-
-    /* If we can't grab the inode, it must on it's way to reclaim. */
-    if (!igrab(inode))
-        return false;
-
-    /* inode is valid */
-    return true;
-
-out_unlock_noent:
-    spin_unlock(&ip->i_flags_lock);
-    return false;
-}
-
-/*
- * For a given per-AG structure @pag, grab, @execute, and rele all incore
- * inodes with the given radix tree @tag.
- */
-STATIC int
-xfs_inode_walk_ag(
-    struct xfs_perag *pag,
-    int iter_flags,
-    int (*execute)(struct xfs_inode *ip, void *args),
-    void *args,
-    int tag)
-{
-    struct xfs_mount *mp = pag->pag_mount;
-    uint32_t first_index;
-    int last_error = 0;
-    int skipped;
-    bool done;
-    int nr_found;
-
-restart:
-    done = false;
-    skipped = 0;
-    first_index = 0;
-    nr_found = 0;
-    do {
-        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
-        int error = 0;
-        int i;
-
-        rcu_read_lock();
-
-        if (tag == XFS_ICI_NO_TAG)
-            nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
-                    (void **)batch, first_index,
-                    XFS_LOOKUP_BATCH);
-        else
-            nr_found = radix_tree_gang_lookup_tag(
-                    &pag->pag_ici_root,
-                    (void **) batch, first_index,
-                    XFS_LOOKUP_BATCH, tag);
-        if (!nr_found) {
-            rcu_read_unlock();
-            break;
-        }
-
-        /*
-         * Grab the inodes before we drop the lock. if we found
-         * nothing, nr == 0 and the loop will be skipped.
-         */
-        for (i = 0; i < nr_found; i++) {
-            struct xfs_inode *ip = batch[i];
-
-            if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
-                batch[i] = NULL;
-
-            /*
-             * Update the index for the next lookup. Catch
-             * overflows into the next AG range which can occur if
-             * we have inodes in the last block of the AG and we
-             * are currently pointing to the last inode.
-             *
-             * Because we may see inodes that are from the wrong AG
-             * due to RCU freeing and reallocation, only update the
-             * index if it lies in this AG. It was a race that lead
-             * us to see this inode, so another lookup from the
-             * same index will not find it again.
-             */
-            if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
-                continue;
-            first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-            if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
-                done = true;
-        }
-
-        /* unlock now we've grabbed the inodes. */
-        rcu_read_unlock();
-
-        for (i = 0; i < nr_found; i++) {
-            if (!batch[i])
-                continue;
-            if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
-                xfs_iflags_test(batch[i], XFS_INEW))
-                xfs_inew_wait(batch[i]);
-            error = execute(batch[i], args);
-            xfs_irele(batch[i]);
-            if (error == -EAGAIN) {
-                skipped++;
-                continue;
-            }
-            if (error && last_error != -EFSCORRUPTED)
-                last_error = error;
-        }
-
-        /* bail out if the filesystem is corrupted. */
-        if (error == -EFSCORRUPTED)
-            break;
-
-        cond_resched();
-
-    } while (nr_found && !done);
-
-    if (skipped) {
-        delay(1);
-        goto restart;
-    }
-    return last_error;
-}
-
-/* Fetch the next (possibly tagged) per-AG structure. */
-static inline struct xfs_perag *
-xfs_inode_walk_get_perag(
-    struct xfs_mount *mp,
-    xfs_agnumber_t agno,
-    int tag)
-{
-    if (tag == XFS_ICI_NO_TAG)
-        return xfs_perag_get(mp, agno);
-    return xfs_perag_get_tag(mp, agno, tag);
-}
-
-/*
- * Call the @execute function on all incore inodes matching the radix tree
- * @tag.
- */
-int
-xfs_inode_walk(
-    struct xfs_mount *mp,
-    int iter_flags,
-    int (*execute)(struct xfs_inode *ip, void *args),
-    void *args,
-    int tag)
-{
-    struct xfs_perag *pag;
-    int error = 0;
-    int last_error = 0;
-    xfs_agnumber_t ag;
-
-    ag = 0;
-    while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
-        ag = pag->pag_agno + 1;
-        error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
-        xfs_perag_put(pag);
-        if (error) {
-            last_error = error;
-            if (error == -EFSCORRUPTED)
-                break;
-        }
-    }
-    return last_error;
-}
+        goto out_unlock;
+
+    /*
+     * Skip inodes that are anywhere in the reclaim machinery because we
+     * drop dquots before tagging an inode for reclamation.
+     */
+    if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE))
+        goto out_unlock;
+
+    /*
+     * The inode looks alive; try to grab a VFS reference so that it won't
+     * get destroyed.  If we got the reference, return true to say that
+     * we grabbed the inode.
+     *
+     * If we can't get the reference, then we know the inode had its VFS
+     * state torn down and hasn't yet entered the reclaim machinery.  Since
+     * we also know that dquots are detached from an inode before it enters
+     * reclaim, we can skip the inode.
+     */
+    ret = igrab(VFS_I(ip)) != NULL;
+
+out_unlock:
+    spin_unlock(&ip->i_flags_lock);
+    return ret;
+}
+
+/* Drop this inode's dquots. */
+static void
+xfs_dqrele_inode(
+    struct xfs_inode *ip,
+    struct xfs_eofblocks *eofb)
+{
+    if (xfs_iflags_test(ip, XFS_INEW))
+        xfs_inew_wait(ip);
+
+    xfs_ilock(ip, XFS_ILOCK_EXCL);
+    if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
+        xfs_qm_dqrele(ip->i_udquot);
+        ip->i_udquot = NULL;
+    }
+    if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
+        xfs_qm_dqrele(ip->i_gdquot);
+        ip->i_gdquot = NULL;
+    }
+    if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
+        xfs_qm_dqrele(ip->i_pdquot);
+        ip->i_pdquot = NULL;
+    }
+    xfs_iunlock(ip, XFS_ILOCK_EXCL);
+    xfs_irele(ip);
+}
+
+/*
+ * Detach all dquots from incore inodes if we can.  The caller must already
+ * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
+ * not get reattached.
+ */
+int
+xfs_dqrele_all_inodes(
+    struct xfs_mount *mp,
+    unsigned int qflags)
+{
+    struct xfs_eofblocks eofb = { .eof_flags = 0 };
+
+    if (qflags & XFS_UQUOTA_ACCT)
+        eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
+    if (qflags & XFS_GQUOTA_ACCT)
+        eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
+    if (qflags & XFS_PQUOTA_ACCT)
+        eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;
+
+    return xfs_icwalk(mp, XFS_ICWALK_DQRELE, &eofb);
+}
+#else
+# define xfs_dqrele_igrab(ip)       (false)
+# define xfs_dqrele_inode(ip, priv) ((void)0)
+#endif /* CONFIG_XFS_QUOTA */
@@ -935,7 +912,7 @@ xfs_inode_walk(
 /*
  * Grab the inode for reclaim exclusively.
  * Return true if we grabbed it, false otherwise.
  */
 static bool
-xfs_reclaim_inode_grab(
+xfs_reclaim_igrab(
     struct xfs_inode *ip)
 {
     ASSERT(rcu_read_lock_held());
@@ -1018,7 +995,7 @@ xfs_reclaim_inode(
     if (!radix_tree_delete(&pag->pag_ici_root,
                 XFS_INO_TO_AGINO(ip->i_mount, ino)))
         ASSERT(0);
-    xfs_perag_clear_reclaim_tag(pag);
+    xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
     spin_unlock(&pag->pag_ici_lock);
 
     /*
@@ -1030,7 +1007,7 @@ xfs_reclaim_inode(
     * unlocked after the lookup before we go ahead and free it.
     */
     xfs_ilock(ip, XFS_ILOCK_EXCL);
-    xfs_qm_dqdetach(ip);
+    ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
     xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
     ASSERT(xfs_inode_clean(ip));
@@ -1045,105 +1022,13 @@ xfs_reclaim_inode(
     xfs_iflags_clear(ip, XFS_IRECLAIM);
 }
 
-/*
- * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
- * corrupted, we still want to try to reclaim all the inodes. If we don't,
- * then a shut down during filesystem unmount reclaim walk leak all the
- * unreclaimed inodes.
- *
- * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
- * so that callers that want to block until all dirty inodes are written back
- * and reclaimed can sanely loop.
- */
-static void
-xfs_reclaim_inodes_ag(
-    struct xfs_mount *mp,
-    int *nr_to_scan)
-{
-    struct xfs_perag *pag;
-    xfs_agnumber_t agno;
-
-    for_each_perag_tag(mp, agno, pag, XFS_ICI_RECLAIM_TAG) {
-        unsigned long first_index = 0;
-        int done = 0;
-        int nr_found = 0;
-
-        first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
-        do {
-            struct xfs_inode *batch[XFS_LOOKUP_BATCH];
-            int i;
-
-            rcu_read_lock();
-            nr_found = radix_tree_gang_lookup_tag(
-                    &pag->pag_ici_root,
-                    (void **)batch, first_index,
-                    XFS_LOOKUP_BATCH,
-                    XFS_ICI_RECLAIM_TAG);
-            if (!nr_found) {
-                done = 1;
-                rcu_read_unlock();
-                break;
-            }
-
-            /*
-             * Grab the inodes before we drop the lock. if we found
-             * nothing, nr == 0 and the loop will be skipped.
-             */
-            for (i = 0; i < nr_found; i++) {
-                struct xfs_inode *ip = batch[i];
-
-                if (done || !xfs_reclaim_inode_grab(ip))
-                    batch[i] = NULL;
-
-                /*
-                 * Update the index for the next lookup. Catch
-                 * overflows into the next AG range which can
-                 * occur if we have inodes in the last block of
-                 * the AG and we are currently pointing to the
-                 * last inode.
-                 *
-                 * Because we may see inodes that are from the
-                 * wrong AG due to RCU freeing and
-                 * reallocation, only update the index if it
-                 * lies in this AG. It was a race that lead us
-                 * to see this inode, so another lookup from
-                 * the same index will not find it again.
-                 */
-                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
-                                pag->pag_agno)
-                    continue;
-                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
-                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
-                    done = 1;
-            }
-
-            /* unlock now we've grabbed the inodes. */
-            rcu_read_unlock();
-
-            for (i = 0; i < nr_found; i++) {
-                if (batch[i])
-                    xfs_reclaim_inode(batch[i], pag);
-            }
-
-            *nr_to_scan -= XFS_LOOKUP_BATCH;
-            cond_resched();
-        } while (nr_found && !done && *nr_to_scan > 0);
-
-        if (done)
-            first_index = 0;
-        WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
-    }
-}
-
 void
 xfs_reclaim_inodes(
     struct xfs_mount *mp)
 {
-    int nr_to_scan = INT_MAX;
-
     while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
         xfs_ail_push_all_sync(mp->m_ail);
-        xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+        xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
     }
 }
@@ -1159,11 +1044,16 @@ xfs_reclaim_inodes_nr(
     struct xfs_mount *mp,
     int nr_to_scan)
 {
+    struct xfs_eofblocks eofb = {
+        .eof_flags = XFS_ICWALK_FLAG_SCAN_LIMIT,
+        .icw_scan_limit = nr_to_scan,
+    };
+
     /* kick background reclaimer and push the AIL */
     xfs_reclaim_work_queue(mp);
     xfs_ail_push_all(mp->m_ail);
 
-    xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+    xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &eofb);
     return 0;
 }
@@ -1273,19 +1163,17 @@ xfs_reclaim_worker(
 {
     struct xfs_mount *mp = container_of(to_delayed_work(work),
                     struct xfs_mount, m_reclaim_work);
-    int nr_to_scan = INT_MAX;
 
-    xfs_reclaim_inodes_ag(mp, &nr_to_scan);
+    xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
     xfs_reclaim_work_queue(mp);
 }
 
 STATIC int
 xfs_inode_free_eofblocks(
     struct xfs_inode *ip,
-    void *args,
+    struct xfs_eofblocks *eofb,
     unsigned int *lockflags)
 {
-    struct xfs_eofblocks *eofb = args;
     bool wait;
 
     wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
@@ -1323,22 +1211,6 @@ xfs_inode_free_eofblocks(
     return 0;
 }
 
-/*
- * Background scanning to trim preallocated space. This is queued based on the
- * 'speculative_prealloc_lifetime' tunable (5m by default).
- */
-static inline void
-xfs_blockgc_queue(
-    struct xfs_perag *pag)
-{
-    rcu_read_lock();
-    if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
-        queue_delayed_work(pag->pag_mount->m_gc_workqueue,
-                   &pag->pag_blockgc_work,
-                   msecs_to_jiffies(xfs_blockgc_secs * 1000));
-    rcu_read_unlock();
-}
-
 static void
 xfs_blockgc_set_iflag(
     struct xfs_inode *ip,
@@ -1346,7 +1218,6 @@ xfs_blockgc_set_iflag(
 {
     struct xfs_mount *mp = ip->i_mount;
     struct xfs_perag *pag;
-    int tagged;
 
     ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
@@ -1363,24 +1234,8 @@ xfs_blockgc_set_iflag(
     pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
     spin_lock(&pag->pag_ici_lock);
 
-    tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
-    radix_tree_tag_set(&pag->pag_ici_root,
-               XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-               XFS_ICI_BLOCKGC_TAG);
-    if (!tagged) {
-        /* propagate the blockgc tag up into the perag radix tree */
-        spin_lock(&ip->i_mount->m_perag_lock);
-        radix_tree_tag_set(&ip->i_mount->m_perag_tree,
-                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-                   XFS_ICI_BLOCKGC_TAG);
-        spin_unlock(&ip->i_mount->m_perag_lock);
-
-        /* kick off background trimming */
-        xfs_blockgc_queue(pag);
-
-        trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
-                _RET_IP_);
-    }
+    xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+            XFS_ICI_BLOCKGC_TAG);
 
     spin_unlock(&pag->pag_ici_lock);
     xfs_perag_put(pag);
@@ -1416,19 +1271,8 @@ xfs_blockgc_clear_iflag(
     pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
     spin_lock(&pag->pag_ici_lock);
 
-    radix_tree_tag_clear(&pag->pag_ici_root,
-                 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
-                 XFS_ICI_BLOCKGC_TAG);
-    if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
-        /* clear the blockgc tag from the perag radix tree */
-        spin_lock(&ip->i_mount->m_perag_lock);
-        radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
-                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
-                     XFS_ICI_BLOCKGC_TAG);
-        spin_unlock(&ip->i_mount->m_perag_lock);
-        trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
-                _RET_IP_);
-    }
+    xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
+            XFS_ICI_BLOCKGC_TAG);
 
     spin_unlock(&pag->pag_ici_lock);
     xfs_perag_put(pag);
@@ -1489,10 +1333,9 @@ xfs_prep_free_cowblocks(
 STATIC int
 xfs_inode_free_cowblocks(
     struct xfs_inode *ip,
-    void *args,
+    struct xfs_eofblocks *eofb,
     unsigned int *lockflags)
 {
-    struct xfs_eofblocks *eofb = args;
     bool wait;
     int ret = 0;
@@ -1575,23 +1418,66 @@ xfs_blockgc_start(
         xfs_blockgc_queue(pag);
 }
 
+/* Don't try to run block gc on an inode that's in any of these states. */
+#define XFS_BLOCKGC_NOGRAB_IFLAGS   (XFS_INEW | \
+                     XFS_IRECLAIMABLE | \
+                     XFS_IRECLAIM)
+
+/*
+ * Decide if the given @ip is eligible for garbage collection of speculative
+ * preallocations, and grab it if so.  Returns true if it's ready to go or
+ * false if we should just ignore it.
+ */
+static bool
+xfs_blockgc_igrab(
+    struct xfs_inode *ip)
+{
+    struct inode *inode = VFS_I(ip);
+
+    ASSERT(rcu_read_lock_held());
+
+    /* Check for stale RCU freed inode */
+    spin_lock(&ip->i_flags_lock);
+    if (!ip->i_ino)
+        goto out_unlock_noent;
+
+    if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
+        goto out_unlock_noent;
+    spin_unlock(&ip->i_flags_lock);
+
+    /* nothing to sync during shutdown */
+    if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+        return false;
+
+    /* If we can't grab the inode, it must on it's way to reclaim. */
+    if (!igrab(inode))
+        return false;
+
+    /* inode is valid */
+    return true;
+
+out_unlock_noent:
+    spin_unlock(&ip->i_flags_lock);
+    return false;
+}
+
 /* Scan one incore inode for block preallocations that we can remove. */
 static int
 xfs_blockgc_scan_inode(
     struct xfs_inode *ip,
-    void *args)
+    struct xfs_eofblocks *eofb)
 {
     unsigned int lockflags = 0;
     int error;
 
-    error = xfs_inode_free_eofblocks(ip, args, &lockflags);
+    error = xfs_inode_free_eofblocks(ip, eofb, &lockflags);
     if (error)
         goto unlock;
 
-    error = xfs_inode_free_cowblocks(ip, args, &lockflags);
+    error = xfs_inode_free_cowblocks(ip, eofb, &lockflags);
 unlock:
     if (lockflags)
         xfs_iunlock(ip, lockflags);
+    xfs_irele(ip);
     return error;
 }
@@ -1607,8 +1493,7 @@ xfs_blockgc_worker(
     if (!sb_start_write_trylock(mp->m_super))
         return;
-    error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
-            XFS_ICI_BLOCKGC_TAG);
+    error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
     if (error)
         xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
                 pag->pag_agno, error);
@@ -1626,8 +1511,7 @@ xfs_blockgc_free_space(
 {
     trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
 
-    return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
-            XFS_ICI_BLOCKGC_TAG);
+    return xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, eofb);
 }
@@ -1695,3 +1579,222 @@ xfs_blockgc_free_quota(
             xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
             xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
 }
+
+/* XFS Inode Cache Walking Code */
+
+/*
+ * The inode lookup is done in batches to keep the amount of lock traffic and
+ * radix tree lookups to a minimum. The batch size is a trade off between
+ * lookup reduction and stack usage. This is in the reclaim path, so we can't
+ * be too greedy.
+ */
+#define XFS_LOOKUP_BATCH    32
+
+/*
+ * Decide if we want to grab this inode in anticipation of doing work towards
+ * the goal.
+ */
+static inline bool
+xfs_icwalk_igrab(
+    enum xfs_icwalk_goal goal,
+    struct xfs_inode *ip)
+{
+    switch (goal) {
+    case XFS_ICWALK_DQRELE:
+        return xfs_dqrele_igrab(ip);
+    case XFS_ICWALK_BLOCKGC:
+        return xfs_blockgc_igrab(ip);
+    case XFS_ICWALK_RECLAIM:
+        return xfs_reclaim_igrab(ip);
+    default:
+        return false;
+    }
+}
+
+/*
+ * Process an inode.  Each processing function must handle any state changes
+ * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
+ */
+static inline int
+xfs_icwalk_process_inode(
+    enum xfs_icwalk_goal goal,
+    struct xfs_inode *ip,
+    struct xfs_perag *pag,
+    struct xfs_eofblocks *eofb)
+{
+    int error = 0;
+
+    switch (goal) {
+    case XFS_ICWALK_DQRELE:
+        xfs_dqrele_inode(ip, eofb);
+        break;
+    case XFS_ICWALK_BLOCKGC:
+        error = xfs_blockgc_scan_inode(ip, eofb);
+        break;
+    case XFS_ICWALK_RECLAIM:
+        xfs_reclaim_inode(ip, pag);
+        break;
+    }
+    return error;
+}
+
+/*
+ * For a given per-AG structure @pag and a goal, grab qualifying inodes and
+ * process them in some manner.
+ */
+static int
+xfs_icwalk_ag(
+    struct xfs_perag *pag,
+    enum xfs_icwalk_goal goal,
+    struct xfs_eofblocks *eofb)
+{
+    struct xfs_mount *mp = pag->pag_mount;
+    uint32_t first_index;
+    int last_error = 0;
+    int skipped;
+    bool done;
+    int nr_found;
+
+restart:
+    done = false;
+    skipped = 0;
+    if (goal == XFS_ICWALK_RECLAIM)
+        first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
+    else
+        first_index = 0;
+    nr_found = 0;
+    do {
+        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+        unsigned int tag = xfs_icwalk_tag(goal);
+        int error = 0;
+        int i;
+
+        rcu_read_lock();
+
+        if (tag == XFS_ICWALK_NULL_TAG)
+            nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+                    (void **)batch, first_index,
+                    XFS_LOOKUP_BATCH);
+        else
+            nr_found = radix_tree_gang_lookup_tag(
+                    &pag->pag_ici_root,
+                    (void **) batch, first_index,
+                    XFS_LOOKUP_BATCH, tag);
+
+        if (!nr_found) {
+            done = true;
+            rcu_read_unlock();
+            break;
+        }
+
+        /*
+         * Grab the inodes before we drop the lock. if we found
+         * nothing, nr == 0 and the loop will be skipped.
+         */
+        for (i = 0; i < nr_found; i++) {
+            struct xfs_inode *ip = batch[i];
+
+            if (done || !xfs_icwalk_igrab(goal, ip))
+                batch[i] = NULL;
+
+            /*
+             * Update the index for the next lookup. Catch
+             * overflows into the next AG range which can occur if
+             * we have inodes in the last block of the AG and we
+             * are currently pointing to the last inode.
+             *
+             * Because we may see inodes that are from the wrong AG
+             * due to RCU freeing and reallocation, only update the
+             * index if it lies in this AG. It was a race that lead
+             * us to see this inode, so another lookup from the
+             * same index will not find it again.
+             */
+            if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+                continue;
+            first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+            if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+                done = true;
+        }
+
+        /* unlock now we've grabbed the inodes. */
+        rcu_read_unlock();
+
+        for (i = 0; i < nr_found; i++) {
+            if (!batch[i])
+                continue;
+            error = xfs_icwalk_process_inode(goal, batch[i], pag,
+                    eofb);
+            if (error == -EAGAIN) {
+                skipped++;
+                continue;
+            }
+            if (error && last_error != -EFSCORRUPTED)
+                last_error = error;
+        }
+
+        /* bail out if the filesystem is corrupted. */
+        if (error == -EFSCORRUPTED)
+            break;
+
+        cond_resched();
+
+        if (eofb && (eofb->eof_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
+            eofb->icw_scan_limit -= XFS_LOOKUP_BATCH;
+            if (eofb->icw_scan_limit <= 0)
+                break;
+        }
+    } while (nr_found && !done);
+
+    if (goal == XFS_ICWALK_RECLAIM) {
+        if (done)
+            first_index = 0;
+        WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
+    }
+
+    if (skipped) {
+        delay(1);
+        goto restart;
+    }
+    return last_error;
+}
+
+/* Fetch the next (possibly tagged) per-AG structure. */
+static inline struct xfs_perag *
+xfs_icwalk_get_perag(
+    struct xfs_mount *mp,
+    xfs_agnumber_t agno,
+    enum xfs_icwalk_goal goal)
+{
+    unsigned int tag = xfs_icwalk_tag(goal);
+
+    if (tag == XFS_ICWALK_NULL_TAG)
+        return xfs_perag_get(mp, agno);
+    return xfs_perag_get_tag(mp, agno, tag);
+}
+
+/* Walk all incore inodes to achieve a given goal. */
+static int
+xfs_icwalk(
+    struct xfs_mount *mp,
+    enum xfs_icwalk_goal goal,
+    struct xfs_eofblocks *eofb)
+{
+    struct xfs_perag *pag;
+    int error = 0;
+    int last_error = 0;
+    xfs_agnumber_t agno = 0;
+
+    while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
+        agno = pag->pag_agno + 1;
+        error = xfs_icwalk_ag(pag, goal, eofb);
+        xfs_perag_put(pag);
+        if (error) {
+            last_error = error;
+            if (error == -EFSCORRUPTED)
+                break;
+        }
+    }
+    return last_error;
+
+    BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);
+}
@@ -15,17 +15,9 @@ struct xfs_eofblocks {
     kgid_t eof_gid;
     prid_t eof_prid;
     __u64 eof_min_file_size;
+    int icw_scan_limit;
 };
 
-/*
- * tags for inode radix tree
- */
-#define XFS_ICI_NO_TAG      (-1)    /* special flag for an untagged lookup
-                       in xfs_inode_walk */
-#define XFS_ICI_RECLAIM_TAG 0   /* inode is to be reclaimed */
-/* Inode has speculative preallocations (posteof or cow) to clean. */
-#define XFS_ICI_BLOCKGC_TAG 1
-
 /*
  * Flags for xfs_iget()
  */
@@ -34,11 +26,6 @@ struct xfs_eofblocks {
 #define XFS_IGET_DONTCACHE  0x4
 #define XFS_IGET_INCORE     0x8 /* don't read from disk or reinit */
 
-/*
- * flags for AG inode iterator
- */
-#define XFS_INODE_WALK_INEW_WAIT    0x1 /* wait on new inodes */
-
 int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
          uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -52,7 +39,7 @@ void xfs_reclaim_inodes(struct xfs_mount *mp);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
 long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
 
-void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+void xfs_inode_mark_reclaimable(struct xfs_inode *ip);
 
 int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
         struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
@@ -68,9 +55,11 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 
 void xfs_blockgc_worker(struct work_struct *work);
 
-int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
-    int (*execute)(struct xfs_inode *ip, void *args),
-    void *args, int tag);
+#ifdef CONFIG_XFS_QUOTA
+int xfs_dqrele_all_inodes(struct xfs_mount *mp, unsigned int qflags);
+#else
+# define xfs_dqrele_all_inodes(mp, qflags)  (0)
+#endif
 
 int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
                   xfs_ino_t ino, bool *inuse);
...
@@ -1676,7 +1676,7 @@ xfs_inactive(
      */
     if (VFS_I(ip)->i_mode == 0) {
         ASSERT(ip->i_df.if_broot_bytes == 0);
-        return;
+        goto out;
     }
 
     mp = ip->i_mount;
@@ -1684,11 +1684,11 @@ xfs_inactive(
 
     /* If this is a read-only mount, don't do this (would generate I/O) */
     if (mp->m_flags & XFS_MOUNT_RDONLY)
-        return;
+        goto out;
 
     /* Metadata inodes require explicit resource cleanup. */
     if (xfs_is_metadata_inode(ip))
-        return;
+        goto out;
 
     /* Try to clean out the cow blocks if there are any. */
     if (xfs_inode_has_cow_data(ip))
@@ -1707,7 +1707,7 @@ xfs_inactive(
         if (xfs_can_free_eofblocks(ip, true))
             xfs_free_eofblocks(ip);
 
-        return;
+        goto out;
     }
 
     if (S_ISREG(VFS_I(ip)->i_mode) &&
@@ -1717,14 +1717,14 @@ xfs_inactive(
 
     error = xfs_qm_dqattach(ip);
     if (error)
-        return;
+        goto out;
 
     if (S_ISLNK(VFS_I(ip)->i_mode))
         error = xfs_inactive_symlink(ip);
     else if (truncate)
         error = xfs_inactive_truncate(ip);
     if (error)
-        return;
+        goto out;
 
     /*
      * If there are attributes associated with the file then blow them away
@@ -1734,7 +1734,7 @@ xfs_inactive(
     if (XFS_IFORK_Q(ip)) {
         error = xfs_attr_inactive(ip);
         if (error)
-            return;
+            goto out;
     }
 
     ASSERT(!ip->i_afp);
@@ -1743,12 +1743,12 @@ xfs_inactive(
     /*
      * Free the inode.
     */
-    error = xfs_inactive_ifree(ip);
-    if (error)
-        return;
+    xfs_inactive_ifree(ip);
 
+out:
     /*
-     * Release the dquots held by inode, if any.
+     * We're done making metadata updates for this inode, so we can release
+     * the attached dquots.
     */
     xfs_qm_dqdetach(ip);
 }
...
@@ -142,7 +142,6 @@ extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
 
 /* dquot stuff */
 extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
-extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 
 /* quota ops */
 extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
...
@@ -201,7 +201,8 @@ xfs_qm_scall_quotaoff(
     * depend on the quota inodes (and other things) being valid as long as
     * we keep the lock(s).
     */
-    xfs_qm_dqrele_all_inodes(mp, flags);
+    error = xfs_dqrele_all_inodes(mp, flags);
+    ASSERT(!error);
 
     /*
      * Next we make the changes in the quota flag in the mount struct.
@@ -747,54 +748,3 @@ xfs_qm_scall_getquota_next(
     xfs_qm_dqput(dqp);
     return error;
 }
-
-STATIC int
-xfs_dqrele_inode(
-    struct xfs_inode *ip,
-    void *args)
-{
-    uint *flags = args;
-
-    /* skip quota inodes */
-    if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
-        ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
-        ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
-        ASSERT(ip->i_udquot == NULL);
-        ASSERT(ip->i_gdquot == NULL);
-        ASSERT(ip->i_pdquot == NULL);
-        return 0;
-    }
-
-    xfs_ilock(ip, XFS_ILOCK_EXCL);
-    if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
-        xfs_qm_dqrele(ip->i_udquot);
-        ip->i_udquot = NULL;
-    }
-    if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
-        xfs_qm_dqrele(ip->i_gdquot);
-        ip->i_gdquot = NULL;
-    }
-    if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
-        xfs_qm_dqrele(ip->i_pdquot);
-        ip->i_pdquot = NULL;
-    }
-    xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
-    return 0;
-}
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- *
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff.
- */
-void
-xfs_qm_dqrele_all_inodes(
-    struct xfs_mount *mp,
-    uint flags)
-{
-    ASSERT(mp->m_quotainfo);
-    xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
-            &flags, XFS_ICI_NO_TAG);
-}
@@ -668,7 +668,7 @@ xfs_fs_destroy_inode(
     * reclaim path handles this more efficiently than we can here, so
     * simply let background reclaim tear down all inodes.
     */
-    xfs_inode_set_reclaim_tag(ip);
+    xfs_inode_mark_reclaimable(ip);
 }
 
 static void
...
@@ -153,10 +153,8 @@ DEFINE_EVENT(xfs_perag_class, name, \
 DEFINE_PERAG_REF_EVENT(xfs_perag_get);
 DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag);
 DEFINE_PERAG_REF_EVENT(xfs_perag_put);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim);
-DEFINE_PERAG_REF_EVENT(xfs_perag_set_blockgc);
-DEFINE_PERAG_REF_EVENT(xfs_perag_clear_blockgc);
+DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
+DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
 
 DECLARE_EVENT_CLASS(xfs_ag_class,
     TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno),
@@ -3898,6 +3896,7 @@ DECLARE_EVENT_CLASS(xfs_eofblocks_class,
         __field(uint32_t, gid)
         __field(prid_t, prid)
         __field(__u64, min_file_size)
+        __field(int, scan_limit)
         __field(unsigned long, caller_ip)
     ),
     TP_fast_assign(
@@ -3909,15 +3908,17 @@ DECLARE_EVENT_CLASS(xfs_eofblocks_class,
                 eofb->eof_gid) : 0;
         __entry->prid = eofb ? eofb->eof_prid : 0;
         __entry->min_file_size = eofb ? eofb->eof_min_file_size : 0;
+        __entry->scan_limit = eofb ? eofb->icw_scan_limit : 0;
         __entry->caller_ip = caller_ip;
     ),
-    TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu caller %pS",
+    TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %d caller %pS",
          MAJOR(__entry->dev), MINOR(__entry->dev),
         __entry->flags,
        __entry->uid,
        __entry->gid,
        __entry->prid,
        __entry->min_file_size,
+       __entry->scan_limit,
        (char *)__entry->caller_ip)
 );
 #define DEFINE_EOFBLOCKS_EVENT(name) \
...