Commit b878dbbe authored by Chandan Babu R

Merge tag 'reduce-scrub-iget-overhead-6.10_2024-04-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.10-mergeC

xfs: reduce iget overhead in scrub

This patchset looks to reduce iget overhead in two ways: First, a
previous patch conditionally set DONTCACHE on inodes during xchk_irele
on the grounds that we knew better at irele time if an inode should be
dropped.  Unfortunately, over time that patch morphed into a call to
d_mark_dontcache, which resulted in inodes being dropped even if they
were referenced by the dcache.  This actually caused *more* recycle
overhead than if we'd simply called xfs_iget to set DONTCACHE only on
misses.

The second patch reduces the cost of untrusted iget for a vectored scrub
call by having the scrubv code maintain a separate refcount to the inode
so that the cache will always hit.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'reduce-scrub-iget-overhead-6.10_2024-04-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: only iget the file once when doing vectored scrub-by-handle
  xfs: use dontcache for grabbing inodes during scrub
parents 496baa2c 4ad350ac
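
To make the second change concrete before the diff, here is a minimal userspace sketch of the vectored scrub-by-handle call pattern that benefits from the pinned inode. It is illustrative only and not part of this commit: the XFS_IOC_SCRUBV_METADATA ioctl name, the xfs_scrub_vec/xfs_scrub_vec_head layout beyond the svh_ino, svh_gen and svh_nr fields visible in the diff, the XFS_SCRUB_TYPE_* constants, and the <xfs/xfs.h> header are assumptions about the 6.10 uapi; check the installed headers before relying on them. When svh_ino names a file other than the ioctl file descriptor, the new xchk_scrubv_open_by_handle() added below does one untrusted, dontcache iget and holds that reference across every vector.

/*
 * Illustrative sketch only -- not from this patch.  Assumes the 6.10 scrubv
 * uapi (XFS_IOC_SCRUBV_METADATA, struct xfs_scrub_vec_head/xfs_scrub_vec,
 * XFS_SCRUB_TYPE_* constants) is visible via <xfs/xfs.h> from xfsprogs.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>

/*
 * Scrub the inode core and data fork of (ino, gen) with a single ioctl.
 * fs_fd is any open fd on the target filesystem, e.g. the mount point.
 */
static int scrubv_inode(int fs_fd, uint64_t ino, uint32_t gen)
{
        struct xfs_scrub_vec vecs[2] = {
                { .sv_type = XFS_SCRUB_TYPE_INODE },    /* assumed constant */
                { .sv_type = XFS_SCRUB_TYPE_BMBTD },    /* assumed constant */
        };
        struct xfs_scrub_vec_head head = {
                .svh_ino     = ino,     /* scrub-by-handle target */
                .svh_gen     = gen,     /* must match i_generation or the pin is skipped */
                .svh_nr      = 2,
                .svh_vectors = (uintptr_t)vecs,         /* assumed field name */
        };

        /*
         * One call, one untrusted iget in the kernel; each vector then hits
         * the inode cache instead of repeating the untrusted lookup.
         */
        return ioctl(fs_fd, XFS_IOC_SCRUBV_METADATA, &head);
}
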
@@ -783,7 +783,7 @@ xchk_iget(
 {
         ASSERT(sc->tp != NULL);
 
-        return xfs_iget(sc->mp, sc->tp, inum, XFS_IGET_UNTRUSTED, 0, ipp);
+        return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
 }
 
 /*
@@ -834,8 +834,8 @@ xchk_iget_agi(
         if (error)
                 return error;
 
-        error = xfs_iget(mp, tp, inum,
-                        XFS_IGET_NORETRY | XFS_IGET_UNTRUSTED, 0, ipp);
+        error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0,
+                        ipp);
         if (error == -EAGAIN) {
                 /*
                  * The inode may be in core but temporarily unavailable and may
@@ -1062,12 +1062,6 @@ xchk_irele(
                 spin_lock(&VFS_I(ip)->i_lock);
                 VFS_I(ip)->i_state &= ~I_DONTCACHE;
                 spin_unlock(&VFS_I(ip)->i_lock);
-        } else if (atomic_read(&VFS_I(ip)->i_count) == 1) {
-                /*
-                 * If this is the last reference to the inode and the caller
-                 * permits it, set DONTCACHE to avoid thrashing.
-                 */
-                d_mark_dontcache(VFS_I(ip));
         }
 
         xfs_irele(ip);
@@ -407,6 +407,15 @@ xchk_iscan_iget_retry(
         return -EAGAIN;
 }
 
+/*
+ * For an inode scan, we hold the AGI and want to try to grab a batch of
+ * inodes. Holding the AGI prevents inodegc from clearing freed inodes,
+ * so we must use noretry here. For every inode after the first one in the
+ * batch, we don't want to wait, so we use noretry there too. Finally, use
+ * dontcache to avoid polluting the cache.
+ */
+#define ISCAN_IGET_FLAGS        (XFS_IGET_NORETRY | XFS_IGET_DONTCACHE)
+
 /*
  * Grab an inode as part of an inode scan. While scanning this inode, the
  * caller must ensure that no other threads can modify the inode until a call
@@ -434,7 +443,7 @@ xchk_iscan_iget(
         ASSERT(iscan->__inodes[0] == NULL);
 
         /* Fill the first slot in the inode array. */
-        error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0,
+        error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
                         &iscan->__inodes[idx]);
 
         trace_xchk_iscan_iget(iscan, error);
@@ -507,7 +516,7 @@ xchk_iscan_iget(
                 ASSERT(iscan->__inodes[idx] == NULL);
 
-                error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0,
+                error = xfs_iget(sc->mp, sc->tp, ino, ISCAN_IGET_FLAGS, 0,
                                 &iscan->__inodes[idx]);
                 if (error)
                         break;
@@ -792,6 +792,37 @@ xfs_scrubv_check_barrier(
         return 0;
 }
 
+/*
+ * If the caller provided us with a nonzero inode number that isn't the ioctl
+ * file, try to grab a reference to it to eliminate all further untrusted inode
+ * lookups. If we can't get the inode, let each scrub function try again.
+ */
+STATIC struct xfs_inode *
+xchk_scrubv_open_by_handle(
+        struct xfs_mount                *mp,
+        const struct xfs_scrub_vec_head *head)
+{
+        struct xfs_trans                *tp;
+        struct xfs_inode                *ip;
+        int                             error;
+
+        error = xfs_trans_alloc_empty(mp, &tp);
+        if (error)
+                return NULL;
+
+        error = xfs_iget(mp, tp, head->svh_ino, XCHK_IGET_FLAGS, 0, &ip);
+        xfs_trans_cancel(tp);
+        if (error)
+                return NULL;
+
+        if (VFS_I(ip)->i_generation != head->svh_gen) {
+                xfs_irele(ip);
+                return NULL;
+        }
+
+        return ip;
+}
+
 /* Vectored scrub implementation to reduce ioctl calls. */
 int
 xfs_ioc_scrubv_metadata(
@@ -804,6 +835,7 @@ xfs_ioc_scrubv_metadata(
         struct xfs_scrub_vec            __user *uvectors;
         struct xfs_inode                *ip_in = XFS_I(file_inode(file));
         struct xfs_mount                *mp = ip_in->i_mount;
+        struct xfs_inode                *handle_ip = NULL;
         struct xfs_scrub_vec            *v;
         size_t                          vec_bytes;
         unsigned int                    i;
@@ -848,6 +880,17 @@ xfs_ioc_scrubv_metadata(
                 trace_xchk_scrubv_item(mp, &head, i, v);
         }
 
+        /*
+         * If the caller wants us to do a scrub-by-handle and the file used to
+         * call the ioctl is not the same file, load the incore inode and pin
+         * it across all the scrubv actions to avoid repeated UNTRUSTED
+         * lookups. The reference is not passed to deeper layers of scrub
+         * because each scrubber gets to decide its own strategy and return
+         * values for getting an inode.
+         */
+        if (head.svh_ino && head.svh_ino != ip_in->i_ino)
+                handle_ip = xchk_scrubv_open_by_handle(mp, &head);
+
         /* Run all the scrubbers. */
         for (i = 0, v = vectors; i < head.svh_nr; i++, v++) {
                 struct xfs_scrub_metadata       sm = {
@@ -895,6 +938,8 @@ xfs_ioc_scrubv_metadata(
         }
 
 out_free:
+        if (handle_ip)
+                xfs_irele(handle_ip);
         kfree(vectors);
         return error;
 }
@@ -60,6 +60,13 @@ static inline int xchk_maybe_relax(struct xchk_relax *widget)
 #define XCHK_GFP_FLAGS  ((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
                                          __GFP_RETRY_MAYFAIL))
 
+/*
+ * For opening files by handle for fsck operations, we don't trust the inumber
+ * or the allocation state; therefore, perform an untrusted lookup. We don't
+ * want these inodes to pollute the cache, so mark them for immediate removal.
+ */
+#define XCHK_IGET_FLAGS (XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE)
+
 /* Type info and names for the scrub types. */
 enum xchk_type {
         ST_NONE                 = 1,    /* disabled */