Commit 1eef0125 authored by Chandan Babu R

Merge tag 'repair-iunlink-6.10_2024-04-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.10-mergeA

xfs: online fsck of iunlink buckets

This series enhances the AGI scrub code to check the unlinked inode
bucket lists for errors, and fixes them if necessary.  Now that iunlink
pointer updates are virtual log items, we can batch updates pretty
efficiently in the logging code.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'repair-iunlink-6.10_2024-04-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: repair AGI unlinked inode bucket lists
  xfs: hoist AGI repair context to a heap object
  xfs: check AGI unlinked inode buckets
parents 0313dd8f ab97f4b1
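
For orientation, and not part of the patch itself: the AGI holds XFS_AGI_UNLINKED_BUCKETS list heads (64 in the current on-disk format), and an unlinked inode is chained into the bucket selected by its AG inode number modulo the bucket count. A minimal userspace sketch of that invariant, with a made-up agino value:

	#include <stdio.h>
	#include <stdint.h>

	#define XFS_AGI_UNLINKED_BUCKETS	64	/* matches the on-disk AGI layout */

	int main(void)
	{
		uint32_t agino = 0x3039;	/* hypothetical AG inode number */

		/* The scrub code below flags the AGI as corrupt if an inode
		 * turns up in any bucket other than this one. */
		printf("agino 0x%x belongs in AGI unlinked bucket %u\n",
				agino, agino % XFS_AGI_UNLINKED_BUCKETS);
		return 0;
	}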
@@ -15,6 +15,7 @@
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_inode.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -865,6 +866,43 @@ xchk_agi_xref(
	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Check the unlinked buckets for links to bad inodes. We hold the AGI, so
 * there cannot be any threads updating unlinked list pointers in this AG.
 */
STATIC void
xchk_iunlink(
	struct xfs_scrub	*sc,
	struct xfs_agi		*agi)
{
	unsigned int		i;
	struct xfs_inode	*ip;

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		xfs_agino_t	agino = be32_to_cpu(agi->agi_unlinked[i]);

		while (agino != NULLAGINO) {
			if (agino % XFS_AGI_UNLINKED_BUCKETS != i) {
				xchk_block_set_corrupt(sc, sc->sa.agi_bp);
				return;
			}

			ip = xfs_iunlink_lookup(sc->sa.pag, agino);
			if (!ip) {
				xchk_block_set_corrupt(sc, sc->sa.agi_bp);
				return;
			}

			if (!xfs_inode_on_unlinked_list(ip)) {
				xchk_block_set_corrupt(sc, sc->sa.agi_bp);
				return;
			}

			agino = ip->i_next_unlinked;
		}
	}
}

/* Scrub the AGI. */
int
xchk_agi(
@@ -949,6 +987,8 @@ xchk_agi(
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_iunlink(sc, agi);

	xchk_agi_xref(sc);
out:
	return error;
...
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2018-2024 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_AGINO_BITMAP_H__
#define __XFS_SCRUB_AGINO_BITMAP_H__

/* Bitmaps, but type-checked for xfs_agino_t */

struct xagino_bitmap {
	struct xbitmap32	aginobitmap;
};

static inline void xagino_bitmap_init(struct xagino_bitmap *bitmap)
{
	xbitmap32_init(&bitmap->aginobitmap);
}

static inline void xagino_bitmap_destroy(struct xagino_bitmap *bitmap)
{
	xbitmap32_destroy(&bitmap->aginobitmap);
}

static inline int xagino_bitmap_clear(struct xagino_bitmap *bitmap,
		xfs_agino_t agino, unsigned int len)
{
	return xbitmap32_clear(&bitmap->aginobitmap, agino, len);
}

static inline int xagino_bitmap_set(struct xagino_bitmap *bitmap,
		xfs_agino_t agino, unsigned int len)
{
	return xbitmap32_set(&bitmap->aginobitmap, agino, len);
}

static inline bool xagino_bitmap_test(struct xagino_bitmap *bitmap,
		xfs_agino_t agino, unsigned int *len)
{
	return xbitmap32_test(&bitmap->aginobitmap, agino, len);
}

static inline int xagino_bitmap_walk(struct xagino_bitmap *bitmap,
		xbitmap32_walk_fn fn, void *priv)
{
	return xbitmap32_walk(&bitmap->aginobitmap, fn, priv);
}

#endif /* __XFS_SCRUB_AGINO_BITMAP_H__ */
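
A usage sketch for the wrapper above, not taken from this series: the walk-callback signature of (start, len, priv) is assumed from the xbitmap32 helpers that the wrapper delegates to.

	/* Hypothetical walk callback; assumes xbitmap32_walk_fn takes
	 * (uint32_t start, uint32_t len, void *priv). */
	static int
	xagino_dump_range(uint32_t start, uint32_t len, void *priv)
	{
		pr_info("aginos 0x%x-0x%x\n", start, start + len - 1);
		return 0;
	}

	static int
	xagino_bitmap_example(void)
	{
		struct xagino_bitmap	bitmap;
		int			error;

		xagino_bitmap_init(&bitmap);
		error = xagino_bitmap_set(&bitmap, 100, 3);	/* mark aginos 100-102 */
		if (!error)
			error = xagino_bitmap_walk(&bitmap, xagino_dump_range, NULL);
		xagino_bitmap_destroy(&bitmap);
		return error;
	}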
@@ -2757,6 +2757,261 @@ DEFINE_EVENT(xrep_symlink_class, name, \
DEFINE_XREP_SYMLINK_EVENT(xrep_symlink_rebuild);
DEFINE_XREP_SYMLINK_EVENT(xrep_symlink_reset_fork);

TRACE_EVENT(xrep_iunlink_visit,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t bucket_agino, struct xfs_inode *ip),
TP_ARGS(pag, bucket, bucket_agino, ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(unsigned int, bucket)
__field(xfs_agino_t, bucket_agino)
__field(xfs_agino_t, prev_agino)
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->agino = XFS_INO_TO_AGINO(pag->pag_mount, ip->i_ino);
__entry->bucket = bucket;
__entry->bucket_agino = bucket_agino;
__entry->prev_agino = ip->i_prev_unlinked;
__entry->next_agino = ip->i_next_unlinked;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x bucket_agino 0x%x prev_agino 0x%x next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->bucket,
__entry->agino,
__entry->bucket_agino,
__entry->prev_agino,
__entry->next_agino)
);
TRACE_EVENT(xrep_iunlink_reload_next,
TP_PROTO(struct xfs_inode *ip, xfs_agino_t prev_agino),
TP_ARGS(ip, prev_agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(xfs_agino_t, old_prev_agino)
__field(xfs_agino_t, prev_agino)
__field(xfs_agino_t, next_agino)
__field(unsigned int, nlink)
),
TP_fast_assign(
__entry->dev = ip->i_mount->m_super->s_dev;
__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
__entry->old_prev_agino = ip->i_prev_unlinked;
__entry->prev_agino = prev_agino;
__entry->next_agino = ip->i_next_unlinked;
__entry->nlink = VFS_I(ip)->i_nlink;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x nlink %u old_prev_agino %u prev_agino 0x%x next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agino % XFS_AGI_UNLINKED_BUCKETS,
__entry->agino,
__entry->nlink,
__entry->old_prev_agino,
__entry->prev_agino,
__entry->next_agino)
);
TRACE_EVENT(xrep_iunlink_reload_ondisk,
TP_PROTO(struct xfs_inode *ip),
TP_ARGS(ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(unsigned int, nlink)
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = ip->i_mount->m_super->s_dev;
__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
__entry->nlink = VFS_I(ip)->i_nlink;
__entry->next_agino = ip->i_next_unlinked;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x nlink %u next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agino % XFS_AGI_UNLINKED_BUCKETS,
__entry->agino,
__entry->nlink,
__entry->next_agino)
);
TRACE_EVENT(xrep_iunlink_walk_ondisk_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t prev_agino, xfs_agino_t next_agino),
TP_ARGS(pag, bucket, prev_agino, next_agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, bucket)
__field(xfs_agino_t, prev_agino)
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bucket = bucket;
__entry->prev_agino = prev_agino;
__entry->next_agino = next_agino;
),
TP_printk("dev %d:%d agno 0x%x bucket %u prev_agino 0x%x next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->bucket,
__entry->prev_agino,
__entry->next_agino)
);
DECLARE_EVENT_CLASS(xrep_iunlink_resolve_class,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t prev_agino, xfs_agino_t next_agino),
TP_ARGS(pag, bucket, prev_agino, next_agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, bucket)
__field(xfs_agino_t, prev_agino)
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bucket = bucket;
__entry->prev_agino = prev_agino;
__entry->next_agino = next_agino;
),
TP_printk("dev %d:%d agno 0x%x bucket %u prev_agino 0x%x next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->bucket,
__entry->prev_agino,
__entry->next_agino)
);
#define DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(name) \
DEFINE_EVENT(xrep_iunlink_resolve_class, name, \
TP_PROTO(struct xfs_perag *pag, unsigned int bucket, \
xfs_agino_t prev_agino, xfs_agino_t next_agino), \
TP_ARGS(pag, bucket, prev_agino, next_agino))
DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(xrep_iunlink_resolve_uncached);
DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(xrep_iunlink_resolve_wronglist);
DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(xrep_iunlink_resolve_nolist);
DEFINE_REPAIR_IUNLINK_RESOLVE_EVENT(xrep_iunlink_resolve_ok);
TRACE_EVENT(xrep_iunlink_relink_next,
TP_PROTO(struct xfs_inode *ip, xfs_agino_t next_agino),
TP_ARGS(ip, next_agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(xfs_agino_t, next_agino)
__field(xfs_agino_t, new_next_agino)
),
TP_fast_assign(
__entry->dev = ip->i_mount->m_super->s_dev;
__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
__entry->next_agino = ip->i_next_unlinked;
__entry->new_next_agino = next_agino;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x next_agino 0x%x -> 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agino % XFS_AGI_UNLINKED_BUCKETS,
__entry->agino,
__entry->next_agino,
__entry->new_next_agino)
);
TRACE_EVENT(xrep_iunlink_relink_prev,
TP_PROTO(struct xfs_inode *ip, xfs_agino_t prev_agino),
TP_ARGS(ip, prev_agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agino_t, agino)
__field(xfs_agino_t, prev_agino)
__field(xfs_agino_t, new_prev_agino)
),
TP_fast_assign(
__entry->dev = ip->i_mount->m_super->s_dev;
__entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
__entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
__entry->prev_agino = ip->i_prev_unlinked;
__entry->new_prev_agino = prev_agino;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x prev_agino 0x%x -> 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agino % XFS_AGI_UNLINKED_BUCKETS,
__entry->agino,
__entry->prev_agino,
__entry->new_prev_agino)
);
TRACE_EVENT(xrep_iunlink_add_to_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t agino, xfs_agino_t curr_head),
TP_ARGS(pag, bucket, agino, curr_head),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, bucket)
__field(xfs_agino_t, agino)
__field(xfs_agino_t, next_agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bucket = bucket;
__entry->agino = agino;
__entry->next_agino = curr_head;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x next_agino 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->bucket,
__entry->agino,
__entry->next_agino)
);
TRACE_EVENT(xrep_iunlink_commit_bucket,
TP_PROTO(struct xfs_perag *pag, unsigned int bucket,
xfs_agino_t old_agino, xfs_agino_t agino),
TP_ARGS(pag, bucket, old_agino, agino),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(unsigned int, bucket)
__field(xfs_agino_t, old_agino)
__field(xfs_agino_t, agino)
),
TP_fast_assign(
__entry->dev = pag->pag_mount->m_super->s_dev;
__entry->agno = pag->pag_agno;
__entry->bucket = bucket;
__entry->old_agino = old_agino;
__entry->agino = agino;
),
TP_printk("dev %d:%d agno 0x%x bucket %u agino 0x%x -> 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->bucket,
__entry->old_agino,
__entry->agino)
);
#endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */
#endif /* _TRACE_XFS_SCRUB_TRACE_H */
...
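
Each TRACE_EVENT above expands to a trace_<name>() helper that the repair code calls at the matching step. A hedged sketch of emitting the bucket-commit event; the variable names here are illustrative, not lifted from this series:

	/* Illustrative fragment: record the old and new bucket heads when
	 * repair commits a rebuilt unlinked list to the AGI. */
	trace_xrep_iunlink_commit_bucket(pag, bucket,
			be32_to_cpu(agi->agi_unlinked[bucket]), agino);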
@@ -1985,7 +1985,7 @@ xfs_inactive(
 * only unlinked, referenced inodes can be on the unlinked inode list. If we
 * don't find the inode in cache, then let the caller handle the situation.
 */
-static struct xfs_inode *
+struct xfs_inode *
xfs_iunlink_lookup(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
...
@@ -619,6 +619,7 @@ bool xfs_inode_needs_inactive(struct xfs_inode *ip);
int xfs_iunlink(struct xfs_trans *tp, struct xfs_inode *ip);
int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
		struct xfs_inode *ip);
struct xfs_inode *xfs_iunlink_lookup(struct xfs_perag *pag, xfs_agino_t agino);
void xfs_end_io(struct work_struct *work);
...
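
With the declaration now public, out-of-file callers follow the contract described in the xfs_inode.c comment above; in sketch form, with an illustrative error policy:

	/* Sketch: a NULL return means the inode was not in cache; the
	 * caller chooses how to react (scrub treats it as corruption). */
	struct xfs_inode	*ip = xfs_iunlink_lookup(pag, agino);

	if (!ip)
		return -EFSCORRUPTED;	/* illustrative policy, not from this patch */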