Commit a1954242 authored by Darrick J. Wong

xfs: hoist inode cluster checks out of loop

Hoist the inode cluster checks out of the inobt record check loop into
a separate function in preparation for refactoring of that loop.  No
functional changes here; that's in the next patch.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 22234c62
...@@ -188,19 +188,19 @@ xchk_iallocbt_check_cluster_freemask( ...@@ -188,19 +188,19 @@ xchk_iallocbt_check_cluster_freemask(
return 0; return 0;
} }
/* Make sure the free mask is consistent with what the inodes think. */ /* Check an inode cluster. */
STATIC int STATIC int
xchk_iallocbt_check_freemask( xchk_iallocbt_check_cluster(
struct xchk_btree *bs, struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec) struct xfs_inobt_rec_incore *irec,
xfs_agino_t agino)
{ {
struct xfs_imap imap; struct xfs_imap imap;
struct xfs_mount *mp = bs->cur->bc_mp; struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_dinode *dip; struct xfs_dinode *dip;
struct xfs_buf *bp; struct xfs_buf *bp;
xfs_ino_t fsino; xfs_ino_t fsino;
xfs_agino_t nr_inodes; unsigned int nr_inodes;
xfs_agino_t agino;
xfs_agino_t chunkino; xfs_agino_t chunkino;
xfs_agino_t clusterino; xfs_agino_t clusterino;
xfs_agblock_t agbno; xfs_agblock_t agbno;
...@@ -212,60 +212,71 @@ xchk_iallocbt_check_freemask( ...@@ -212,60 +212,71 @@ xchk_iallocbt_check_freemask(
nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK, nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
mp->m_inodes_per_cluster); mp->m_inodes_per_cluster);
for (agino = irec->ir_startino; fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
agino < irec->ir_startino + XFS_INODES_PER_CHUNK; chunkino = agino - irec->ir_startino;
agino += mp->m_inodes_per_cluster) { agbno = XFS_AGINO_TO_AGBNO(mp, agino);
fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
chunkino = agino - irec->ir_startino;
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
/* Compute the holemask mask for this cluster. */
for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
clusterino += XFS_INODES_PER_HOLEMASK_BIT)
holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
XFS_INODES_PER_HOLEMASK_BIT);
/* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
continue;
}
/* If any part of this is a hole, skip it. */ /* Compute the holemask mask for this cluster. */
if (ir_holemask) { for (clusterino = 0, holemask = 0; clusterino < nr_inodes;
xchk_xref_is_not_owned_by(bs->sc, agbno, clusterino += XFS_INODES_PER_HOLEMASK_BIT)
mp->m_blocks_per_cluster, holemask |= XFS_INOBT_MASK((chunkino + clusterino) /
&XFS_RMAP_OINFO_INODES); XFS_INODES_PER_HOLEMASK_BIT);
continue;
} /* The whole cluster must be a hole or not a hole. */
ir_holemask = (irec->ir_holemask & holemask);
if (ir_holemask != holemask && ir_holemask != 0) {
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
return 0;
}
xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster, /* If any part of this is a hole, skip it. */
if (ir_holemask) {
xchk_xref_is_not_owned_by(bs->sc, agbno,
mp->m_blocks_per_cluster,
&XFS_RMAP_OINFO_INODES); &XFS_RMAP_OINFO_INODES);
return 0;
}
/* Grab the inode cluster buffer. */ xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, &XFS_RMAP_OINFO_INODES);
agbno);
imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster); /* Grab the inode cluster buffer. */
imap.im_boffset = 0; imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, agbno);
imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, imap.im_boffset = 0;
&dip, &bp, 0, 0);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0,
&error))
continue;
/* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
error = xchk_iallocbt_check_cluster_freemask(bs,
fsino, chunkino, clusterino, irec, bp);
if (error) {
xfs_trans_brelse(bs->cur->bc_tp, bp);
return error;
}
}
xfs_trans_brelse(bs->cur->bc_tp, bp); error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &bp, 0, 0);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
return 0;
/* Which inodes are free? */
for (clusterino = 0; clusterino < nr_inodes; clusterino++) {
error = xchk_iallocbt_check_cluster_freemask(bs, fsino,
chunkino, clusterino, irec, bp);
if (error)
break;
}
xfs_trans_brelse(bs->cur->bc_tp, bp);
return error;
}
/* Make sure the free mask is consistent with what the inodes think. */
STATIC int
xchk_iallocbt_check_freemask(
struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
xfs_agino_t agino;
int error = 0;
for (agino = irec->ir_startino;
agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
agino += mp->m_inodes_per_cluster) {
error = xchk_iallocbt_check_cluster(bs, irec, agino);
if (error)
break;
} }
return error; return error;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment