Commit dc04db2a authored by Dave Chinner's avatar Dave Chinner Committed by Dave Chinner

xfs: detect self referencing btree sibling pointers

To catch the obvious graph cycle problem and hence potential endless
looping.
Signed-off-by: default avatarDave Chinner <dchinner@redhat.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarDarrick J. Wong <djwong@kernel.org>
Signed-off-by: default avatarDave Chinner <david@fromorbit.com>
parent a44a027a
...@@ -51,6 +51,52 @@ xfs_btree_magic( ...@@ -51,6 +51,52 @@ xfs_btree_magic(
return magic; return magic;
} }
/*
 * Validate a long-format btree sibling pointer.
 *
 * Returns NULL when the pointer is NULLFSBLOCK or passes validation,
 * otherwise the address of the failing check.  A sibling that points
 * back at the block itself (fsb) is rejected outright, as that would
 * create a cycle in the sibling chain.  A negative level indicates we
 * have no cursor (verifier context), so only a plain fsbno range check
 * can be performed.
 */
static xfs_failaddr_t
xfs_btree_check_lblock_siblings(
	struct xfs_mount	*mp,
	struct xfs_btree_cur	*cur,
	int			level,
	xfs_fsblock_t		fsb,
	xfs_fsblock_t		sibling)
{
	bool			valid;

	/* An unset sibling pointer is always valid. */
	if (sibling == NULLFSBLOCK)
		return NULL;

	/* Self-reference means a graph cycle - corrupt. */
	if (sibling == fsb)
		return __this_address;

	/* Full cursor-based check when possible, range check otherwise. */
	if (level >= 0)
		valid = xfs_btree_check_lptr(cur, sibling, level + 1);
	else
		valid = xfs_verify_fsbno(mp, sibling);

	return valid ? NULL : __this_address;
}
/*
 * Validate a short-format btree sibling pointer.
 *
 * Returns NULL when the pointer is NULLAGBLOCK or passes validation,
 * otherwise the address of the failing check.  A sibling that points
 * back at the block's own agbno is rejected outright, as that would
 * create a cycle in the sibling chain.  A negative level indicates we
 * have no cursor (verifier context), so only a plain agbno range check
 * against the given AG can be performed.
 */
static xfs_failaddr_t
xfs_btree_check_sblock_siblings(
	struct xfs_mount	*mp,
	struct xfs_btree_cur	*cur,
	int			level,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_agblock_t		sibling)
{
	bool			valid;

	/* An unset sibling pointer is always valid. */
	if (sibling == NULLAGBLOCK)
		return NULL;

	/* Self-reference means a graph cycle - corrupt. */
	if (sibling == agbno)
		return __this_address;

	/* Full cursor-based check when possible, range check otherwise. */
	if (level >= 0)
		valid = xfs_btree_check_sptr(cur, sibling, level + 1);
	else
		valid = xfs_verify_agbno(mp, agno, sibling);

	return valid ? NULL : __this_address;
}
/* /*
* Check a long btree block header. Return the address of the failing check, * Check a long btree block header. Return the address of the failing check,
* or NULL if everything is ok. * or NULL if everything is ok.
...@@ -65,6 +111,8 @@ __xfs_btree_check_lblock( ...@@ -65,6 +111,8 @@ __xfs_btree_check_lblock(
struct xfs_mount *mp = cur->bc_mp; struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum; xfs_btnum_t btnum = cur->bc_btnum;
int crc = xfs_has_crc(mp); int crc = xfs_has_crc(mp);
xfs_failaddr_t fa;
xfs_fsblock_t fsb = NULLFSBLOCK;
if (crc) { if (crc) {
if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)) if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
...@@ -83,16 +131,16 @@ __xfs_btree_check_lblock( ...@@ -83,16 +131,16 @@ __xfs_btree_check_lblock(
if (be16_to_cpu(block->bb_numrecs) > if (be16_to_cpu(block->bb_numrecs) >
cur->bc_ops->get_maxrecs(cur, level)) cur->bc_ops->get_maxrecs(cur, level))
return __this_address; return __this_address;
if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
!xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_leftsib),
level + 1))
return __this_address;
if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
!xfs_btree_check_lptr(cur, be64_to_cpu(block->bb_u.l.bb_rightsib),
level + 1))
return __this_address;
return NULL; if (bp)
fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
be64_to_cpu(block->bb_u.l.bb_leftsib));
if (!fa)
fa = xfs_btree_check_lblock_siblings(mp, cur, level, fsb,
be64_to_cpu(block->bb_u.l.bb_rightsib));
return fa;
} }
/* Check a long btree block header. */ /* Check a long btree block header. */
...@@ -130,6 +178,9 @@ __xfs_btree_check_sblock( ...@@ -130,6 +178,9 @@ __xfs_btree_check_sblock(
struct xfs_mount *mp = cur->bc_mp; struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum; xfs_btnum_t btnum = cur->bc_btnum;
int crc = xfs_has_crc(mp); int crc = xfs_has_crc(mp);
xfs_failaddr_t fa;
xfs_agblock_t agbno = NULLAGBLOCK;
xfs_agnumber_t agno = NULLAGNUMBER;
if (crc) { if (crc) {
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid)) if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
...@@ -146,16 +197,18 @@ __xfs_btree_check_sblock( ...@@ -146,16 +197,18 @@ __xfs_btree_check_sblock(
if (be16_to_cpu(block->bb_numrecs) > if (be16_to_cpu(block->bb_numrecs) >
cur->bc_ops->get_maxrecs(cur, level)) cur->bc_ops->get_maxrecs(cur, level))
return __this_address; return __this_address;
if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
!xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_leftsib),
level + 1))
return __this_address;
if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
!xfs_btree_check_sptr(cur, be32_to_cpu(block->bb_u.s.bb_rightsib),
level + 1))
return __this_address;
return NULL; if (bp) {
agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
}
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno, agbno,
be32_to_cpu(block->bb_u.s.bb_leftsib));
if (!fa)
fa = xfs_btree_check_sblock_siblings(mp, cur, level, agno,
agbno, be32_to_cpu(block->bb_u.s.bb_rightsib));
return fa;
} }
/* Check a short btree block header. */ /* Check a short btree block header. */
...@@ -4271,6 +4324,21 @@ xfs_btree_visit_block( ...@@ -4271,6 +4324,21 @@ xfs_btree_visit_block(
if (xfs_btree_ptr_is_null(cur, &rptr)) if (xfs_btree_ptr_is_null(cur, &rptr))
return -ENOENT; return -ENOENT;
/*
* We only visit blocks once in this walk, so we have to avoid the
* internal xfs_btree_lookup_get_block() optimisation where it will
* return the same block without checking if the right sibling points
* back to us and creates a cyclic reference in the btree.
*/
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (be64_to_cpu(rptr.l) == XFS_DADDR_TO_FSB(cur->bc_mp,
xfs_buf_daddr(bp)))
return -EFSCORRUPTED;
} else {
if (be32_to_cpu(rptr.s) == xfs_daddr_to_agbno(cur->bc_mp,
xfs_buf_daddr(bp)))
return -EFSCORRUPTED;
}
return xfs_btree_lookup_get_block(cur, level, &rptr, &block); return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
} }
...@@ -4445,20 +4513,21 @@ xfs_btree_lblock_verify( ...@@ -4445,20 +4513,21 @@ xfs_btree_lblock_verify(
{ {
struct xfs_mount *mp = bp->b_mount; struct xfs_mount *mp = bp->b_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
xfs_fsblock_t fsb;
xfs_failaddr_t fa;
/* numrecs verification */ /* numrecs verification */
if (be16_to_cpu(block->bb_numrecs) > max_recs) if (be16_to_cpu(block->bb_numrecs) > max_recs)
return __this_address; return __this_address;
/* sibling pointer verification */ /* sibling pointer verification */
if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && fsb = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
!xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
return __this_address; be64_to_cpu(block->bb_u.l.bb_leftsib));
if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && if (!fa)
!xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))) fa = xfs_btree_check_lblock_siblings(mp, NULL, -1, fsb,
return __this_address; be64_to_cpu(block->bb_u.l.bb_rightsib));
return fa;
return NULL;
} }
/** /**
...@@ -4499,7 +4568,9 @@ xfs_btree_sblock_verify( ...@@ -4499,7 +4568,9 @@ xfs_btree_sblock_verify(
{ {
struct xfs_mount *mp = bp->b_mount; struct xfs_mount *mp = bp->b_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
xfs_agblock_t agno; xfs_agnumber_t agno;
xfs_agblock_t agbno;
xfs_failaddr_t fa;
/* numrecs verification */ /* numrecs verification */
if (be16_to_cpu(block->bb_numrecs) > max_recs) if (be16_to_cpu(block->bb_numrecs) > max_recs)
...@@ -4507,14 +4578,13 @@ xfs_btree_sblock_verify( ...@@ -4507,14 +4578,13 @@ xfs_btree_sblock_verify(
/* sibling pointer verification */ /* sibling pointer verification */
agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp)); agno = xfs_daddr_to_agno(mp, xfs_buf_daddr(bp));
if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) && agbno = xfs_daddr_to_agbno(mp, xfs_buf_daddr(bp));
!xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib))) fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
return __this_address; be32_to_cpu(block->bb_u.s.bb_leftsib));
if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) && if (!fa)
!xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib))) fa = xfs_btree_check_sblock_siblings(mp, NULL, -1, agno, agbno,
return __this_address; be32_to_cpu(block->bb_u.s.bb_rightsib));
return fa;
return NULL;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment