Commit f9e325bf authored by Darrick J. Wong

xfs: drop XFS_BTREE_CRC_BLOCKS

All existing btree types set XFS_BTREE_CRC_BLOCKS when running against a
V5 filesystem.  All currently proposed btree types are V5 only and use
the richer XFS_BTREE_CRC_BLOCKS format.  Therefore, we can drop this
flag and change the conditional to xfs_has_crc.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 056d22c8
...@@ -526,9 +526,6 @@ xfs_allocbt_init_common( ...@@ -526,9 +526,6 @@ xfs_allocbt_init_common(
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
return cur; return cur;
} }
......
...@@ -554,8 +554,6 @@ xfs_bmbt_init_common( ...@@ -554,8 +554,6 @@ xfs_bmbt_init_common(
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE; cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ino.ip = ip; cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0; cur->bc_ino.allocated = 0;
......
...@@ -598,11 +598,11 @@ xfs_btree_dup_cursor( ...@@ -598,11 +598,11 @@ xfs_btree_dup_cursor(
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{ {
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) if (xfs_has_crc(cur->bc_mp))
return XFS_BTREE_LBLOCK_CRC_LEN; return XFS_BTREE_LBLOCK_CRC_LEN;
return XFS_BTREE_LBLOCK_LEN; return XFS_BTREE_LBLOCK_LEN;
} }
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) if (xfs_has_crc(cur->bc_mp))
return XFS_BTREE_SBLOCK_CRC_LEN; return XFS_BTREE_SBLOCK_CRC_LEN;
return XFS_BTREE_SBLOCK_LEN; return XFS_BTREE_SBLOCK_LEN;
} }
...@@ -1576,7 +1576,7 @@ xfs_btree_log_block( ...@@ -1576,7 +1576,7 @@ xfs_btree_log_block(
if (bp) { if (bp) {
int nbits; int nbits;
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { if (xfs_has_crc(cur->bc_mp)) {
/* /*
* We don't log the CRC when updating a btree * We don't log the CRC when updating a btree
* block but instead recreate it during log * block but instead recreate it during log
...@@ -3048,7 +3048,7 @@ xfs_btree_new_iroot( ...@@ -3048,7 +3048,7 @@ xfs_btree_new_iroot(
* In that case have to also ensure the blkno remains correct * In that case have to also ensure the blkno remains correct
*/ */
memcpy(cblock, block, xfs_btree_block_len(cur)); memcpy(cblock, block, xfs_btree_block_len(cur));
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) { if (xfs_has_crc(cur->bc_mp)) {
__be64 bno = cpu_to_be64(xfs_buf_daddr(cbp)); __be64 bno = cpu_to_be64(xfs_buf_daddr(cbp));
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
cblock->bb_u.l.bb_blkno = bno; cblock->bb_u.l.bb_blkno = bno;
......
...@@ -308,7 +308,6 @@ xfs_btree_cur_sizeof(unsigned int nlevels) ...@@ -308,7 +308,6 @@ xfs_btree_cur_sizeof(unsigned int nlevels)
#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */ #define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */ #define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */ #define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */ #define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
/* /*
* The root of this btree is a fakeroot structure so that we can stage a btree * The root of this btree is a fakeroot structure so that we can stage a btree
......
...@@ -466,9 +466,6 @@ xfs_inobt_init_common( ...@@ -466,9 +466,6 @@ xfs_inobt_init_common(
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
} }
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
return cur; return cur;
} }
......
...@@ -357,8 +357,6 @@ xfs_refcountbt_init_common( ...@@ -357,8 +357,6 @@ xfs_refcountbt_init_common(
xfs_refcountbt_cur_cache); xfs_refcountbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0; cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0; cur->bc_ag.refc.shape_changes = 0;
......
...@@ -505,7 +505,7 @@ xfs_rmapbt_init_common( ...@@ -505,7 +505,7 @@ xfs_rmapbt_init_common(
/* Overlapping btree; 2 keys per pointer. */ /* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops, cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache); mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING; cur->bc_flags = XFS_BTREE_OVERLAPPING;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2); cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment