Commit 07b7f2e3 authored by Christoph Hellwig; committed by Darrick J. Wong

xfs: move the btree stats offset into struct btree_ops

The statistics offset is completely static, move it into the btree_ops
structure instead of the cursor.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
parent 90cfae81
...@@ -459,6 +459,7 @@ const struct xfs_btree_ops xfs_bnobt_ops = { ...@@ -459,6 +459,7 @@ const struct xfs_btree_ops xfs_bnobt_ops = {
.key_len = sizeof(xfs_alloc_key_t), .key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF, .lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.dup_cursor = xfs_allocbt_dup_cursor, .dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root, .set_root = xfs_allocbt_set_root,
...@@ -486,6 +487,7 @@ const struct xfs_btree_ops xfs_cntbt_ops = { ...@@ -486,6 +487,7 @@ const struct xfs_btree_ops xfs_cntbt_ops = {
.key_len = sizeof(xfs_alloc_key_t), .key_len = sizeof(xfs_alloc_key_t),
.lru_refs = XFS_ALLOC_BTREE_REF, .lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.dup_cursor = xfs_allocbt_dup_cursor, .dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root, .set_root = xfs_allocbt_set_root,
...@@ -514,22 +516,17 @@ xfs_allocbt_init_common( ...@@ -514,22 +516,17 @@ xfs_allocbt_init_common(
struct xfs_perag *pag, struct xfs_perag *pag,
xfs_btnum_t btnum) xfs_btnum_t btnum)
{ {
const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT); ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
if (btnum == XFS_BTNUM_CNT) { if (btnum == XFS_BTNUM_CNT)
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_cntbt_ops, ops = &xfs_cntbt_ops;
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
} else {
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_bnobt_ops,
mp->m_alloc_maxlevels, xfs_allocbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
return cur; return cur;
} }
......
...@@ -531,6 +531,7 @@ const struct xfs_btree_ops xfs_bmbt_ops = { ...@@ -531,6 +531,7 @@ const struct xfs_btree_ops xfs_bmbt_ops = {
.key_len = sizeof(xfs_bmbt_key_t), .key_len = sizeof(xfs_bmbt_key_t),
.lru_refs = XFS_BMAP_BTREE_REF, .lru_refs = XFS_BMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2),
.dup_cursor = xfs_bmbt_dup_cursor, .dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor, .update_cursor = xfs_bmbt_update_cursor,
...@@ -564,7 +565,6 @@ xfs_bmbt_init_common( ...@@ -564,7 +565,6 @@ xfs_bmbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops, cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache); mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ino.ip = ip; cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0; cur->bc_ino.allocated = 0;
......
...@@ -87,9 +87,11 @@ uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops); ...@@ -87,9 +87,11 @@ uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
* Generic stats interface * Generic stats interface
*/ */
#define XFS_BTREE_STATS_INC(cur, stat) \ #define XFS_BTREE_STATS_INC(cur, stat) \
XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat) XFS_STATS_INC_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val) \ #define XFS_BTREE_STATS_ADD(cur, stat, val) \
XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val) XFS_STATS_ADD_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
enum xbtree_key_contig { enum xbtree_key_contig {
XBTREE_KEY_GAP = 0, XBTREE_KEY_GAP = 0,
...@@ -123,6 +125,9 @@ struct xfs_btree_ops { ...@@ -123,6 +125,9 @@ struct xfs_btree_ops {
/* LRU refcount to set on each btree buffer created */ /* LRU refcount to set on each btree buffer created */
unsigned int lru_refs; unsigned int lru_refs;
/* offset of btree stats array */
unsigned int statoff;
/* cursor operations */ /* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
void (*update_cursor)(struct xfs_btree_cur *src, void (*update_cursor)(struct xfs_btree_cur *src,
...@@ -280,7 +285,6 @@ struct xfs_btree_cur ...@@ -280,7 +285,6 @@ struct xfs_btree_cur
union xfs_btree_irec bc_rec; /* current insert/search record value */ union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */ uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */ uint8_t bc_maxlevels; /* maximum levels for this btree type */
int bc_statoff; /* offset of btree stats array */
/* /*
* Short btree pointers need an agno to be able to turn the pointers * Short btree pointers need an agno to be able to turn the pointers
......
...@@ -403,6 +403,7 @@ const struct xfs_btree_ops xfs_inobt_ops = { ...@@ -403,6 +403,7 @@ const struct xfs_btree_ops xfs_inobt_ops = {
.key_len = sizeof(xfs_inobt_key_t), .key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF, .lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.dup_cursor = xfs_inobt_dup_cursor, .dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root, .set_root = xfs_inobt_set_root,
...@@ -427,6 +428,7 @@ const struct xfs_btree_ops xfs_finobt_ops = { ...@@ -427,6 +428,7 @@ const struct xfs_btree_ops xfs_finobt_ops = {
.key_len = sizeof(xfs_inobt_key_t), .key_len = sizeof(xfs_inobt_key_t),
.lru_refs = XFS_INO_BTREE_REF, .lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.dup_cursor = xfs_inobt_dup_cursor, .dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root, .set_root = xfs_finobt_set_root,
...@@ -456,20 +458,16 @@ xfs_inobt_init_common( ...@@ -456,20 +458,16 @@ xfs_inobt_init_common(
xfs_btnum_t btnum) /* ialloc or free ino btree */ xfs_btnum_t btnum) /* ialloc or free ino btree */
{ {
struct xfs_mount *mp = pag->pag_mount; struct xfs_mount *mp = pag->pag_mount;
const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur; struct xfs_btree_cur *cur;
if (btnum == XFS_BTNUM_INO) { ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_inobt_ops,
M_IGEO(mp)->inobt_maxlevels, if (btnum == XFS_BTNUM_FINO)
xfs_inobt_cur_cache); ops = &xfs_finobt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
} else {
cur = xfs_btree_alloc_cursor(mp, tp, btnum, &xfs_finobt_ops,
M_IGEO(mp)->inobt_maxlevels,
xfs_inobt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
}
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
return cur; return cur;
} }
......
...@@ -322,6 +322,7 @@ const struct xfs_btree_ops xfs_refcountbt_ops = { ...@@ -322,6 +322,7 @@ const struct xfs_btree_ops xfs_refcountbt_ops = {
.key_len = sizeof(struct xfs_refcount_key), .key_len = sizeof(struct xfs_refcount_key),
.lru_refs = XFS_REFC_BTREE_REF, .lru_refs = XFS_REFC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.dup_cursor = xfs_refcountbt_dup_cursor, .dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root, .set_root = xfs_refcountbt_set_root,
...@@ -357,8 +358,6 @@ xfs_refcountbt_init_common( ...@@ -357,8 +358,6 @@ xfs_refcountbt_init_common(
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC, cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
&xfs_refcountbt_ops, mp->m_refc_maxlevels, &xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache); xfs_refcountbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0; cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0; cur->bc_ag.refc.shape_changes = 0;
......
...@@ -479,6 +479,7 @@ const struct xfs_btree_ops xfs_rmapbt_ops = { ...@@ -479,6 +479,7 @@ const struct xfs_btree_ops xfs_rmapbt_ops = {
.key_len = 2 * sizeof(struct xfs_rmap_key), .key_len = 2 * sizeof(struct xfs_rmap_key),
.lru_refs = XFS_RMAP_BTREE_REF, .lru_refs = XFS_RMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.dup_cursor = xfs_rmapbt_dup_cursor, .dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root, .set_root = xfs_rmapbt_set_root,
...@@ -509,8 +510,6 @@ xfs_rmapbt_init_common( ...@@ -509,8 +510,6 @@ xfs_rmapbt_init_common(
/* Overlapping btree; 2 keys per pointer. */ /* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops, cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache); mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ag.pag = xfs_perag_hold(pag); cur->bc_ag.pag = xfs_perag_hold(pag);
return cur; return cur;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment