Commit 681cb87b authored by Chandan Babu R


Merge tag 'btree-geometry-in-ops-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.9-mergeC

xfs: move btree geometry to ops struct

This patchset prepares the generic btree code to allow for the creation
of new btree types outside of libxfs.  The end goal here is for online
fsck to be able to create its own in-memory btrees that will be used to
improve the performance (and reduce the memory requirements) of the
refcount btree.

To enable this, I decided that the btree ops structure is the ideal
place to encode all of the geometry information about a btree.  The
btree ops structure already contains the buffer ops (and hence the
btree block magic numbers) as well as the key and record sizes, so it
doesn't seem all that far-fetched to encode there the XFS_BTREE_ flags
that determine the geometry (ROOT_IN_INODE, LONG_PTRS, etc.).
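
To make this concrete, here is the reorganized ops structure as it
appears in the xfs_btree.h hunk later on this page, abridged to the
type and geometry fields (the cursor-operation callbacks are elided):

enum xfs_btree_type {
	XFS_BTREE_TYPE_AG,
	XFS_BTREE_TYPE_INODE,
};

struct xfs_btree_ops {
	/* Type of btree - AG-rooted or inode-rooted */
	enum xfs_btree_type	type;

	/* XFS_BTGEO_* flags that determine the geometry of the btree */
	unsigned int		geom_flags;

	/* size of the key, pointer, and record structures */
	size_t			key_len;
	size_t			ptr_len;
	size_t			rec_len;

	/* LRU refcount to set on each btree buffer created */
	unsigned int		lru_refs;

	/* offset of btree stats array */
	unsigned int		statoff;

	/* cursor operations follow ... */
};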

The rest of the patchset cleans up the btree functions that initialize
btree blocks and btree buffers.  The bulk of this work replaces the
btree-geometry-related function call arguments with a single pointer to
the ops structure, and then cleans up everything else around that.  As
a side effect, we rename the functions; their new prototypes are
sketched below.
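
For example, the two block-initialization helpers end up with the
prototypes below (copied from the xfs_btree.h hunk further down); the
old xfs_btree_init_block()/xfs_btree_init_block_int() pair took a
btnum (plus, for _init_block_int, a daddr and geometry flags) where
the new ones take the ops pointer:

void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
		const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
		__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
		struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
		__u16 level, __u16 numrecs, __u64 owner);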

Later, Christoph Hellwig and I merged in a bunch more cleanups that he
had wanted to do for a while.  All the btree geometry information is
now in the btree ops structure; we've created an explicit btree type
(ag, inode, mem) and moved the per-btree-type information to a separate
union, as sketched below.
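
Condensed from the xfs_btree.h hunk below, the per-btree state in
struct xfs_btree_cur now splits into a per-type union (selected by the
btree type in the ops structure) and a per-format union for the bmap
and refcount btrees:

	/* per-type information */
	union {
		struct {			/* inode-rooted btrees */
			struct xfs_inode	*ip;
			short			forksize;
			char			whichfork;
			struct xbtree_ifakeroot	*ifake;	/* for staging cursor */
		} bc_ino;
		struct {			/* AG-rooted btrees */
			struct xfs_perag	*pag;
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* for staging cursor */
		} bc_ag;
	};

	/* per-format private data */
	union {
		struct {
			int			allocated;
		} bc_bmap;			/* bmapbt */
		struct {
			unsigned int		nr_ops;		/* # record updates */
			unsigned int		shape_changes;	/* # of extent splits */
		} bc_refc;			/* refcountbt */
	};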
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'btree-geometry-in-ops-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: create predicate to determine if cursor is at inode root level
  xfs: split the per-btree union in struct xfs_btree_cur
  xfs: split out a btree type from the btree ops geometry flags
  xfs: store the btree pointer length in struct xfs_btree_ops
  xfs: factor out a btree block owner check
  xfs: factor out a xfs_btree_owner helper
  xfs: move the btree stats offset into struct btree_ops
  xfs: move lru refs to the btree ops structure
  xfs: set btree block buffer ops in _init_buf
  xfs: remove the unnecessary daddr parameter to _init_block
  xfs: btree convert xfs_btree_init_block to xfs_btree_init_buf calls
  xfs: rename btree block/buffer init functions
  xfs: initialize btree blocks using btree_ops structure
  xfs: extern some btree ops structures
  xfs: turn the allocbt cursor active field into a btree flag
  xfs: consolidate the xfs_alloc_lookup_* helpers
  xfs: remove bc_ino.flags
  xfs: encode the btree geometry flags in the btree ops structure
  xfs: fix imprecise logic in xchk_btree_check_block_owner
  xfs: drop XFS_BTREE_CRC_BLOCKS
  xfs: set the btree cursor bc_ops in xfs_btree_alloc_cursor
  xfs: consolidate btree block allocation tracepoints
  xfs: consolidate btree block freeing tracepoints
parents 5d1bd19d f73def90
......@@ -492,7 +492,7 @@ xfs_btroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
}
/* Finish initializing a free space btree. */
......@@ -550,7 +550,7 @@ xfs_freesp_init_recs(
}
/*
* Alloc btree root block init functions
* bnobt/cntbt btree root block init functions
*/
static void
xfs_bnoroot_init(
......@@ -558,17 +558,7 @@ xfs_bnoroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
static void
xfs_cntroot_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
......@@ -584,7 +574,7 @@ xfs_rmaproot_init(
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_rmap_rec *rrec;
xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);
/*
* mark the AG header regions as static metadata The BNO
......@@ -797,7 +787,7 @@ struct xfs_aghdr_grow_data {
size_t numblks;
const struct xfs_buf_ops *ops;
aghdr_init_work_f work;
xfs_btnum_t type;
const struct xfs_btree_ops *bc_ops;
bool need_init;
};
......@@ -851,13 +841,15 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_bnobt_buf_ops,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_bnobt_ops,
.need_init = true
},
{ /* CNT root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_cntbt_buf_ops,
.work = &xfs_cntroot_init,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_cntbt_ops,
.need_init = true
},
{ /* INO root block */
......@@ -865,7 +857,7 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_inobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_INO,
.bc_ops = &xfs_inobt_ops,
.need_init = true
},
{ /* FINO root block */
......@@ -873,7 +865,7 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_finobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_FINO,
.bc_ops = &xfs_finobt_ops,
.need_init = xfs_has_finobt(mp)
},
{ /* RMAP root block */
......@@ -881,6 +873,7 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_rmapbt_buf_ops,
.work = &xfs_rmaproot_init,
.bc_ops = &xfs_rmapbt_ops,
.need_init = xfs_has_rmapbt(mp)
},
{ /* REFC root block */
......@@ -888,7 +881,7 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_refcountbt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_REFC,
.bc_ops = &xfs_refcountbt_ops,
.need_init = xfs_has_reflink(mp)
},
{ /* NULL terminating block */
......@@ -906,7 +899,7 @@ xfs_ag_init_headers(
id->daddr = dp->daddr;
id->numblks = dp->numblks;
id->type = dp->type;
id->bc_ops = dp->bc_ops;
error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
if (error)
break;
......
......@@ -331,7 +331,7 @@ struct aghdr_init_data {
/* per header data */
xfs_daddr_t daddr; /* header location */
size_t numblks; /* size of header */
xfs_btnum_t type; /* type of btree root block */
const struct xfs_btree_ops *bc_ops; /* btree ops */
};
int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
......
......@@ -151,23 +151,38 @@ xfs_alloc_ag_max_usable(
return mp->m_sb.sb_agblocks - blocks;
}
static int
xfs_alloc_lookup(
struct xfs_btree_cur *cur,
xfs_lookup_t dir,
xfs_agblock_t bno,
xfs_extlen_t len,
int *stat)
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, dir, stat);
if (*stat == 1)
cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
else
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
return error;
}
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
STATIC int /* error */
static inline int /* error */
xfs_alloc_lookup_eq(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
}
/*
......@@ -181,13 +196,7 @@ xfs_alloc_lookup_ge(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
}
/*
......@@ -201,19 +210,14 @@ xfs_alloc_lookup_le(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
}
static inline bool
xfs_alloc_cur_active(
struct xfs_btree_cur *cur)
{
return cur && cur->bc_ag.abt.active;
return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
}
/*
......@@ -991,7 +995,7 @@ xfs_alloc_cur_check(
deactivate = true;
out:
if (deactivate)
cur->bc_ag.abt.active = false;
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
*new);
return 0;
......@@ -1366,7 +1370,7 @@ xfs_alloc_walk_iter(
if (error)
return error;
if (i == 0)
cur->bc_ag.abt.active = false;
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
if (count > 0)
count--;
......@@ -1480,7 +1484,7 @@ xfs_alloc_ag_vextent_locality(
if (error)
return error;
if (i) {
acur->cnt->bc_ag.abt.active = true;
acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
fbcur = acur->cnt;
fbinc = false;
}
......
......@@ -454,9 +454,15 @@ xfs_allocbt_keys_contiguous(
be32_to_cpu(key2->alloc.ar_startblock));
}
static const struct xfs_btree_ops xfs_bnobt_ops = {
const struct xfs_btree_ops xfs_bnobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
......@@ -477,9 +483,16 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
.keys_contiguous = xfs_allocbt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_cntbt_ops = {
const struct xfs_btree_ops xfs_cntbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_LASTREC_UPDATE,
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
......@@ -508,28 +521,17 @@ xfs_allocbt_init_common(
struct xfs_perag *pag,
xfs_btnum_t btnum)
{
const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur->bc_ag.abt.active = false;
if (btnum == XFS_BTNUM_CNT) {
cur->bc_ops = &xfs_cntbt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else {
cur->bc_ops = &xfs_bnobt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
if (btnum == XFS_BTNUM_CNT)
ops = &xfs_cntbt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
return cur;
}
......@@ -595,7 +597,6 @@ xfs_allocbt_commit_staged_btree(
if (cur->bc_btnum == XFS_BTNUM_BNO) {
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
} else {
cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
}
}
......
......@@ -644,14 +644,13 @@ xfs_bmap_extents_to_btree(
* Fill in the root.
*/
block = ifp->if_broot;
xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, block, NULL, 1, 1);
/*
* Need a cursor. Can't allocate until bb_level is filled in.
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
if (wasdel)
cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
/*
* Convert to a btree with two levels, one record in root.
*/
......@@ -677,7 +676,7 @@ xfs_bmap_extents_to_btree(
goto out_root_realloc;
}
cur->bc_ino.allocated++;
cur->bc_bmap.allocated++;
ip->i_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
......@@ -689,11 +688,8 @@ xfs_bmap_extents_to_btree(
/*
* Fill in the child block.
*/
abp->b_ops = &xfs_bmbt_buf_ops;
ablock = XFS_BUF_TO_BLOCK(abp);
xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
for_each_xfs_iext(ifp, &icur, &rec) {
if (isnullstartblock(rec.br_startblock))
......@@ -898,7 +894,7 @@ xfs_bmap_add_attrfork_btree(
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return -ENOSPC;
}
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}
return 0;
......@@ -926,7 +922,7 @@ xfs_bmap_add_attrfork_extents(
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
XFS_DATA_FORK);
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
......@@ -1449,8 +1445,7 @@ xfs_bmap_add_extent_delay_real(
ASSERT(whichfork != XFS_ATTR_FORK);
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
(bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
......@@ -1751,7 +1746,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_ino.allocated : 0));
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
PREV.br_startoff = new_endoff;
PREV.br_blockcount = temp;
......@@ -1841,7 +1836,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_ino.allocated : 0));
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
PREV.br_startblock = nullstartblock(da_new);
PREV.br_blockcount = temp;
......@@ -1964,8 +1959,8 @@ xfs_bmap_add_extent_delay_real(
xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
if (bma->cur) {
da_new += bma->cur->bc_ino.allocated;
bma->cur->bc_ino.allocated = 0;
da_new += bma->cur->bc_bmap.allocated;
bma->cur->bc_bmap.allocated = 0;
}
/* adjust for changes in reserved delayed indirect blocks */
......@@ -2530,7 +2525,7 @@ xfs_bmap_add_extent_unwritten_real(
/* clear out the allocated field, done with it now in any case. */
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
*curp = cur;
}
......@@ -2709,7 +2704,7 @@ xfs_bmap_add_extent_hole_real(
struct xfs_bmbt_irec old;
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
......@@ -2918,7 +2913,7 @@ xfs_bmap_add_extent_hole_real(
/* clear out the allocated field, done with it now in any case. */
if (cur)
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
......@@ -4229,9 +4224,8 @@ xfs_bmapi_allocate(
*/
bma->nallocs++;
if (bma->cur)
bma->cur->bc_ino.flags =
bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
if (bma->cur && bma->wasdel)
bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
bma->got.br_startoff = bma->offset;
bma->got.br_startblock = bma->blkno;
......@@ -4766,10 +4760,8 @@ xfs_bmapi_remap(
ip->i_nblocks += len;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
got.br_startoff = bno;
got.br_startblock = startblock;
......@@ -5400,7 +5392,6 @@ __xfs_bunmapi(
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
} else
cur = NULL;
......@@ -5638,7 +5629,7 @@ __xfs_bunmapi(
xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
if (!error)
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
......@@ -5861,10 +5852,8 @@ xfs_bmap_collapse_extents(
if (error)
return error;
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
*done = true;
......@@ -5978,10 +5967,8 @@ xfs_bmap_insert_extents(
if (error)
return error;
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
if (*next_fsb == NULLFSBLOCK) {
xfs_iext_last(ifp, &icur);
......@@ -6098,7 +6085,6 @@ xfs_bmap_split_extent(
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
......@@ -6159,7 +6145,7 @@ xfs_bmap_split_extent(
del_cursor:
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
......
......@@ -26,6 +26,22 @@
static struct kmem_cache *xfs_bmbt_cur_cache;
void
xfs_bmbt_init_block(
struct xfs_inode *ip,
struct xfs_btree_block *buf,
struct xfs_buf *bp,
__u16 level,
__u16 numrecs)
{
if (bp)
xfs_btree_init_buf(ip->i_mount, bp, &xfs_bmbt_ops, level,
numrecs, ip->i_ino);
else
xfs_btree_init_block(ip->i_mount, buf, &xfs_bmbt_ops, level,
numrecs, ip->i_ino);
}
/*
* Convert on-disk form of btree root to in-memory form.
*/
......@@ -44,9 +60,7 @@ xfs_bmdr_to_bmbt(
xfs_bmbt_key_t *tkp;
__be64 *tpp;
xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, rblock, NULL, 0, 0);
rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
......@@ -171,13 +185,8 @@ xfs_bmbt_dup_cursor(
new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_ino.ip, cur->bc_ino.whichfork);
/*
* Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
new->bc_ino.flags = cur->bc_ino.flags;
new->bc_flags |= (cur->bc_flags &
(XFS_BTREE_BMBT_INVALID_OWNER | XFS_BTREE_BMBT_WASDEL));
return new;
}
......@@ -189,10 +198,10 @@ xfs_bmbt_update_cursor(
ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
(dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));
dst->bc_ino.allocated += src->bc_ino.allocated;
dst->bc_bmap.allocated += src->bc_bmap.allocated;
dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;
src->bc_ino.allocated = 0;
src->bc_bmap.allocated = 0;
}
STATIC int
......@@ -211,7 +220,7 @@ xfs_bmbt_alloc_block(
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
cur->bc_ino.whichfork);
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
args.wasdel = cur->bc_flags & XFS_BTREE_BMBT_WASDEL;
if (!args.wasdel && args.tp->t_blk_res == 0)
return -ENOSPC;
......@@ -247,7 +256,7 @@ xfs_bmbt_alloc_block(
}
ASSERT(args.len == 1);
cur->bc_ino.allocated++;
cur->bc_bmap.allocated++;
cur->bc_ino.ip->i_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
......@@ -515,9 +524,15 @@ xfs_bmbt_keys_contiguous(
be64_to_cpu(key2->bmbt.br_startoff));
}
static const struct xfs_btree_ops xfs_bmbt_ops = {
const struct xfs_btree_ops xfs_bmbt_ops = {
.type = XFS_BTREE_TYPE_INODE,
.rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t),
.ptr_len = XFS_BTREE_LONG_PTR_LEN,
.lru_refs = XFS_BMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2),
.dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor,
......@@ -549,19 +564,11 @@ xfs_bmbt_init_common(
ASSERT(whichfork != XFS_COW_FORK);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ops = &xfs_bmbt_ops;
cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0;
cur->bc_bmap.allocated = 0;
return cur;
}
......@@ -751,7 +758,7 @@ xfs_bmbt_change_owner(
ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;
cur->bc_flags |= XFS_BTREE_BMBT_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
xfs_btree_del_cursor(cur, error);
......
......@@ -120,4 +120,7 @@ unsigned int xfs_bmbt_maxlevels_ondisk(void);
int __init xfs_bmbt_init_cur_cache(void);
void xfs_bmbt_destroy_cur_cache(void);
void xfs_bmbt_init_block(struct xfs_inode *ip, struct xfs_btree_block *buf,
struct xfs_buf *bp, __u16 level, __u16 numrecs);
#endif /* __XFS_BMAP_BTREE_H__ */
......@@ -63,7 +63,8 @@ union xfs_btree_rec {
#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi)
uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
/*
* For logging record fields.
......@@ -86,9 +87,11 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
* Generic stats interface
*/
#define XFS_BTREE_STATS_INC(cur, stat) \
XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
XFS_STATS_INC_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
XFS_STATS_ADD_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
enum xbtree_key_contig {
XBTREE_KEY_GAP = 0,
......@@ -111,10 +114,31 @@ static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
return XBTREE_KEY_OVERLAP;
}
#define XFS_BTREE_LONG_PTR_LEN (sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN (sizeof(__be32))
enum xfs_btree_type {
XFS_BTREE_TYPE_AG,
XFS_BTREE_TYPE_INODE,
};
struct xfs_btree_ops {
/* size of the key and record structures */
size_t key_len;
size_t rec_len;
/* Type of btree - AG-rooted or inode-rooted */
enum xfs_btree_type type;
/* XFS_BTGEO_* flags that determine the geometry of the btree */
unsigned int geom_flags;
/* size of the key, pointer, and record structures */
size_t key_len;
size_t ptr_len;
size_t rec_len;
/* LRU refcount to set on each btree buffer created */
unsigned int lru_refs;
/* offset of btree stats array */
unsigned int statoff;
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
......@@ -199,6 +223,10 @@ struct xfs_btree_ops {
const union xfs_btree_key *mask);
};
/* btree geometry flags */
#define XFS_BTGEO_LASTREC_UPDATE (1U << 0) /* track last rec externally */
#define XFS_BTGEO_OVERLAPPING (1U << 1) /* overlapping intervals */
/*
* Reasons for the update_lastrec method to be called.
*/
......@@ -215,39 +243,6 @@ union xfs_btree_irec {
struct xfs_refcount_irec rc;
};
/* Per-AG btree information. */
struct xfs_btree_cur_ag {
struct xfs_perag *pag;
union {
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
};
union {
struct {
unsigned int nr_ops; /* # record updates */
unsigned int shape_changes; /* # of extent splits */
} refc;
struct {
bool active; /* allocation cursor state */
} abt;
};
};
/* Btree-in-inode cursor information */
struct xfs_btree_cur_ino {
struct xfs_inode *ip;
struct xbtree_ifakeroot *ifake; /* for staging cursor */
int allocated;
short forksize;
char whichfork;
char flags;
/* We are converting a delalloc reservation */
#define XFS_BTCUR_BMBT_WASDEL (1 << 0)
/* For extent swap, ignore owner check in verifier */
#define XFS_BTCUR_BMBT_INVALID_OWNER (1 << 1)
};
struct xfs_btree_level {
/* buffer pointer */
struct xfs_buf *bp;
......@@ -276,17 +271,31 @@ struct xfs_btree_cur
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
int bc_statoff; /* offset of btree stats array */
/*
* Short btree pointers need an agno to be able to turn the pointers
* into physical addresses for IO, so the btree cursor switches between
* bc_ino and bc_ag based on whether XFS_BTREE_LONG_PTRS is set for the
* cursor.
*/
/* per-type information */
union {
struct xfs_btree_cur_ag bc_ag;
struct xfs_btree_cur_ino bc_ino;
struct {
struct xfs_inode *ip;
short forksize;
char whichfork;
struct xbtree_ifakeroot *ifake; /* for staging cursor */
} bc_ino;
struct {
struct xfs_perag *pag;
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
} bc_ag;
};
/* per-format private data */
union {
struct {
int allocated;
} bc_bmap; /* bmapbt */
struct {
unsigned int nr_ops; /* # record updates */
unsigned int shape_changes; /* # of extent splits */
} bc_refc; /* refcountbt */
};
/* Must be at the end of the struct! */
......@@ -304,18 +313,22 @@ xfs_btree_cur_sizeof(unsigned int nlevels)
return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
/* cursor flags */
#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
/* cursor state flags */
/*
* The root of this btree is a fakeroot structure so that we can stage a btree
* rebuild without leaving it accessible via primary metadata. The ops struct
* is dynamically allocated and must be freed when the cursor is deleted.
*/
#define XFS_BTREE_STAGING (1<<5)
#define XFS_BTREE_STAGING (1U << 0)
/* We are converting a delalloc reservation (only for bmbt btrees) */
#define XFS_BTREE_BMBT_WASDEL (1U << 1)
/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define XFS_BTREE_BMBT_INVALID_OWNER (1U << 2)
/* Cursor is active (only for allocbt btrees) */
#define XFS_BTREE_ALLOCBT_ACTIVE (1U << 3)
#define XFS_BTREE_NOERROR 0
#define XFS_BTREE_ERROR 1
......@@ -430,25 +443,12 @@ xfs_btree_reada_bufs(
/*
* Initialise a new btree block header
*/
void
xfs_btree_init_block(
struct xfs_mount *mp,
struct xfs_buf *bp,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner);
void
xfs_btree_init_block_int(
struct xfs_mount *mp,
struct xfs_btree_block *buf,
xfs_daddr_t blkno,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner,
unsigned int flags);
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
__u16 level, __u16 numrecs, __u64 owner);
/*
* Common btree core entry points.
......@@ -690,7 +690,7 @@ xfs_btree_islastblock(
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
......@@ -720,14 +720,19 @@ xfs_btree_alloc_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_btnum_t btnum,
const struct xfs_btree_ops *ops,
uint8_t maxlevels,
struct kmem_cache *cache)
{
struct xfs_btree_cur *cur;
ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);
/* BMBT allocations can come through from non-transactional context. */
cur = kmem_cache_zalloc(cache,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
cur->bc_ops = ops;
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = btnum;
......@@ -742,4 +747,14 @@ void xfs_btree_destroy_cur_caches(void);
int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
/* Does this level of the cursor point to the inode root (and not a block)? */
static inline bool
xfs_btree_at_iroot(
const struct xfs_btree_cur *cur,
int level)
{
return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == cur->bc_nlevels - 1;
}
#endif /* __XFS_BTREE_H__ */
......@@ -136,7 +136,7 @@ xfs_btree_stage_afakeroot(
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
......@@ -172,6 +172,7 @@ xfs_btree_commit_afakeroot(
trace_xfs_btree_commit_afakeroot(cur);
kfree((void *)cur->bc_ops);
cur->bc_ag.afake = NULL;
cur->bc_ag.agbp = agbp;
cur->bc_ops = ops;
cur->bc_flags &= ~XFS_BTREE_STAGING;
......@@ -217,7 +218,7 @@ xfs_btree_stage_ifakeroot(
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
......@@ -397,8 +398,7 @@ xfs_btree_bload_prep_block(
struct xfs_btree_block *new_block;
int ret;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
if (xfs_btree_at_iroot(cur, level)) {
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
size_t new_size;
......@@ -410,10 +410,8 @@ xfs_btree_bload_prep_block(
ifp->if_broot_bytes = (int)new_size;
/* Initialize it and send it out. */
xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
nr_this_block, cur->bc_ino.ip->i_ino,
cur->bc_flags);
xfs_btree_init_block(cur->bc_mp, ifp->if_broot, cur->bc_ops,
level, nr_this_block, cur->bc_ino.ip->i_ino);
*bpp = NULL;
*blockp = ifp->if_broot;
......@@ -704,7 +702,7 @@ xfs_btree_bload_compute_geometry(
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
&avg_per_block, &level_blocks, &dontcare64);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
/*
* If all the items we want to store at this level
* would fit in the inode root block, then we have our
......@@ -763,7 +761,7 @@ xfs_btree_bload_compute_geometry(
return -EOVERFLOW;
bbl->btree_height = cur->bc_nlevels;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
bbl->nr_blocks = nr_blocks - 1;
else
bbl->nr_blocks = nr_blocks;
......@@ -890,7 +888,7 @@ xfs_btree_bload(
}
/* Initialize the new root. */
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
cur->bc_ino.ifake->if_blocks = total_blocks - 1;
......
......@@ -76,8 +76,7 @@ struct xfs_btree_bload {
/*
* This function should return the size of the in-core btree root
* block. It is only necessary for XFS_BTREE_ROOT_IN_INODE btree
* types.
* block. It is only necessary for XFS_BTREE_TYPE_INODE btrees.
*/
xfs_btree_bload_iroot_size_fn iroot_size;
......
......@@ -398,9 +398,15 @@ xfs_inobt_keys_contiguous(
be32_to_cpu(key2->inobt.ir_startino));
}
static const struct xfs_btree_ops xfs_inobt_ops = {
const struct xfs_btree_ops xfs_inobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
......@@ -420,9 +426,15 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.keys_contiguous = xfs_inobt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_finobt_ops = {
const struct xfs_btree_ops xfs_finobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
......@@ -452,21 +464,16 @@ xfs_inobt_init_common(
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_mount *mp = pag->pag_mount;
const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur;
cur = xfs_btree_alloc_cursor(mp, tp, btnum,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
if (btnum == XFS_BTNUM_INO) {
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
cur->bc_ops = &xfs_inobt_ops;
} else {
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
cur->bc_ops = &xfs_finobt_ops;
}
ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
if (btnum == XFS_BTNUM_FINO)
ops = &xfs_finobt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
......
......@@ -1077,7 +1077,7 @@ xfs_refcount_still_have_space(
* to handle each of the shape changes to the refcount btree.
*/
overhead = xfs_allocfree_block_count(cur->bc_mp,
cur->bc_ag.refc.shape_changes);
cur->bc_refc.shape_changes);
overhead += cur->bc_mp->m_refc_maxlevels;
overhead *= cur->bc_mp->m_sb.sb_blocksize;
......@@ -1085,17 +1085,17 @@ xfs_refcount_still_have_space(
* Only allow 2 refcount extent updates per transaction if the
* refcount continue update "error" has been injected.
*/
if (cur->bc_ag.refc.nr_ops > 2 &&
if (cur->bc_refc.nr_ops > 2 &&
XFS_TEST_ERROR(false, cur->bc_mp,
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;
if (cur->bc_ag.refc.nr_ops == 0)
if (cur->bc_refc.nr_ops == 0)
return true;
else if (overhead > cur->bc_tp->t_log_res)
return false;
return cur->bc_tp->t_log_res - overhead >
cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
return cur->bc_tp->t_log_res - overhead >
cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
/*
......@@ -1155,7 +1155,7 @@ xfs_refcount_adjust_extents(
* Either cover the hole (increment) or
* delete the range (decrement).
*/
cur->bc_ag.refc.nr_ops++;
cur->bc_refc.nr_ops++;
if (tmp.rc_refcount) {
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
......@@ -1216,7 +1216,7 @@ xfs_refcount_adjust_extents(
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
cur->bc_ag.pag->pag_agno, &ext);
cur->bc_ag.refc.nr_ops++;
cur->bc_refc.nr_ops++;
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
......@@ -1305,7 +1305,7 @@ xfs_refcount_adjust(
if (shape_changed)
shape_changes++;
if (shape_changes)
cur->bc_ag.refc.shape_changes++;
cur->bc_refc.shape_changes++;
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj);
......@@ -1400,8 +1400,8 @@ xfs_refcount_finish_one(
*/
rcur = *pcur;
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
nr_ops = rcur->bc_ag.refc.nr_ops;
shape_changes = rcur->bc_ag.refc.shape_changes;
nr_ops = rcur->bc_refc.nr_ops;
shape_changes = rcur->bc_refc.shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
......@@ -1413,8 +1413,8 @@ xfs_refcount_finish_one(
return error;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, ri->ri_pag);
rcur->bc_ag.refc.nr_ops = nr_ops;
rcur->bc_ag.refc.shape_changes = shape_changes;
rcur->bc_refc.nr_ops = nr_ops;
rcur->bc_refc.shape_changes = shape_changes;
}
*pcur = rcur;
......
......@@ -77,8 +77,6 @@ xfs_refcountbt_alloc_block(
xfs_refc_block(args.mp)));
if (error)
goto out_error;
trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
......@@ -107,8 +105,6 @@ xfs_refcountbt_free_block(
struct xfs_agf *agf = agbp->b_addr;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
......@@ -321,9 +317,15 @@ xfs_refcountbt_keys_contiguous(
be32_to_cpu(key2->refc.rc_startblock));
}
static const struct xfs_btree_ops xfs_refcountbt_ops = {
const struct xfs_btree_ops xfs_refcountbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(struct xfs_refcount_rec),
.key_len = sizeof(struct xfs_refcount_key),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_REFC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root,
......@@ -357,15 +359,11 @@ xfs_refcountbt_init_common(
ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
&xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
cur->bc_ops = &xfs_refcountbt_ops;
cur->bc_refc.nr_ops = 0;
cur->bc_refc.shape_changes = 0;
return cur;
}
......
......@@ -94,8 +94,6 @@ xfs_rmapbt_alloc_block(
&bno, 1);
if (error)
return error;
trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
......@@ -125,8 +123,6 @@ xfs_rmapbt_free_block(
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
......@@ -476,9 +472,16 @@ xfs_rmapbt_keys_contiguous(
be32_to_cpu(key2->rmap.rm_startblock));
}
static const struct xfs_btree_ops xfs_rmapbt_ops = {
const struct xfs_btree_ops xfs_rmapbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_OVERLAPPING,
.rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_RMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
......@@ -507,12 +510,8 @@ xfs_rmapbt_init_common(
struct xfs_btree_cur *cur;
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ops = &xfs_rmapbt_ops;
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
......
......@@ -43,6 +43,15 @@ extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
/* btree ops */
extern const struct xfs_btree_ops xfs_bnobt_ops;
extern const struct xfs_btree_ops xfs_cntbt_ops;
extern const struct xfs_btree_ops xfs_inobt_ops;
extern const struct xfs_btree_ops xfs_finobt_ops;
extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
......
......@@ -47,7 +47,7 @@ __xchk_btree_process_error(
*error = 0;
fallthrough;
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip);
else
......@@ -91,7 +91,7 @@ __xchk_btree_set_corrupt(
{
sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip);
else
......@@ -168,7 +168,7 @@ xchk_btree_rec(
if (xfs_btree_keycmp_lt(cur, &key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Is high_key(rec) no larger than the parent high key? */
......@@ -215,7 +215,7 @@ xchk_btree_key(
if (xfs_btree_keycmp_lt(cur, key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Is this block's high key no larger than the parent high key? */
......@@ -239,12 +239,12 @@ xchk_btree_ptr_ok(
bool res;
/* A btree rooted in an inode has no block pointer to the root. */
if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
if (bs->cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == bs->cur->bc_nlevels)
return true;
/* Otherwise, check the pointers. */
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (bs->cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
......@@ -385,7 +385,12 @@ xchk_btree_check_block_owner(
agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
/*
* If the btree being examined is not itself a per-AG btree, initialize
* sc->sa so that we can check for the presence of an ownership record
* in the rmap btree for the AG containing the block.
*/
init_sa = bs->cur->bc_ops->type != XFS_BTREE_TYPE_AG;
if (init_sa) {
error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
......@@ -429,7 +434,7 @@ xchk_btree_check_owner(
* up.
*/
if (bp == NULL) {
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE)
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
......@@ -508,7 +513,7 @@ xchk_btree_check_minrecs(
* child block might be less than the standard minrecs, but that's ok
* provided that there's only one direct child of the root.
*/
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == cur->bc_nlevels - 2) {
struct xfs_btree_block *root_block;
struct xfs_buf *root_bp;
......@@ -562,7 +567,7 @@ xchk_btree_block_check_keys(
return;
}
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Make sure the high key of this block matches the parent. */
......@@ -597,7 +602,7 @@ xchk_btree_get_block(
return error;
xfs_btree_get_block(bs->cur, level, pbp);
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (bs->cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
level, *pbp);
else
......@@ -664,7 +669,7 @@ xchk_btree_block_keys(
if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Get high keys */
......
......@@ -535,7 +535,7 @@ xrep_newbt_claim_block(
trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
xnr->oinfo.oi_owner);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
agbno));
else
......
......@@ -37,7 +37,7 @@ xchk_btree_cur_fsbno(
xfs_buf_daddr(cur->bc_levels[level].bp));
if (level == cur->bc_nlevels - 1 &&
(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
return NULLFSBLOCK;
......
......@@ -2496,6 +2496,83 @@ DEFINE_EVENT(xfs_btree_cur_class, name, \
DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
TRACE_EVENT(xfs_btree_alloc_block,
TP_PROTO(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int stat,
int error),
TP_ARGS(cur, ptr, stat, error),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__field(int, error)
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
__entry->agno = 0;
__entry->ino = cur->bc_ino.ip->i_ino;
} else {
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->ino = 0;
}
__entry->btnum = cur->bc_btnum;
__entry->error = error;
if (!error && stat) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp,
fsb);
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp,
fsb);
} else {
__entry->agbno = be32_to_cpu(ptr->s);
}
} else {
__entry->agbno = NULLAGBLOCK;
}
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->agno,
__entry->ino,
__entry->agbno,
__entry->error)
);
TRACE_EVENT(xfs_btree_free_block,
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_buf *bp),
TP_ARGS(cur, bp),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = xfs_daddr_to_agno(cur->bc_mp,
xfs_buf_daddr(bp));
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
__entry->ino = cur->bc_ino.ip->i_ino;
else
__entry->ino = 0;
__entry->btnum = cur->bc_btnum;
__entry->agbno = xfs_daddr_to_agbno(cur->bc_mp,
xfs_buf_daddr(bp));
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->agno,
__entry->ino,
__entry->agbno)
);
/* deferred ops */
struct xfs_defer_pending;
......@@ -2858,8 +2935,6 @@ DEFINE_EVENT(xfs_rmapbt_class, name, \
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
DEFINE_RMAPBT_EVENT(xfs_rmap_update);
DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
......@@ -3217,8 +3292,6 @@ DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
TP_ARGS(mp, agno, i1, i2, i3))
/* refcount btree tracepoints */
DEFINE_BUSY_EVENT(xfs_refcountbt_alloc_block);
DEFINE_BUSY_EVENT(xfs_refcountbt_free_block);
DEFINE_AG_BTREE_LOOKUP_EVENT(xfs_refcount_lookup);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
......@@ -4189,7 +4262,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->level = level;
__entry->block_idx = block_idx;
__entry->nr_blocks = nr_blocks;
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
......