Commit 681cb87b authored by Chandan Babu R

Merge tag 'btree-geometry-in-ops-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.9-mergeC

xfs: move btree geometry to ops struct

This patchset prepares the generic btree code to allow for the creation
of new btree types outside of libxfs.  The end goal here is for online
fsck to be able to create its own in-memory btrees that will be used to
improve the performance (and reduce the memory requirements) of the
refcount btree.

To enable this, I decided that the btree ops structure is the ideal
place to encode all of the geometry information about a btree. The btree
ops structure already contains the buffer ops (and hence the btree block
magic numbers) as well as the key and record sizes, so it doesn't seem
all that far-fetched to also encode the XFS_BTREE_ flags that determine
the geometry (ROOT_IN_INODE, LONG_PTRS, etc.).
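
For illustration, here is one of the resulting ops structures, condensed
from the xfs_bmbt_ops hunk later in this diff (the cursor operation
members are elided):

	const struct xfs_btree_ops xfs_bmbt_ops = {
		.type		= XFS_BTREE_TYPE_INODE,
		.rec_len	= sizeof(xfs_bmbt_rec_t),
		.key_len	= sizeof(xfs_bmbt_key_t),
		.ptr_len	= XFS_BTREE_LONG_PTR_LEN,
		.lru_refs	= XFS_BMAP_BTREE_REF,
		.statoff	= XFS_STATS_CALC_INDEX(xs_bmbt_2),
		/* ... cursor operations (dup_cursor, etc.) ... */
	};

Everything that previously had to be copied into each cursor (pointer
length, LRU refcount, stats offset) is now a constant property of the
btree type itself.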

The rest of the patchset cleans up the btree functions that initialize
btree blocks and btree buffers.  The bulk of this work is to replace
btree-geometry-related function call arguments with a single pointer to
the ops structure, and then clean up everything else around that.  As a
side effect, we rename the functions; the prototypes sketched below show
the shape of the change.
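
Condensed from the xfs_btree.h hunks later in this diff, the old and new
block initializer prototypes look like this:

	/* Old: callers passed the daddr, btnum, and geometry flags by hand. */
	void xfs_btree_init_block_int(struct xfs_mount *mp,
			struct xfs_btree_block *buf, xfs_daddr_t blkno,
			xfs_btnum_t btnum, __u16 level, __u16 numrecs,
			__u64 owner, unsigned int flags);

	/*
	 * New: the ops pointer supplies the magic number, pointer length,
	 * and buffer ops, so only level, record count, and owner remain.
	 */
	void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
			const struct xfs_btree_ops *ops, __u16 level,
			__u16 numrecs, __u64 owner);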

Later, Christoph Hellwig and I merged in a batch of further cleanups
that he had wanted to do for a while.  All of the btree geometry
information now lives in the btree ops structure; we've created an
explicit btree type (ag, inode, mem) and moved the per-btree-type
information into a separate union, as sketched below.
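
Condensed from the xfs_btree.h hunks later in this diff, the new type
enum and the split cursor unions end up as follows (only the ag and
inode types appear in these hunks; the mem type is added by later
patches for the in-memory btrees mentioned above):

	enum xfs_btree_type {
		XFS_BTREE_TYPE_AG,
		XFS_BTREE_TYPE_INODE,
	};

	struct xfs_btree_cur {
		/* ... */
		/* per-type information */
		union {
			struct { /* ... */ } bc_ino;	/* inode-rooted */
			struct { /* ... */ } bc_ag;	/* AG-rooted */
		};
		/* per-format private data */
		union {
			struct { int allocated; } bc_bmap;	/* bmapbt */
			struct { /* ... */ } bc_refc;		/* refcountbt */
		};
		/* ... */
	};
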
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'btree-geometry-in-ops-6.9_2024-02-23' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: create predicate to determine if cursor is at inode root level
  xfs: split the per-btree union in struct xfs_btree_cur
  xfs: split out a btree type from the btree ops geometry flags
  xfs: store the btree pointer length in struct xfs_btree_ops
  xfs: factor out a btree block owner check
  xfs: factor out a xfs_btree_owner helper
  xfs: move the btree stats offset into struct btree_ops
  xfs: move lru refs to the btree ops structure
  xfs: set btree block buffer ops in _init_buf
  xfs: remove the unnecessary daddr parameter to _init_block
  xfs: btree convert xfs_btree_init_block to xfs_btree_init_buf calls
  xfs: rename btree block/buffer init functions
  xfs: initialize btree blocks using btree_ops structure
  xfs: extern some btree ops structures
  xfs: turn the allocbt cursor active field into a btree flag
  xfs: consolidate the xfs_alloc_lookup_* helpers
  xfs: remove bc_ino.flags
  xfs: encode the btree geometry flags in the btree ops structure
  xfs: fix imprecise logic in xchk_btree_check_block_owner
  xfs: drop XFS_BTREE_CRC_BLOCKS
  xfs: set the btree cursor bc_ops in xfs_btree_alloc_cursor
  xfs: consolidate btree block allocation tracepoints
  xfs: consolidate btree block freeing tracepoints
parents 5d1bd19d f73def90
@@ -492,7 +492,7 @@ xfs_btroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
}
/* Finish initializing a free space btree. */
@@ -550,7 +550,7 @@ xfs_freesp_init_recs(
}
/*
* Alloc btree root block init functions
* bnobt/cntbt btree root block init functions
*/
static void
xfs_bnoroot_init(
@@ -558,17 +558,7 @@ xfs_bnoroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
static void
xfs_cntroot_init(
struct xfs_mount *mp,
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
@@ -584,7 +574,7 @@ xfs_rmaproot_init(
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_rmap_rec *rrec;
xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);
xfs_btree_init_buf(mp, bp, id->bc_ops, 0, 4, id->agno);
/*
* mark the AG header regions as static metadata The BNO
@@ -797,7 +787,7 @@ struct xfs_aghdr_grow_data {
size_t numblks;
const struct xfs_buf_ops *ops;
aghdr_init_work_f work;
xfs_btnum_t type;
const struct xfs_btree_ops *bc_ops;
bool need_init;
};
@@ -851,13 +841,15 @@ xfs_ag_init_headers(
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_bnobt_buf_ops,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_bnobt_ops,
.need_init = true
},
{ /* CNT root block */
.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_cntbt_buf_ops,
.work = &xfs_cntroot_init,
.work = &xfs_bnoroot_init,
.bc_ops = &xfs_cntbt_ops,
.need_init = true
},
{ /* INO root block */
@@ -865,7 +857,7 @@
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_inobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_INO,
.bc_ops = &xfs_inobt_ops,
.need_init = true
},
{ /* FINO root block */
@@ -873,7 +865,7 @@
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_finobt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_FINO,
.bc_ops = &xfs_finobt_ops,
.need_init = xfs_has_finobt(mp)
},
{ /* RMAP root block */
@@ -881,6 +873,7 @@
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_rmapbt_buf_ops,
.work = &xfs_rmaproot_init,
.bc_ops = &xfs_rmapbt_ops,
.need_init = xfs_has_rmapbt(mp)
},
{ /* REFC root block */
@@ -888,7 +881,7 @@
.numblks = BTOBB(mp->m_sb.sb_blocksize),
.ops = &xfs_refcountbt_buf_ops,
.work = &xfs_btroot_init,
.type = XFS_BTNUM_REFC,
.bc_ops = &xfs_refcountbt_ops,
.need_init = xfs_has_reflink(mp)
},
{ /* NULL terminating block */
@@ -906,7 +899,7 @@
id->daddr = dp->daddr;
id->numblks = dp->numblks;
id->type = dp->type;
id->bc_ops = dp->bc_ops;
error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
if (error)
break;
@@ -331,7 +331,7 @@ struct aghdr_init_data {
/* per header data */
xfs_daddr_t daddr; /* header location */
size_t numblks; /* size of header */
xfs_btnum_t type; /* type of btree root block */
const struct xfs_btree_ops *bc_ops; /* btree ops */
};
int xfs_ag_init_headers(struct xfs_mount *mp, struct aghdr_init_data *id);
@@ -151,23 +151,38 @@ xfs_alloc_ag_max_usable(
return mp->m_sb.sb_agblocks - blocks;
}
static int
xfs_alloc_lookup(
struct xfs_btree_cur *cur,
xfs_lookup_t dir,
xfs_agblock_t bno,
xfs_extlen_t len,
int *stat)
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, dir, stat);
if (*stat == 1)
cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
else
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
return error;
}
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
STATIC int /* error */
static inline int /* error */
xfs_alloc_lookup_eq(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat);
}
/*
@@ -181,13 +196,7 @@ xfs_alloc_lookup_ge(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat);
}
/*
@@ -201,19 +210,14 @@ xfs_alloc_lookup_le(
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
int error;
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
cur->bc_ag.abt.active = (*stat == 1);
return error;
return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat);
}
static inline bool
xfs_alloc_cur_active(
struct xfs_btree_cur *cur)
{
return cur && cur->bc_ag.abt.active;
return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE);
}
/*
@@ -991,7 +995,7 @@ xfs_alloc_cur_check(
deactivate = true;
out:
if (deactivate)
cur->bc_ag.abt.active = false;
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
*new);
return 0;
@@ -1366,7 +1370,7 @@ xfs_alloc_walk_iter(
if (error)
return error;
if (i == 0)
cur->bc_ag.abt.active = false;
cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE;
if (count > 0)
count--;
@@ -1480,7 +1484,7 @@ xfs_alloc_ag_vextent_locality(
if (error)
return error;
if (i) {
acur->cnt->bc_ag.abt.active = true;
acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE;
fbcur = acur->cnt;
fbinc = false;
}
@@ -454,9 +454,15 @@ xfs_allocbt_keys_contiguous(
be32_to_cpu(key2->alloc.ar_startblock));
}
static const struct xfs_btree_ops xfs_bnobt_ops = {
const struct xfs_btree_ops xfs_bnobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtb_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
@@ -477,9 +483,16 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
.keys_contiguous = xfs_allocbt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_cntbt_ops = {
const struct xfs_btree_ops xfs_cntbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_LASTREC_UPDATE,
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_ALLOC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_abtc_2),
.dup_cursor = xfs_allocbt_dup_cursor,
.set_root = xfs_allocbt_set_root,
@@ -508,28 +521,17 @@ xfs_allocbt_init_common(
struct xfs_perag *pag,
xfs_btnum_t btnum)
{
const struct xfs_btree_ops *ops = &xfs_bnobt_ops;
struct xfs_btree_cur *cur;
ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur->bc_ag.abt.active = false;
if (btnum == XFS_BTNUM_CNT) {
cur->bc_ops = &xfs_cntbt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
} else {
cur->bc_ops = &xfs_bnobt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
}
if (btnum == XFS_BTNUM_CNT)
ops = &xfs_cntbt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops, mp->m_alloc_maxlevels,
xfs_allocbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
return cur;
}
@@ -595,7 +597,6 @@ xfs_allocbt_commit_staged_btree(
if (cur->bc_btnum == XFS_BTNUM_BNO) {
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
} else {
cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
}
}
@@ -644,14 +644,13 @@ xfs_bmap_extents_to_btree(
* Fill in the root.
*/
block = ifp->if_broot;
xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, block, NULL, 1, 1);
/*
* Need a cursor. Can't allocate until bb_level is filled in.
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
if (wasdel)
cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
/*
* Convert to a btree with two levels, one record in root.
*/
@@ -677,7 +676,7 @@ xfs_bmap_extents_to_btree(
goto out_root_realloc;
}
cur->bc_ino.allocated++;
cur->bc_bmap.allocated++;
ip->i_nblocks++;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
@@ -689,11 +688,8 @@
/*
* Fill in the child block.
*/
abp->b_ops = &xfs_bmbt_buf_ops;
ablock = XFS_BUF_TO_BLOCK(abp);
xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, ablock, abp, 0, 0);
for_each_xfs_iext(ifp, &icur, &rec) {
if (isnullstartblock(rec.br_startblock))
@@ -898,7 +894,7 @@ xfs_bmap_add_attrfork_btree(
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
return -ENOSPC;
}
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
}
return 0;
@@ -926,7 +922,7 @@ xfs_bmap_add_attrfork_extents(
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
XFS_DATA_FORK);
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
@@ -1449,8 +1445,7 @@ xfs_bmap_add_extent_delay_real(
ASSERT(whichfork != XFS_ATTR_FORK);
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
(bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
ASSERT(!bma->cur || (bma->cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -1751,7 +1746,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_ino.allocated : 0));
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
PREV.br_startoff = new_endoff;
PREV.br_blockcount = temp;
@@ -1841,7 +1836,7 @@ xfs_bmap_add_extent_delay_real(
temp = PREV.br_blockcount - new->br_blockcount;
da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_ino.allocated : 0));
(bma->cur ? bma->cur->bc_bmap.allocated : 0));
PREV.br_startblock = nullstartblock(da_new);
PREV.br_blockcount = temp;
@@ -1964,8 +1959,8 @@ xfs_bmap_add_extent_delay_real(
xfs_mod_delalloc(mp, (int64_t)da_new - da_old);
if (bma->cur) {
da_new += bma->cur->bc_ino.allocated;
bma->cur->bc_ino.allocated = 0;
da_new += bma->cur->bc_bmap.allocated;
bma->cur->bc_bmap.allocated = 0;
}
/* adjust for changes in reserved delayed indirect blocks */
@@ -2530,7 +2525,7 @@ xfs_bmap_add_extent_unwritten_real(
/* clear out the allocated field, done with it now in any case. */
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
*curp = cur;
}
@@ -2709,7 +2704,7 @@ xfs_bmap_add_extent_hole_real(
struct xfs_bmbt_irec old;
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
ASSERT(!cur || !(cur->bc_flags & XFS_BTREE_BMBT_WASDEL));
XFS_STATS_INC(mp, xs_add_exlist);
@@ -2918,7 +2913,7 @@ xfs_bmap_add_extent_hole_real(
/* clear out the allocated field, done with it now in any case. */
if (cur)
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_bmap_check_leaf_extents(cur, ip, whichfork);
done:
@@ -4229,9 +4224,8 @@ xfs_bmapi_allocate(
*/
bma->nallocs++;
if (bma->cur)
bma->cur->bc_ino.flags =
bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
if (bma->cur && bma->wasdel)
bma->cur->bc_flags |= XFS_BTREE_BMBT_WASDEL;
bma->got.br_startoff = bma->offset;
bma->got.br_startblock = bma->blkno;
@@ -4766,10 +4760,8 @@ xfs_bmapi_remap(
ip->i_nblocks += len;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
got.br_startoff = bno;
got.br_startblock = startblock;
@@ -5400,7 +5392,6 @@ __xfs_bunmapi(
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
} else
cur = NULL;
@@ -5638,7 +5629,7 @@ __xfs_bunmapi(
xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
if (!error)
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
return error;
@@ -5861,10 +5852,8 @@ xfs_bmap_collapse_extents(
if (error)
return error;
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
*done = true;
@@ -5978,10 +5967,8 @@ xfs_bmap_insert_extents(
if (error)
return error;
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (ifp->if_format == XFS_DINODE_FMT_BTREE)
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
}
if (*next_fsb == NULLFSBLOCK) {
xfs_iext_last(ifp, &icur);
@@ -6098,7 +6085,6 @@ xfs_bmap_split_extent(
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
@@ -6159,7 +6145,7 @@
del_cursor:
if (cur) {
cur->bc_ino.allocated = 0;
cur->bc_bmap.allocated = 0;
xfs_btree_del_cursor(cur, error);
}
@@ -26,6 +26,22 @@
static struct kmem_cache *xfs_bmbt_cur_cache;
void
xfs_bmbt_init_block(
struct xfs_inode *ip,
struct xfs_btree_block *buf,
struct xfs_buf *bp,
__u16 level,
__u16 numrecs)
{
if (bp)
xfs_btree_init_buf(ip->i_mount, bp, &xfs_bmbt_ops, level,
numrecs, ip->i_ino);
else
xfs_btree_init_block(ip->i_mount, buf, &xfs_bmbt_ops, level,
numrecs, ip->i_ino);
}
/*
* Convert on-disk form of btree root to in-memory form.
*/
@@ -44,9 +60,7 @@ xfs_bmdr_to_bmbt(
xfs_bmbt_key_t *tkp;
__be64 *tpp;
xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
XFS_BTREE_LONG_PTRS);
xfs_bmbt_init_block(ip, rblock, NULL, 0, 0);
rblock->bb_level = dblock->bb_level;
ASSERT(be16_to_cpu(rblock->bb_level) > 0);
rblock->bb_numrecs = dblock->bb_numrecs;
@@ -171,13 +185,8 @@ xfs_bmbt_dup_cursor(
new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
cur->bc_ino.ip, cur->bc_ino.whichfork);
/*
* Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
new->bc_ino.flags = cur->bc_ino.flags;
new->bc_flags |= (cur->bc_flags &
(XFS_BTREE_BMBT_INVALID_OWNER | XFS_BTREE_BMBT_WASDEL));
return new;
}
@@ -189,10 +198,10 @@ xfs_bmbt_update_cursor(
ASSERT((dst->bc_tp->t_highest_agno != NULLAGNUMBER) ||
(dst->bc_ino.ip->i_diflags & XFS_DIFLAG_REALTIME));
dst->bc_ino.allocated += src->bc_ino.allocated;
dst->bc_bmap.allocated += src->bc_bmap.allocated;
dst->bc_tp->t_highest_agno = src->bc_tp->t_highest_agno;
src->bc_ino.allocated = 0;
src->bc_bmap.allocated = 0;
}
STATIC int
@@ -211,7 +220,7 @@ xfs_bmbt_alloc_block(
xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_ino.ip->i_ino,
cur->bc_ino.whichfork);
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL;
args.wasdel = cur->bc_flags & XFS_BTREE_BMBT_WASDEL;
if (!args.wasdel && args.tp->t_blk_res == 0)
return -ENOSPC;
@@ -247,7 +256,7 @@
}
ASSERT(args.len == 1);
cur->bc_ino.allocated++;
cur->bc_bmap.allocated++;
cur->bc_ino.ip->i_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_ino.ip, XFS_ILOG_CORE);
xfs_trans_mod_dquot_byino(args.tp, cur->bc_ino.ip,
@@ -515,9 +524,15 @@ xfs_bmbt_keys_contiguous(
be64_to_cpu(key2->bmbt.br_startoff));
}
static const struct xfs_btree_ops xfs_bmbt_ops = {
const struct xfs_btree_ops xfs_bmbt_ops = {
.type = XFS_BTREE_TYPE_INODE,
.rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t),
.ptr_len = XFS_BTREE_LONG_PTR_LEN,
.lru_refs = XFS_BMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2),
.dup_cursor = xfs_bmbt_dup_cursor,
.update_cursor = xfs_bmbt_update_cursor,
@@ -549,19 +564,11 @@ xfs_bmbt_init_common(
ASSERT(whichfork != XFS_COW_FORK);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP, &xfs_bmbt_ops,
mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
cur->bc_ops = &xfs_bmbt_ops;
cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
cur->bc_ino.ip = ip;
cur->bc_ino.allocated = 0;
cur->bc_ino.flags = 0;
cur->bc_bmap.allocated = 0;
return cur;
}
@@ -751,7 +758,7 @@ xfs_bmbt_change_owner(
ASSERT(xfs_ifork_ptr(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
cur->bc_ino.flags |= XFS_BTCUR_BMBT_INVALID_OWNER;
cur->bc_flags |= XFS_BTREE_BMBT_INVALID_OWNER;
error = xfs_btree_change_owner(cur, new_owner, buffer_list);
xfs_btree_del_cursor(cur, error);
@@ -120,4 +120,7 @@ unsigned int xfs_bmbt_maxlevels_ondisk(void);
int __init xfs_bmbt_init_cur_cache(void);
void xfs_bmbt_destroy_cur_cache(void);
void xfs_bmbt_init_block(struct xfs_inode *ip, struct xfs_btree_block *buf,
struct xfs_buf *bp, __u16 level, __u16 numrecs);
#endif /* __XFS_BMAP_BTREE_H__ */
@@ -32,24 +32,17 @@
/*
* Btree magic numbers.
*/
static const uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
{ XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
XFS_FIBT_MAGIC, 0 },
{ XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC,
XFS_REFC_CRC_MAGIC }
};
uint32_t
xfs_btree_magic(
int crc,
xfs_btnum_t btnum)
struct xfs_mount *mp,
const struct xfs_btree_ops *ops)
{
uint32_t magic = xfs_magics[crc][btnum];
int idx = xfs_has_crc(mp) ? 1 : 0;
__be32 magic = ops->buf_ops->magic[idx];
/* Ensure we asked for crc for crc-only magics. */
ASSERT(magic != 0);
return magic;
return be32_to_cpu(magic);
}
/*
@@ -128,8 +121,7 @@ __xfs_btree_check_lblock(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
xfs_btnum_t btnum = cur->bc_btnum;
int crc = xfs_has_crc(mp);
bool crc = xfs_has_crc(mp);
xfs_failaddr_t fa;
xfs_fsblock_t fsb = NULLFSBLOCK;
@@ -143,7 +135,7 @@
return __this_address;
}
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
return __this_address;
if (be16_to_cpu(block->bb_level) != level)
return __this_address;
@@ -197,8 +189,7 @@ __xfs_btree_check_sblock(
{
struct xfs_mount *mp = cur->bc_mp;
struct xfs_perag *pag = cur->bc_ag.pag;
xfs_btnum_t btnum = cur->bc_btnum;
int crc = xfs_has_crc(mp);
bool crc = xfs_has_crc(mp);
xfs_failaddr_t fa;
xfs_agblock_t agbno = NULLAGBLOCK;
@@ -210,7 +201,7 @@
return __this_address;
}
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(crc, btnum))
if (be32_to_cpu(block->bb_magic) != xfs_btree_magic(mp, cur->bc_ops))
return __this_address;
if (be16_to_cpu(block->bb_level) != level)
return __this_address;
@@ -261,7 +252,7 @@ xfs_btree_check_block(
int level, /* level of the btree block */
struct xfs_buf *bp) /* buffer containing block, if any */
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return xfs_btree_check_lblock(cur, block, level, bp);
else
return xfs_btree_check_sblock(cur, block, level, bp);
@@ -302,7 +293,7 @@ xfs_btree_check_ptr(
int index,
int level)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]),
level))
return 0;
@@ -414,6 +405,8 @@ xfs_btree_free_block(
{
int error;
trace_xfs_btree_free_block(cur, bp);
error = cur->bc_ops->free_block(cur, bp);
if (!error) {
xfs_trans_binval(cur->bc_tp, bp);
@@ -452,12 +445,21 @@ xfs_btree_del_cursor(
* zero, then we should be shut down or on our way to shutdown due to
* cancelling a dirty transaction on error.
*/
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_ino.allocated == 0 ||
ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_bmap.allocated == 0 ||
xfs_is_shutdown(cur->bc_mp) || error != 0);
switch (cur->bc_ops->type) {
case XFS_BTREE_TYPE_AG:
if (cur->bc_ag.pag)
xfs_perag_put(cur->bc_ag.pag);
break;
case XFS_BTREE_TYPE_INODE:
/* nothing to do */
break;
}
if (unlikely(cur->bc_flags & XFS_BTREE_STAGING))
kfree(cur->bc_ops);
if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
xfs_perag_put(cur->bc_ag.pag);
kmem_cache_free(cur->bc_cache, cur);
}
@@ -545,7 +547,7 @@ xfs_btree_dup_cursor(
* record, key or pointer (xfs_btree_*_addr). Note that all addressing
* inside the btree block is done using indices starting at one, not zero!
*
* If XFS_BTREE_OVERLAPPING is set, then this btree supports keys containing
* If XFS_BTGEO_OVERLAPPING is set, then this btree supports keys containing
* overlapping intervals. In such a tree, records are still sorted lowest to
* highest and indexed by the smallest key value that refers to the record.
* However, nodes are different: each pointer has two associated keys -- one
@@ -595,25 +597,16 @@ xfs_btree_dup_cursor(
*/
static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (xfs_has_crc(cur->bc_mp))
return XFS_BTREE_LBLOCK_CRC_LEN;
return XFS_BTREE_LBLOCK_LEN;
}
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS)
if (xfs_has_crc(cur->bc_mp))
return XFS_BTREE_SBLOCK_CRC_LEN;
return XFS_BTREE_SBLOCK_LEN;
}
/*
* Return size of btree block pointers for this btree instance.
*/
static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur)
{
return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
sizeof(__be64) : sizeof(__be32);
}
/*
* Calculate offset of the n-th record in a btree block.
*/
@@ -661,7 +654,7 @@ xfs_btree_ptr_offset(
{
return xfs_btree_block_len(cur) +
cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len +
(n - 1) * xfs_btree_ptr_len(cur);
(n - 1) * cur->bc_ops->ptr_len;
}
/*
@@ -724,7 +717,7 @@ struct xfs_ifork *
xfs_btree_ifork_ptr(
struct xfs_btree_cur *cur)
{
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
if (cur->bc_flags & XFS_BTREE_STAGING)
return cur->bc_ino.ifake->if_fork;
@@ -756,8 +749,7 @@ xfs_btree_get_block(
int level, /* level in btree */
struct xfs_buf **bpp) /* buffer containing the block */
{
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1)) {
if (xfs_btree_at_iroot(cur, level)) {
*bpp = NULL;
return xfs_btree_get_iroot(cur);
}
@@ -999,8 +991,7 @@ xfs_btree_readahead(
* No readahead needed if we are at the root level and the
* btree root is stored in the inode.
*/
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(lev == cur->bc_nlevels - 1))
if (xfs_btree_at_iroot(cur, lev))
return 0;
if ((cur->bc_levels[lev].ra | lr) == cur->bc_levels[lev].ra)
@@ -1009,7 +1000,7 @@
cur->bc_levels[lev].ra |= lr;
block = XFS_BUF_TO_BLOCK(cur->bc_levels[lev].bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return xfs_btree_readahead_lblock(cur, lr, block);
return xfs_btree_readahead_sblock(cur, lr, block);
}
@@ -1028,7 +1019,7 @@ xfs_btree_ptr_to_daddr(
if (error)
return error;
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
fsbno = be64_to_cpu(ptr->l);
*daddr = XFS_FSB_TO_DADDR(cur->bc_mp, fsbno);
} else {
@@ -1078,7 +1069,7 @@ xfs_btree_setbuf(
cur->bc_levels[lev].ra = 0;
b = XFS_BUF_TO_BLOCK(bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
cur->bc_levels[lev].ra |= XFS_BTCUR_LEFTRA;
if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
@@ -1096,7 +1087,7 @@ xfs_btree_ptr_is_null(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return ptr->l == cpu_to_be64(NULLFSBLOCK);
else
return ptr->s == cpu_to_be32(NULLAGBLOCK);
@@ -1107,7 +1098,7 @@ xfs_btree_set_ptr_null(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
ptr->l = cpu_to_be64(NULLFSBLOCK);
else
ptr->s = cpu_to_be32(NULLAGBLOCK);
@@ -1125,7 +1116,7 @@ xfs_btree_get_sibling(
{
ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (lr == XFS_BB_RIGHTSIB)
ptr->l = block->bb_u.l.bb_rightsib;
else
@@ -1147,7 +1138,7 @@ xfs_btree_set_sibling(
{
ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (lr == XFS_BB_RIGHTSIB)
block->bb_u.l.bb_rightsib = ptr->l;
else
@@ -1160,25 +1151,24 @@ xfs_btree_set_sibling(
}
}
void
xfs_btree_init_block_int(
static void
__xfs_btree_init_block(
struct xfs_mount *mp,
struct xfs_btree_block *buf,
const struct xfs_btree_ops *ops,
xfs_daddr_t blkno,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner,
unsigned int flags)
__u64 owner)
{
int crc = xfs_has_crc(mp);
__u32 magic = xfs_btree_magic(crc, btnum);
bool crc = xfs_has_crc(mp);
__u32 magic = xfs_btree_magic(mp, ops);
buf->bb_magic = cpu_to_be32(magic);
buf->bb_level = cpu_to_be16(level);
buf->bb_numrecs = cpu_to_be16(numrecs);
if (flags & XFS_BTREE_LONG_PTRS) {
if (ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
if (crc) {
@@ -1189,14 +1179,12 @@ xfs_btree_init_block_int(
buf->bb_u.l.bb_lsn = 0;
}
} else {
/* owner is a 32 bit value on short blocks */
__u32 __owner = (__u32)owner;
buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
if (crc) {
buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
/* owner is a 32 bit value on short blocks */
buf->bb_u.s.bb_owner = cpu_to_be32((__u32)owner);
uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
buf->bb_u.s.bb_lsn = 0;
}
@@ -1205,15 +1193,38 @@
void
xfs_btree_init_block(
struct xfs_mount *mp,
struct xfs_buf *bp,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner)
struct xfs_mount *mp,
struct xfs_btree_block *block,
const struct xfs_btree_ops *ops,
__u16 level,
__u16 numrecs,
__u64 owner)
{
__xfs_btree_init_block(mp, block, ops, XFS_BUF_DADDR_NULL, level,
numrecs, owner);
}
void
xfs_btree_init_buf(
struct xfs_mount *mp,
struct xfs_buf *bp,
const struct xfs_btree_ops *ops,
__u16 level,
__u16 numrecs,
__u64 owner)
{
__xfs_btree_init_block(mp, XFS_BUF_TO_BLOCK(bp), ops,
xfs_buf_daddr(bp), level, numrecs, owner);
bp->b_ops = ops->buf_ops;
}
static inline __u64
xfs_btree_owner(
struct xfs_btree_cur *cur)
{
xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), xfs_buf_daddr(bp),
btnum, level, numrecs, owner, 0);
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
return cur->bc_ino.ip->i_ino;
return cur->bc_ag.pag->pag_agno;
}
void
@@ -1223,22 +1234,8 @@ xfs_btree_init_block_cur(
int level,
int numrecs)
{
__u64 owner;
/*
* we can pull the owner from the cursor right now as the different
* owners align directly with the pointer size of the btree. This may
* change in future, but is safe for current users of the generic btree
* code.
*/
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
owner = cur->bc_ino.ip->i_ino;
else
owner = cur->bc_ag.pag->pag_agno;
xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp),
xfs_buf_daddr(bp), cur->bc_btnum, level,
numrecs, owner, cur->bc_flags);
xfs_btree_init_buf(cur->bc_mp, bp, cur->bc_ops, level, numrecs,
xfs_btree_owner(cur));
}
/*
@@ -1256,7 +1253,7 @@ xfs_btree_is_lastrec(
if (level > 0)
return 0;
if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_LASTREC_UPDATE))
return 0;
xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
@@ -1271,7 +1268,7 @@ xfs_btree_buf_to_ptr(
struct xfs_buf *bp,
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
xfs_buf_daddr(bp)));
else {
@@ -1280,32 +1277,12 @@
}
}
STATIC void
static inline void
xfs_btree_set_refs(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
switch (cur->bc_btnum) {
case XFS_BTNUM_BNO:
case XFS_BTNUM_CNT:
xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
break;
case XFS_BTNUM_INO:
case XFS_BTNUM_FINO:
xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
break;
case XFS_BTNUM_BMAP:
xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
break;
case XFS_BTNUM_RMAP:
xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
break;
case XFS_BTNUM_REFC:
xfs_buf_set_ref(bp, XFS_REFC_BTREE_REF);
break;
default:
ASSERT(0);
}
xfs_buf_set_ref(bp, cur->bc_ops->lru_refs);
}
int
@@ -1406,7 +1383,7 @@ xfs_btree_copy_ptrs(
int numptrs)
{
ASSERT(numptrs >= 0);
memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
memcpy(dst_ptr, src_ptr, numptrs * cur->bc_ops->ptr_len);
}
/*
@@ -1462,8 +1439,8 @@ xfs_btree_shift_ptrs(
ASSERT(numptrs >= 0);
ASSERT(dir == 1 || dir == -1);
dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
dst_ptr = (char *)ptr + (dir * cur->bc_ops->ptr_len);
memmove(dst_ptr, ptr, numptrs * cur->bc_ops->ptr_len);
}
/*
@@ -1574,7 +1551,7 @@ xfs_btree_log_block(
if (bp) {
int nbits;
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
if (xfs_has_crc(cur->bc_mp)) {
/*
* We don't log the CRC when updating a btree
* block but instead recreate it during log
@@ -1589,7 +1566,7 @@
nbits = XFS_BB_NUM_BITS;
}
xfs_btree_offsets(fields,
(cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
(cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) ?
loffsets : soffsets,
nbits, &first, &last);
xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
@@ -1666,7 +1643,7 @@ xfs_btree_increment(
* confused or have the tree root in an inode.
*/
if (lev == cur->bc_nlevels) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
goto out0;
ASSERT(0);
xfs_btree_mark_sick(cur);
@@ -1760,7 +1737,7 @@ xfs_btree_decrement(
* or the root of the tree is in an inode.
*/
if (lev == cur->bc_nlevels) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
goto out0;
ASSERT(0);
xfs_btree_mark_sick(cur);
@@ -1796,6 +1773,33 @@
return error;
}
/*
* Check the btree block owner now that we have the context to know who the
* real owner is.
*/
static inline xfs_failaddr_t
xfs_btree_check_block_owner(
struct xfs_btree_cur *cur,
struct xfs_btree_block *block)
{
__u64 owner;
if (!xfs_has_crc(cur->bc_mp) ||
(cur->bc_flags & XFS_BTREE_BMBT_INVALID_OWNER))
return NULL;
owner = xfs_btree_owner(cur);
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (be64_to_cpu(block->bb_u.l.bb_owner) != owner)
return __this_address;
} else {
if (be32_to_cpu(block->bb_u.s.bb_owner) != owner)
return __this_address;
}
return NULL;
}
int
xfs_btree_lookup_get_block(
struct xfs_btree_cur *cur, /* btree cursor */
@@ -1808,8 +1812,7 @@
int error = 0;
/* special case the root block if in an inode */
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1)) {
if (xfs_btree_at_iroot(cur, level)) {
*blkp = xfs_btree_get_iroot(cur);
return 0;
}
@@ -1834,11 +1837,7 @@
return error;
/* Check the inode owner since the verifiers don't. */
if (xfs_has_crc(cur->bc_mp) &&
!(cur->bc_ino.flags & XFS_BTCUR_BMBT_INVALID_OWNER) &&
(cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
cur->bc_ino.ip->i_ino)
if (xfs_btree_check_block_owner(cur, *blkp) != NULL)
goto out_bad;
/* Did we get the level we were looking for? */
@@ -2056,7 +2055,7 @@ xfs_btree_high_key_from_key(
struct xfs_btree_cur *cur,
union xfs_btree_key *key)
{
ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
return (union xfs_btree_key *)((char *)key +
(cur->bc_ops->key_len / 2));
}
@@ -2077,7 +2076,7 @@
rec = xfs_btree_rec_addr(cur, 1, block);
cur->bc_ops->init_key_from_rec(key, rec);
if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
@@ -2104,7 +2103,7 @@
union xfs_btree_key *high;
int n;
if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
memcpy(key, xfs_btree_key_addr(cur, 1, block),
cur->bc_ops->key_len / 2);
@@ -2148,7 +2147,7 @@ xfs_btree_needs_key_update(
struct xfs_btree_cur *cur,
int ptr)
{
return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
return (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) || ptr == 1;
}
/*
......@@ -2172,7 +2171,7 @@ __xfs_btree_updkeys(
struct xfs_buf *bp;
int ptr;
ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
ASSERT(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING);
/* Exit if there aren't any parent levels to update. */
if (level + 1 >= cur->bc_nlevels)
@@ -2241,7 +2240,7 @@ xfs_btree_update_keys(
ASSERT(level >= 0);
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)
return __xfs_btree_updkeys(cur, level, block, bp, false);
/*
@@ -2348,8 +2347,7 @@ xfs_btree_lshift(
int error; /* error return value */
int i;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1)
if (xfs_btree_at_iroot(cur, level))
goto out0;
/* Set up variables for this block as "right". */
@@ -2476,7 +2474,7 @@ xfs_btree_lshift(
* Using a temporary cursor, update the parent key values of the
* block on the left.
*/
if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
error = xfs_btree_dup_cursor(cur, &tcur);
if (error)
goto error0;
@@ -2544,8 +2542,7 @@ xfs_btree_rshift(
int error; /* error return value */
int i; /* loop counter */
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level == cur->bc_nlevels - 1))
if (xfs_btree_at_iroot(cur, level))
goto out0;
/* Set up variables for this block as "left". */
@@ -2663,7 +2660,7 @@ xfs_btree_rshift(
goto error1;
/* Update the parent high keys of the left block, if needed. */
if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
error = xfs_btree_update_keys(cur, level);
if (error)
goto error1;
@@ -2691,6 +2688,20 @@ xfs_btree_rshift(
return error;
}
static inline int
xfs_btree_alloc_block(
struct xfs_btree_cur *cur,
const union xfs_btree_ptr *hint_block,
union xfs_btree_ptr *new_block,
int *stat)
{
int error;
error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
return error;
}
/*
* Split cur/level block in half.
* Return new block number and the key to its first
@@ -2734,7 +2745,7 @@ __xfs_btree_split(
xfs_btree_buf_to_ptr(cur, lbp, &lptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
error = xfs_btree_alloc_block(cur, &lptr, &rptr, stat);
if (error)
goto error0;
if (*stat == 0)
@@ -2841,7 +2852,7 @@ __xfs_btree_split(
}
/* Update the parent high keys of the left block, if needed. */
if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
if (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING) {
error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
@@ -2981,7 +2992,6 @@ xfs_btree_split(
#define xfs_btree_split __xfs_btree_split
#endif /* __KERNEL__ */
/*
* Copy the old inode root contents into a real block and make the
* broot point to it.
@@ -3006,7 +3016,7 @@ xfs_btree_new_iroot(
XFS_BTREE_STATS_INC(cur, newroot);
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
level = cur->bc_nlevels - 1;
@@ -3014,7 +3024,7 @@
pp = xfs_btree_ptr_addr(cur, 1, block);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
error = xfs_btree_alloc_block(cur, pp, &nptr, stat);
if (error)
goto error0;
if (*stat == 0)
@@ -3032,9 +3042,9 @@
* In that case have to also ensure the blkno remains correct
*/
memcpy(cblock, block, xfs_btree_block_len(cur));
if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
if (xfs_has_crc(cur->bc_mp)) {
__be64 bno = cpu_to_be64(xfs_buf_daddr(cbp));
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
cblock->bb_u.l.bb_blkno = bno;
else
cblock->bb_u.s.bb_blkno = bno;
@@ -3114,7 +3124,7 @@ xfs_btree_new_root(
cur->bc_ops->init_ptr_from_cur(cur, &rptr);
/* Allocate the new block. If we can't do it, we're toast. Give up. */
error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
error = xfs_btree_alloc_block(cur, &rptr, &lptr, stat);
if (error)
goto error0;
if (*stat == 0)
@@ -3231,8 +3241,7 @@ xfs_btree_make_block_unfull(
{
int error = 0;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
if (xfs_btree_at_iroot(cur, level)) {
struct xfs_inode *ip = cur->bc_ino.ip;
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
@@ -3317,8 +3326,8 @@ xfs_btree_insrec(
* If we have an external root pointer, and we've made it to the
* root level, allocate a new root block and we're done.
*/
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
(level >= cur->bc_nlevels)) {
if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE &&
level >= cur->bc_nlevels) {
error = xfs_btree_new_root(cur, stat);
xfs_btree_set_ptr_null(cur, ptrp);
@@ -3605,7 +3614,7 @@ xfs_btree_kill_iroot(
#endif
int i;
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_nlevels > 1);
/*
@@ -3841,27 +3850,25 @@ xfs_btree_delrec(
* Try to get rid of the next level down. If we can't then there's
* nothing left to do.
*/
if (level == cur->bc_nlevels - 1) {
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
xfs_iroot_realloc(cur->bc_ino.ip, -1,
cur->bc_ino.whichfork);
if (xfs_btree_at_iroot(cur, level)) {
xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork);
error = xfs_btree_kill_iroot(cur);
if (error)
goto error0;
error = xfs_btree_kill_iroot(cur);
if (error)
goto error0;
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
*stat = 1;
return 0;
}
error = xfs_btree_dec_cursor(cur, level, stat);
if (error)
goto error0;
*stat = 1;
return 0;
}
/*
* If this is the root level, and there's only one entry left,
* and it's NOT the leaf level, then we can get rid of this
* level.
*/
/*
* If this is the root level, and there's only one entry left, and it's
* NOT the leaf level, then we can get rid of this level.
*/
if (level == cur->bc_nlevels - 1) {
if (numrecs == 1 && level > 0) {
union xfs_btree_ptr *pp;
/*
@@ -3910,7 +3917,7 @@
xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
/*
* One child of root, need to get a chance to copy its contents
* into the root and delete it. Can't go up to next level,
@@ -4227,8 +4234,8 @@ xfs_btree_delrec(
* If we joined with the right neighbor and there's a level above
* us, increment the cursor at that level.
*/
else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
(level + 1 < cur->bc_nlevels)) {
else if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE ||
level + 1 < cur->bc_nlevels) {
error = xfs_btree_increment(cur, level + 1, &i);
if (error)
goto error0;
@@ -4296,7 +4303,7 @@ xfs_btree_delete(
* If we combined blocks as part of deleting the record, delrec won't
* have updated the parent high keys so we have to do that here.
*/
if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
if (joined && (cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING)) {
error = xfs_btree_updkeys_force(cur, 0);
if (error)
goto error0;
@@ -4393,7 +4400,7 @@ xfs_btree_visit_block(
* return the same block without checking if the right sibling points
* back to us and creates a cyclic reference in the btree.
*/
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (be64_to_cpu(rptr.l) == XFS_DADDR_TO_FSB(cur->bc_mp,
xfs_buf_daddr(bp))) {
xfs_btree_mark_sick(cur);
@@ -4501,7 +4508,7 @@ xfs_btree_block_change_owner(
/* modify the owner */
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
return 0;
block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
@@ -4519,7 +4526,7 @@
* though, so everything is consistent in memory.
*/
if (!bp) {
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
ASSERT(level == cur->bc_nlevels - 1);
return 0;
}
@@ -4996,7 +5003,7 @@ xfs_btree_query_range(
if (!xfs_btree_keycmp_le(cur, &low_key, &high_key))
return -EINVAL;
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return xfs_btree_simple_query_range(cur, &low_key,
&high_key, fn, priv);
return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
@@ -5050,7 +5057,7 @@ xfs_btree_diff_two_ptrs(
const union xfs_btree_ptr *a,
const union xfs_btree_ptr *b)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
}
@@ -5104,7 +5111,7 @@ xfs_btree_has_records_helper(
key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
&rec_key, info->key_mask);
if (key_contig == XBTREE_KEY_OVERLAP &&
!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return -EFSCORRUPTED;
if (key_contig == XBTREE_KEY_GAP)
return -ECANCELED;
@@ -5198,7 +5205,7 @@ xfs_btree_has_more_records(
return true;
/* There are more record blocks. */
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK);
else
return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
@@ -63,7 +63,8 @@ union xfs_btree_rec {
#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
#define XFS_BTNUM_REFC ((xfs_btnum_t)XFS_BTNUM_REFCi)
uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
struct xfs_btree_ops;
uint32_t xfs_btree_magic(struct xfs_mount *mp, const struct xfs_btree_ops *ops);
/*
* For logging record fields.
@@ -86,9 +87,11 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
* Generic stats interface
*/
#define XFS_BTREE_STATS_INC(cur, stat) \
XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
XFS_STATS_INC_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat)
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
XFS_STATS_ADD_OFF((cur)->bc_mp, \
(cur)->bc_ops->statoff + __XBTS_ ## stat, val)
enum xbtree_key_contig {
XBTREE_KEY_GAP = 0,
@@ -111,10 +114,31 @@ static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
return XBTREE_KEY_OVERLAP;
}
#define XFS_BTREE_LONG_PTR_LEN (sizeof(__be64))
#define XFS_BTREE_SHORT_PTR_LEN (sizeof(__be32))
enum xfs_btree_type {
XFS_BTREE_TYPE_AG,
XFS_BTREE_TYPE_INODE,
};
struct xfs_btree_ops {
/* size of the key and record structures */
size_t key_len;
size_t rec_len;
/* Type of btree - AG-rooted or inode-rooted */
enum xfs_btree_type type;
/* XFS_BTGEO_* flags that determine the geometry of the btree */
unsigned int geom_flags;
/* size of the key, pointer, and record structures */
size_t key_len;
size_t ptr_len;
size_t rec_len;
/* LRU refcount to set on each btree buffer created */
unsigned int lru_refs;
/* offset of btree stats array */
unsigned int statoff;
/* cursor operations */
struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
@@ -199,6 +223,10 @@ struct xfs_btree_ops {
const union xfs_btree_key *mask);
};
/* btree geometry flags */
#define XFS_BTGEO_LASTREC_UPDATE (1U << 0) /* track last rec externally */
#define XFS_BTGEO_OVERLAPPING (1U << 1) /* overlapping intervals */
/*
* Reasons for the update_lastrec method to be called.
*/
@@ -215,39 +243,6 @@ union xfs_btree_irec {
struct xfs_refcount_irec rc;
};
/* Per-AG btree information. */
struct xfs_btree_cur_ag {
struct xfs_perag *pag;
union {
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
};
union {
struct {
unsigned int nr_ops; /* # record updates */
unsigned int shape_changes; /* # of extent splits */
} refc;
struct {
bool active; /* allocation cursor state */
} abt;
};
};
/* Btree-in-inode cursor information */
struct xfs_btree_cur_ino {
struct xfs_inode *ip;
struct xbtree_ifakeroot *ifake; /* for staging cursor */
int allocated;
short forksize;
char whichfork;
char flags;
/* We are converting a delalloc reservation */
#define XFS_BTCUR_BMBT_WASDEL (1 << 0)
/* For extent swap, ignore owner check in verifier */
#define XFS_BTCUR_BMBT_INVALID_OWNER (1 << 1)
};
struct xfs_btree_level {
/* buffer pointer */
struct xfs_buf *bp;
@@ -276,17 +271,31 @@ struct xfs_btree_cur
union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */
int bc_statoff; /* offset of btree stats array */
/*
* Short btree pointers need an agno to be able to turn the pointers
* into physical addresses for IO, so the btree cursor switches between
* bc_ino and bc_ag based on whether XFS_BTREE_LONG_PTRS is set for the
* cursor.
*/
/* per-type information */
union {
struct xfs_btree_cur_ag bc_ag;
struct xfs_btree_cur_ino bc_ino;
struct {
struct xfs_inode *ip;
short forksize;
char whichfork;
struct xbtree_ifakeroot *ifake; /* for staging cursor */
} bc_ino;
struct {
struct xfs_perag *pag;
struct xfs_buf *agbp;
struct xbtree_afakeroot *afake; /* for staging cursor */
} bc_ag;
};
/* per-format private data */
union {
struct {
int allocated;
} bc_bmap; /* bmapbt */
struct {
unsigned int nr_ops; /* # record updates */
unsigned int shape_changes; /* # of extent splits */
} bc_refc; /* refcountbt */
};
/* Must be at the end of the struct! */
@@ -304,18 +313,22 @@
return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
/* cursor flags */
#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
/* cursor state flags */
/*
* The root of this btree is a fakeroot structure so that we can stage a btree
* rebuild without leaving it accessible via primary metadata. The ops struct
* is dynamically allocated and must be freed when the cursor is deleted.
*/
#define XFS_BTREE_STAGING (1<<5)
#define XFS_BTREE_STAGING (1U << 0)
/* We are converting a delalloc reservation (only for bmbt btrees) */
#define XFS_BTREE_BMBT_WASDEL (1U << 1)
/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define XFS_BTREE_BMBT_INVALID_OWNER (1U << 2)
/* Cursor is active (only for allocbt btrees) */
#define XFS_BTREE_ALLOCBT_ACTIVE (1U << 3)
#define XFS_BTREE_NOERROR 0
#define XFS_BTREE_ERROR 1
@@ -430,25 +443,12 @@
/*
* Initialise a new btree block header
*/
void
xfs_btree_init_block(
struct xfs_mount *mp,
struct xfs_buf *bp,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner);
void
xfs_btree_init_block_int(
struct xfs_mount *mp,
struct xfs_btree_block *buf,
xfs_daddr_t blkno,
xfs_btnum_t btnum,
__u16 level,
__u16 numrecs,
__u64 owner,
unsigned int flags);
void xfs_btree_init_buf(struct xfs_mount *mp, struct xfs_buf *bp,
const struct xfs_btree_ops *ops, __u16 level, __u16 numrecs,
__u64 owner);
void xfs_btree_init_block(struct xfs_mount *mp,
struct xfs_btree_block *buf, const struct xfs_btree_ops *ops,
__u16 level, __u16 numrecs, __u64 owner);
/*
* Common btree core entry points.
@@ -690,7 +690,7 @@ xfs_btree_islastblock(
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK);
return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
@@ -720,14 +720,19 @@ xfs_btree_alloc_cursor(
struct xfs_mount *mp,
struct xfs_trans *tp,
xfs_btnum_t btnum,
const struct xfs_btree_ops *ops,
uint8_t maxlevels,
struct kmem_cache *cache)
{
struct xfs_btree_cur *cur;
ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN ||
ops->ptr_len == XFS_BTREE_SHORT_PTR_LEN);
/* BMBT allocations can come through from non-transactional context. */
cur = kmem_cache_zalloc(cache,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
cur->bc_ops = ops;
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = btnum;
@@ -742,4 +747,14 @@ void xfs_btree_destroy_cur_caches(void);
int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
/* Does this level of the cursor point to the inode root (and not a block)? */
static inline bool
xfs_btree_at_iroot(
const struct xfs_btree_cur *cur,
int level)
{
return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == cur->bc_nlevels - 1;
}
#endif /* __XFS_BTREE_H__ */
@@ -136,7 +136,7 @@ xfs_btree_stage_afakeroot(
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
ASSERT(cur->bc_ops->type != XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
@@ -172,6 +172,7 @@ xfs_btree_commit_afakeroot(
trace_xfs_btree_commit_afakeroot(cur);
kfree((void *)cur->bc_ops);
cur->bc_ag.afake = NULL;
cur->bc_ag.agbp = agbp;
cur->bc_ops = ops;
cur->bc_flags &= ~XFS_BTREE_STAGING;
@@ -217,7 +218,7 @@ xfs_btree_stage_ifakeroot(
struct xfs_btree_ops *nops;
ASSERT(!(cur->bc_flags & XFS_BTREE_STAGING));
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE);
ASSERT(cur->bc_tp == NULL);
nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_KERNEL | __GFP_NOFAIL);
@@ -397,8 +398,7 @@ xfs_btree_bload_prep_block(
struct xfs_btree_block *new_block;
int ret;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
level == cur->bc_nlevels - 1) {
if (xfs_btree_at_iroot(cur, level)) {
struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur);
size_t new_size;
@@ -410,10 +410,8 @@
ifp->if_broot_bytes = (int)new_size;
/* Initialize it and send it out. */
xfs_btree_init_block_int(cur->bc_mp, ifp->if_broot,
XFS_BUF_DADDR_NULL, cur->bc_btnum, level,
nr_this_block, cur->bc_ino.ip->i_ino,
cur->bc_flags);
xfs_btree_init_block(cur->bc_mp, ifp->if_broot, cur->bc_ops,
level, nr_this_block, cur->bc_ino.ip->i_ino);
*bpp = NULL;
*blockp = ifp->if_broot;
@@ -704,7 +702,7 @@ xfs_btree_bload_compute_geometry(
xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
&avg_per_block, &level_blocks, &dontcare64);
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
/*
* If all the items we want to store at this level
* would fit in the inode root block, then we have our
@@ -763,7 +761,7 @@
return -EOVERFLOW;
bbl->btree_height = cur->bc_nlevels;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
bbl->nr_blocks = nr_blocks - 1;
else
bbl->nr_blocks = nr_blocks;
@@ -890,7 +888,7 @@ xfs_btree_bload(
}
/* Initialize the new root. */
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
cur->bc_ino.ifake->if_levels = cur->bc_nlevels;
cur->bc_ino.ifake->if_blocks = total_blocks - 1;
@@ -76,8 +76,7 @@ struct xfs_btree_bload {
/*
* This function should return the size of the in-core btree root
* block. It is only necessary for XFS_BTREE_ROOT_IN_INODE btree
* types.
* block. It is only necessary for XFS_BTREE_TYPE_INODE btrees.
*/
xfs_btree_bload_iroot_size_fn iroot_size;
......
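Because an inode-rooted btree keeps its root in the inode fork rather than in a disk block, the bulk loader cannot size the root from the block size and must ask the client instead. A hypothetical iroot_size callback, assuming the (cur, nr_this_level, priv) form of the argument list; check the xfs_btree_bload_iroot_size_fn typedef in this header for the exact signature:

	/*
	 * Hypothetical: report the bytes needed for an incore root block
	 * holding nr_this_level records.  xfoo_broot_space() is an
	 * illustrative helper, not a libxfs function.
	 */
	static size_t
	xfoo_iroot_size(struct xfs_btree_cur *cur, unsigned int nr_this_level,
			void *priv)
	{
		return xfoo_broot_space(cur->bc_mp, nr_this_level);
	}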
......@@ -398,9 +398,15 @@ xfs_inobt_keys_contiguous(
be32_to_cpu(key2->inobt.ir_startino));
}
static const struct xfs_btree_ops xfs_inobt_ops = {
const struct xfs_btree_ops xfs_inobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_ibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_inobt_set_root,
......@@ -420,9 +426,15 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.keys_contiguous = xfs_inobt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_finobt_ops = {
const struct xfs_btree_ops xfs_finobt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_INO_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_fibt_2),
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
......@@ -452,21 +464,16 @@ xfs_inobt_init_common(
xfs_btnum_t btnum) /* ialloc or free ino btree */
{
struct xfs_mount *mp = pag->pag_mount;
const struct xfs_btree_ops *ops = &xfs_inobt_ops;
struct xfs_btree_cur *cur;
cur = xfs_btree_alloc_cursor(mp, tp, btnum,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
if (btnum == XFS_BTNUM_INO) {
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
cur->bc_ops = &xfs_inobt_ops;
} else {
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
cur->bc_ops = &xfs_finobt_ops;
}
ASSERT(btnum == XFS_BTNUM_INO || btnum == XFS_BTNUM_FINO);
if (xfs_has_crc(mp))
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
if (btnum == XFS_BTNUM_FINO)
ops = &xfs_finobt_ops;
cur = xfs_btree_alloc_cursor(mp, tp, btnum, ops,
M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
......
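With lru_refs and statoff now encoded in the ops structure, generic code can stop switching on the btree number to pick buffer LRU hold counts and stats indices. A sketch of the resulting idiom, simplified from (not verbatim) the libxfs helpers:

	static inline void
	xfs_btree_set_refs(struct xfs_btree_cur *cur, struct xfs_buf *bp)
	{
		/* Buffer LRU hold count comes straight from the ops. */
		xfs_buf_set_ref(bp, cur->bc_ops->lru_refs);
	}

	/* Stats are indexed from the per-btree offset stored in the ops. */
	#define XFS_BTREE_STATS_INC(cur, stat) \
		XFS_STATS_INC_OFF((cur)->bc_mp, \
				(cur)->bc_ops->statoff + __XBTS_ ## stat)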
......@@ -1077,7 +1077,7 @@ xfs_refcount_still_have_space(
* to handle each of the shape changes to the refcount btree.
*/
overhead = xfs_allocfree_block_count(cur->bc_mp,
cur->bc_ag.refc.shape_changes);
cur->bc_refc.shape_changes);
overhead += cur->bc_mp->m_refc_maxlevels;
overhead *= cur->bc_mp->m_sb.sb_blocksize;
......@@ -1085,17 +1085,17 @@ xfs_refcount_still_have_space(
* Only allow 2 refcount extent updates per transaction if the
* refcount continue update "error" has been injected.
*/
if (cur->bc_ag.refc.nr_ops > 2 &&
if (cur->bc_refc.nr_ops > 2 &&
XFS_TEST_ERROR(false, cur->bc_mp,
XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
return false;
if (cur->bc_ag.refc.nr_ops == 0)
if (cur->bc_refc.nr_ops == 0)
return true;
else if (overhead > cur->bc_tp->t_log_res)
return false;
return cur->bc_tp->t_log_res - overhead >
cur->bc_ag.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
return cur->bc_tp->t_log_res - overhead >
cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
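Restated in isolation, the check above has three outcomes: a cursor with no queued updates may always continue; an overhead larger than the transaction's log reservation stops it immediately; otherwise the remaining reservation must still cover the queued refcount items. A self-contained restatement, minus the error-injection clause (hypothetical helper, not libxfs code):

	static bool
	refc_have_space(unsigned int nr_ops, unsigned int log_res,
			unsigned int overhead, unsigned int item_overhead)
	{
		if (nr_ops == 0)
			return true;	/* nothing queued yet */
		if (overhead > log_res)
			return false;	/* reservation already consumed */
		return log_res - overhead > nr_ops * item_overhead;
	}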
/*
......@@ -1155,7 +1155,7 @@ xfs_refcount_adjust_extents(
* Either cover the hole (increment) or
* delete the range (decrement).
*/
cur->bc_ag.refc.nr_ops++;
cur->bc_refc.nr_ops++;
if (tmp.rc_refcount) {
error = xfs_refcount_insert(cur, &tmp,
&found_tmp);
......@@ -1216,7 +1216,7 @@ xfs_refcount_adjust_extents(
ext.rc_refcount += adj;
trace_xfs_refcount_modify_extent(cur->bc_mp,
cur->bc_ag.pag->pag_agno, &ext);
cur->bc_ag.refc.nr_ops++;
cur->bc_refc.nr_ops++;
if (ext.rc_refcount > 1) {
error = xfs_refcount_update(cur, &ext);
if (error)
......@@ -1305,7 +1305,7 @@ xfs_refcount_adjust(
if (shape_changed)
shape_changes++;
if (shape_changes)
cur->bc_ag.refc.shape_changes++;
cur->bc_refc.shape_changes++;
/* Now that we've taken care of the ends, adjust the middle extents */
error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj);
......@@ -1400,8 +1400,8 @@ xfs_refcount_finish_one(
*/
rcur = *pcur;
if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
nr_ops = rcur->bc_ag.refc.nr_ops;
shape_changes = rcur->bc_ag.refc.shape_changes;
nr_ops = rcur->bc_refc.nr_ops;
shape_changes = rcur->bc_refc.shape_changes;
xfs_refcount_finish_one_cleanup(tp, rcur, 0);
rcur = NULL;
*pcur = NULL;
......@@ -1413,8 +1413,8 @@ xfs_refcount_finish_one(
return error;
rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, ri->ri_pag);
rcur->bc_ag.refc.nr_ops = nr_ops;
rcur->bc_ag.refc.shape_changes = shape_changes;
rcur->bc_refc.nr_ops = nr_ops;
rcur->bc_refc.shape_changes = shape_changes;
}
*pcur = rcur;
......
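These renames fall out of splitting the per-btree union in struct xfs_btree_cur: the refcount bookkeeping no longer lives inside bc_ag, so it survives independently of whether the cursor is AG- or inode-rooted. A sketch of the resulting layout, with field names taken from this diff and surrounding members elided:

	struct xfs_btree_cur {
		/* ... */
		union {
			struct xfs_btree_cur_ag		bc_ag;	/* AG btrees */
			struct xfs_btree_cur_ino	bc_ino;	/* inode btrees */
		};
		union {
			/* refcount btree bookkeeping, formerly bc_ag.refc */
			struct {
				unsigned int	nr_ops;
				unsigned int	shape_changes;
			} bc_refc;
		};
	};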
......@@ -77,8 +77,6 @@ xfs_refcountbt_alloc_block(
xfs_refc_block(args.mp)));
if (error)
goto out_error;
trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
......@@ -107,8 +105,6 @@ xfs_refcountbt_free_block(
struct xfs_agf *agf = agbp->b_addr;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
return xfs_free_extent_later(cur->bc_tp, fsbno, 1,
......@@ -321,9 +317,15 @@ xfs_refcountbt_keys_contiguous(
be32_to_cpu(key2->refc.rc_startblock));
}
static const struct xfs_btree_ops xfs_refcountbt_ops = {
const struct xfs_btree_ops xfs_refcountbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.rec_len = sizeof(struct xfs_refcount_rec),
.key_len = sizeof(struct xfs_refcount_key),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_REFC_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2),
.dup_cursor = xfs_refcountbt_dup_cursor,
.set_root = xfs_refcountbt_set_root,
......@@ -357,15 +359,11 @@ xfs_refcountbt_init_common(
ASSERT(pag->pag_agno < mp->m_sb.sb_agcount);
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_REFC,
mp->m_refc_maxlevels, xfs_refcountbt_cur_cache);
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
&xfs_refcountbt_ops, mp->m_refc_maxlevels,
xfs_refcountbt_cur_cache);
cur->bc_ag.pag = xfs_perag_hold(pag);
cur->bc_ag.refc.nr_ops = 0;
cur->bc_ag.refc.shape_changes = 0;
cur->bc_ops = &xfs_refcountbt_ops;
cur->bc_refc.nr_ops = 0;
cur->bc_refc.shape_changes = 0;
return cur;
}
......
......@@ -94,8 +94,6 @@ xfs_rmapbt_alloc_block(
&bno, 1);
if (error)
return error;
trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);
if (bno == NULLAGBLOCK) {
*stat = 0;
return 0;
......@@ -125,8 +123,6 @@ xfs_rmapbt_free_block(
int error;
bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,
bno, 1);
be32_add_cpu(&agf->agf_rmap_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
error = xfs_alloc_put_freelist(pag, cur->bc_tp, agbp, NULL, bno, 1);
......@@ -476,9 +472,16 @@ xfs_rmapbt_keys_contiguous(
be32_to_cpu(key2->rmap.rm_startblock));
}
static const struct xfs_btree_ops xfs_rmapbt_ops = {
const struct xfs_btree_ops xfs_rmapbt_ops = {
.type = XFS_BTREE_TYPE_AG,
.geom_flags = XFS_BTGEO_OVERLAPPING,
.rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key),
.ptr_len = XFS_BTREE_SHORT_PTR_LEN,
.lru_refs = XFS_RMAP_BTREE_REF,
.statoff = XFS_STATS_CALC_INDEX(xs_rmap_2),
.dup_cursor = xfs_rmapbt_dup_cursor,
.set_root = xfs_rmapbt_set_root,
......@@ -507,12 +510,8 @@ xfs_rmapbt_init_common(
struct xfs_btree_cur *cur;
/* Overlapping btree; 2 keys per pointer. */
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP,
cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_RMAP, &xfs_rmapbt_ops,
mp->m_rmap_maxlevels, xfs_rmapbt_cur_cache);
cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
cur->bc_ops = &xfs_rmapbt_ops;
cur->bc_ag.pag = xfs_perag_hold(pag);
return cur;
}
......
......@@ -43,6 +43,15 @@ extern const struct xfs_buf_ops xfs_sb_buf_ops;
extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
extern const struct xfs_buf_ops xfs_symlink_buf_ops;
/* btree ops */
extern const struct xfs_btree_ops xfs_bnobt_ops;
extern const struct xfs_btree_ops xfs_cntbt_ops;
extern const struct xfs_btree_ops xfs_inobt_ops;
extern const struct xfs_btree_ops xfs_finobt_ops;
extern const struct xfs_btree_ops xfs_bmbt_ops;
extern const struct xfs_btree_ops xfs_refcountbt_ops;
extern const struct xfs_btree_ops xfs_rmapbt_ops;
/* log size calculation functions */
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
......
......@@ -47,7 +47,7 @@ __xchk_btree_process_error(
*error = 0;
fallthrough;
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
trace_xchk_ifork_btree_op_error(sc, cur, level,
*error, ret_ip);
else
......@@ -91,7 +91,7 @@ __xchk_btree_set_corrupt(
{
sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
trace_xchk_ifork_btree_error(sc, cur, level,
ret_ip);
else
......@@ -168,7 +168,7 @@ xchk_btree_rec(
if (xfs_btree_keycmp_lt(cur, &key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Is high_key(rec) no larger than the parent high key? */
......@@ -215,7 +215,7 @@ xchk_btree_key(
if (xfs_btree_keycmp_lt(cur, key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Is this block's high key no larger than the parent high key? */
......@@ -239,12 +239,12 @@ xchk_btree_ptr_ok(
bool res;
/* A btree rooted in an inode has no block pointer to the root. */
if ((bs->cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
if (bs->cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == bs->cur->bc_nlevels)
return true;
/* Otherwise, check the pointers. */
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (bs->cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
res = xfs_btree_check_lptr(bs->cur, be64_to_cpu(ptr->l), level);
else
res = xfs_btree_check_sptr(bs->cur, be32_to_cpu(ptr->s), level);
......@@ -385,7 +385,12 @@ xchk_btree_check_block_owner(
agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
/*
* If the btree being examined is not itself a per-AG btree, initialize
* sc->sa so that we can check for the presence of an ownership record
* in the rmap btree for the AG containing the block.
*/
init_sa = bs->cur->bc_ops->type != XFS_BTREE_TYPE_AG;
if (init_sa) {
error = xchk_ag_init_existing(bs->sc, agno, &bs->sc->sa);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur,
......@@ -429,7 +434,7 @@ xchk_btree_check_owner(
* up.
*/
if (bp == NULL) {
if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
if (cur->bc_ops->type != XFS_BTREE_TYPE_INODE)
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
......@@ -508,7 +513,7 @@ xchk_btree_check_minrecs(
* child block might be less than the standard minrecs, but that's ok
* provided that there's only one direct child of the root.
*/
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == cur->bc_nlevels - 2) {
struct xfs_btree_block *root_block;
struct xfs_buf *root_bp;
......@@ -562,7 +567,7 @@ xchk_btree_block_check_keys(
return;
}
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Make sure the high key of this block matches the parent. */
......@@ -597,7 +602,7 @@ xchk_btree_get_block(
return error;
xfs_btree_get_block(bs->cur, level, pbp);
if (bs->cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (bs->cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
failed_at = __xfs_btree_check_lblock(bs->cur, *pblock,
level, *pbp);
else
......@@ -664,7 +669,7 @@ xchk_btree_block_keys(
if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
if (!(cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING))
return;
/* Get high keys */
......
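The scrub conversions above all repeat the same test against the new geometry flag. Should the open-coded form ever become unwieldy, a wrapper along these lines would capture the idiom (illustrative only, not part of the patch):

	static inline bool
	xfs_btree_is_overlapping(const struct xfs_btree_cur *cur)
	{
		return cur->bc_ops->geom_flags & XFS_BTGEO_OVERLAPPING;
	}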
......@@ -535,7 +535,7 @@ xrep_newbt_claim_block(
trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
xnr->oinfo.oi_owner);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
agbno));
else
......
......@@ -37,7 +37,7 @@ xchk_btree_cur_fsbno(
xfs_buf_daddr(cur->bc_levels[level].bp));
if (level == cur->bc_nlevels - 1 &&
(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
return XFS_INO_TO_FSB(cur->bc_mp, cur->bc_ino.ip->i_ino);
return NULLFSBLOCK;
......
......@@ -2496,6 +2496,83 @@ DEFINE_EVENT(xfs_btree_cur_class, name, \
DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
TRACE_EVENT(xfs_btree_alloc_block,
TP_PROTO(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int stat,
int error),
TP_ARGS(cur, ptr, stat, error),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__field(int, error)
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) {
__entry->agno = 0;
__entry->ino = cur->bc_ino.ip->i_ino;
} else {
__entry->agno = cur->bc_ag.pag->pag_agno;
__entry->ino = 0;
}
__entry->btnum = cur->bc_btnum;
__entry->error = error;
if (!error && stat) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp,
fsb);
__entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp,
fsb);
} else {
__entry->agbno = be32_to_cpu(ptr->s);
}
} else {
__entry->agbno = NULLAGBLOCK;
}
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->agno,
__entry->ino,
__entry->agbno,
__entry->error)
);
TRACE_EVENT(xfs_btree_free_block,
TP_PROTO(struct xfs_btree_cur *cur, struct xfs_buf *bp),
TP_ARGS(cur, bp),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_ino_t, ino)
__field(xfs_btnum_t, btnum)
__field(xfs_agblock_t, agbno)
),
TP_fast_assign(
__entry->dev = cur->bc_mp->m_super->s_dev;
__entry->agno = xfs_daddr_to_agno(cur->bc_mp,
xfs_buf_daddr(bp));
if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE)
__entry->ino = cur->bc_ino.ip->i_ino;
else
__entry->ino = 0;
__entry->btnum = cur->bc_btnum;
__entry->agbno = xfs_daddr_to_agbno(cur->bc_mp,
xfs_buf_daddr(bp));
),
TP_printk("dev %d:%d btree %s agno 0x%x ino 0x%llx agbno 0x%x",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->agno,
__entry->ino,
__entry->agbno)
);
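Consolidating the per-btree alloc/free events into these two generic tracepoints implies a single call site in the generic btree code, presumably a wrapper around the ->alloc_block method. A sketch matching the TP_PROTO above, not the verbatim xfs_btree.c hunk:

	static inline int
	xfs_btree_alloc_block(struct xfs_btree_cur *cur,
			const union xfs_btree_ptr *hint,
			union xfs_btree_ptr *new, int *stat)
	{
		int error;

		/* Delegate to the per-btree method, then trace the result. */
		error = cur->bc_ops->alloc_block(cur, hint, new, stat);
		trace_xfs_btree_alloc_block(cur, new, *stat, error);
		return error;
	}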
/* deferred ops */
struct xfs_defer_pending;
......@@ -2858,8 +2935,6 @@ DEFINE_EVENT(xfs_rmapbt_class, name, \
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
DEFINE_RMAPBT_EVENT(xfs_rmap_update);
DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
......@@ -3217,8 +3292,6 @@ DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \
TP_ARGS(mp, agno, i1, i2, i3))
/* refcount btree tracepoints */
DEFINE_BUSY_EVENT(xfs_refcountbt_alloc_block);
DEFINE_BUSY_EVENT(xfs_refcountbt_free_block);
DEFINE_AG_BTREE_LOOKUP_EVENT(xfs_refcount_lookup);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get);
DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update);
......@@ -4189,7 +4262,7 @@ TRACE_EVENT(xfs_btree_bload_block,
__entry->level = level;
__entry->block_idx = block_idx;
__entry->nr_blocks = nr_blocks;
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) {
xfs_fsblock_t fsb = be64_to_cpu(ptr->l);
__entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb);
......