Commit 561f7d17 authored by Christoph Hellwig, committed by Lachlan McIlroy

[XFS] split up xfs_btree_init_cursor

xfs_btree_init_cursor contains close to no shared code for the different
btrees and will get even more non-common code in the future.
Split it up into one routine per btree type.

Because xfs_btree_dup_cursor needs to call the init routine for a generic
btree cursor, add a new btree operations vector that contains a dup_cursor
method that initializes a new cursor based on an existing one.

The btree operations vector is based on an idea and code from Dave Chinner
and will grow more entries later during this series.

SGI-PV: 985583

SGI-Modid: xfs-linux-melb:xfs-kern:32176a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
parent f2277f06
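
Before the diff, a small stand-alone sketch (not part of the patch) of the dispatch this change introduces: each btree type supplies an ops vector whose dup_cursor method re-runs that type's own init routine, so the generic duplication code no longer needs a per-type switch. The struct and function names below are simplified stand-ins, not the kernel definitions.

	/* sketch only: mimics the bc_ops->dup_cursor dispatch added by this patch */
	#include <stdio.h>
	#include <stdlib.h>

	struct btree_cur;			/* stand-in for struct xfs_btree_cur */

	struct btree_ops {
		/* cursor operations */
		struct btree_cur *(*dup_cursor)(struct btree_cur *cur);
	};

	struct btree_cur {
		const struct btree_ops	*bc_ops;	/* per-btree-type operations */
		int			bc_nlevels;	/* example of per-cursor state */
	};

	/* stands in for xfs_allocbt_dup_cursor(): the type's own "init from existing" */
	static struct btree_cur *
	allocbt_dup_cursor(struct btree_cur *cur)
	{
		struct btree_cur *new = calloc(1, sizeof(*new));

		new->bc_ops = cur->bc_ops;
		new->bc_nlevels = cur->bc_nlevels;
		return new;
	}

	static const struct btree_ops allocbt_ops = {
		.dup_cursor = allocbt_dup_cursor,
	};

	/* stands in for the generic xfs_btree_dup_cursor(): type-independent */
	static struct btree_cur *
	btree_dup_cursor(struct btree_cur *cur)
	{
		return cur->bc_ops->dup_cursor(cur);
	}

	int main(void)
	{
		struct btree_cur cur = { .bc_ops = &allocbt_ops, .bc_nlevels = 2 };
		struct btree_cur *dup = btree_dup_cursor(&cur);

		printf("duplicated cursor with %d levels\n", dup->bc_nlevels);
		free(dup);
		return 0;
	}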
@@ -640,8 +640,8 @@ xfs_alloc_ag_vextent_exact(
 	/*
 	 * Allocate/initialize a cursor for the by-number freespace btree.
 	 */
-	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	/*
 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
 	 * Look for the closest free block <= bno, it must contain bno
@@ -696,8 +696,8 @@ xfs_alloc_ag_vextent_exact(
 	 * We are allocating agbno for rlen [agbno .. end]
 	 * Allocate/initialize a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	ASSERT(args->agbno + args->len <=
 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
@@ -759,8 +759,8 @@ xfs_alloc_ag_vextent_near(
 	/*
 	 * Get a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	ltlen = 0;
 	bno_cur_lt = bno_cur_gt = NULL;
 	/*
@@ -886,8 +886,8 @@ xfs_alloc_ag_vextent_near(
 			/*
 			 * Set up a cursor for the by-bno tree.
 			 */
-			bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp,
-				args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0);
+			bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
+				args->agbp, args->agno, XFS_BTNUM_BNO);
 			/*
 			 * Fix up the btree entries.
 			 */
@@ -914,8 +914,8 @@ xfs_alloc_ag_vextent_near(
 	/*
 	 * Allocate and initialize the cursor for the leftward search.
 	 */
-	bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	/*
 	 * Lookup <= bno to find the leftward search's starting point.
 	 */
@@ -1267,8 +1267,8 @@ xfs_alloc_ag_vextent_size(
 	/*
 	 * Allocate and initialize a cursor for the by-size btree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_CNT, NULL, 0);
+	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_CNT);
 	bno_cur = NULL;
 	/*
 	 * Look for an entry >= maxlen+alignment-1 blocks.
@@ -1372,8 +1372,8 @@ xfs_alloc_ag_vextent_size(
 	/*
 	 * Allocate and initialize a cursor for the by-block tree.
 	 */
-	bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO, NULL, 0);
+	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
+		args->agno, XFS_BTNUM_BNO);
 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
 		goto error0;
@@ -1515,8 +1515,7 @@ xfs_free_ag_extent(
 	/*
 	 * Allocate and initialize a cursor for the by-block btree.
 	 */
-	bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL,
-		0);
+	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
 	cnt_cur = NULL;
 	/*
 	 * Look for a neighboring block on the left (lower block numbers)
@@ -1575,8 +1574,7 @@ xfs_free_ag_extent(
 	/*
 	 * Now allocate and initialize a cursor for the by-size tree.
 	 */
-	cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL,
-		0);
+	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
 	/*
 	 * Have both left and right contiguous neighbors.
 	 * Merge all three into a single free block.
@@ -2209,3 +2209,48 @@ xfs_alloc_update(
 	}
 	return 0;
 }
+
+STATIC struct xfs_btree_cur *
+xfs_allocbt_dup_cursor(
+	struct xfs_btree_cur	*cur)
+{
+	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.a.agbp, cur->bc_private.a.agno,
+			cur->bc_btnum);
+}
+
+static const struct xfs_btree_ops xfs_allocbt_ops = {
+	.dup_cursor		= xfs_allocbt_dup_cursor,
+};
+
+/*
+ * Allocate a new allocation btree cursor.
+ */
+struct xfs_btree_cur *			/* new alloc btree cursor */
+xfs_allocbt_init_cursor(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	struct xfs_buf		*agbp,	/* buffer for agf structure */
+	xfs_agnumber_t		agno,	/* allocation group number */
+	xfs_btnum_t		btnum)	/* btree identifier */
+{
+	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_btree_cur	*cur;
+
+	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
+
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
+	cur->bc_btnum = btnum;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+	cur->bc_ops = &xfs_allocbt_ops;
+
+	cur->bc_private.a.agbp = agbp;
+	cur->bc_private.a.agno = agno;
+
+	return cur;
+}
@@ -152,4 +152,9 @@ extern int xfs_alloc_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
 extern int xfs_alloc_update(struct xfs_btree_cur *cur, xfs_agblock_t bno,
 		xfs_extlen_t len);
 
+extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
+		struct xfs_trans *, struct xfs_buf *,
+		xfs_agnumber_t, xfs_btnum_t);
+
 #endif	/* __XFS_ALLOC_BTREE_H__ */
@@ -422,8 +422,7 @@ xfs_bmap_add_attrfork_btree(
 	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
 		*flags |= XFS_ILOG_DBROOT;
 	else {
-		cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
-			XFS_DATA_FORK);
+		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 		cur->bc_private.b.flist = flist;
 		cur->bc_private.b.firstblock = *firstblock;
 		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
@@ -3441,8 +3440,7 @@ xfs_bmap_extents_to_btree(
 	 * Need a cursor.  Can't allocate until bb_level is filled in.
 	 */
 	mp = ip->i_mount;
-	cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
-		whichfork);
+	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 	cur->bc_private.b.firstblock = *firstblock;
 	cur->bc_private.b.flist = flist;
 	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
@@ -5029,8 +5027,7 @@ xfs_bmapi(
 			if (abno == NULLFSBLOCK)
 				break;
 			if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
-				cur = xfs_btree_init_cursor(mp,
-					tp, NULL, 0, XFS_BTNUM_BMAP,
+				cur = xfs_bmbt_init_cursor(mp, tp,
 					ip, whichfork);
 				cur->bc_private.b.firstblock =
 					*firstblock;
@@ -5147,9 +5144,8 @@ xfs_bmapi(
 			 */
 			ASSERT(mval->br_blockcount <= len);
 			if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
-				cur = xfs_btree_init_cursor(mp,
-					tp, NULL, 0, XFS_BTNUM_BMAP,
-					ip, whichfork);
+				cur = xfs_bmbt_init_cursor(mp,
+					tp, ip, whichfork);
 				cur->bc_private.b.firstblock =
 					*firstblock;
 				cur->bc_private.b.flist = flist;
@@ -5440,8 +5436,7 @@ xfs_bunmapi(
 	logflags = 0;
 	if (ifp->if_flags & XFS_IFBROOT) {
 		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
-		cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
-			whichfork);
+		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
 		cur->bc_private.b.firstblock = *firstblock;
 		cur->bc_private.b.flist = flist;
 		cur->bc_private.b.flags = 0;
@@ -2608,3 +2608,62 @@ xfs_check_nostate_extents(
 	}
 	return 0;
 }
+
+STATIC struct xfs_btree_cur *
+xfs_bmbt_dup_cursor(
+	struct xfs_btree_cur	*cur)
+{
+	struct xfs_btree_cur	*new;
+
+	new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+
+	/*
+	 * Copy the firstblock, flist, and flags values,
+	 * since init cursor doesn't get them.
+	 */
+	new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
+	new->bc_private.b.flist = cur->bc_private.b.flist;
+	new->bc_private.b.flags = cur->bc_private.b.flags;
+
+	return new;
+}
+
+static const struct xfs_btree_ops xfs_bmbt_ops = {
+	.dup_cursor		= xfs_bmbt_dup_cursor,
+};
+
+/*
+ * Allocate a new bmap btree cursor.
+ */
+struct xfs_btree_cur *			/* new bmap btree cursor */
+xfs_bmbt_init_cursor(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	struct xfs_inode	*ip,	/* inode owning the btree */
+	int			whichfork) /* data or attr fork */
+{
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	struct xfs_btree_cur	*cur;
+
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
+	cur->bc_btnum = XFS_BTNUM_BMAP;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+	cur->bc_ops = &xfs_bmbt_ops;
+
+	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
+	cur->bc_private.b.ip = ip;
+	cur->bc_private.b.firstblock = NULLFSBLOCK;
+	cur->bc_private.b.flist = NULL;
+	cur->bc_private.b.allocated = 0;
+	cur->bc_private.b.flags = 0;
+	cur->bc_private.b.whichfork = whichfork;
+
+	return cur;
+}
@@ -24,6 +24,7 @@ struct xfs_btree_cur;
 struct xfs_btree_lblock;
 struct xfs_mount;
 struct xfs_inode;
+struct xfs_trans;
 
 /*
  * Bmap root header, on-disk form only.
@@ -300,6 +301,9 @@ extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int);
 extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t,
 		xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t);
 
+extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
+		struct xfs_trans *, struct xfs_inode *, int);
+
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_BMAP_BTREE_H__ */
@@ -387,16 +387,17 @@ xfs_btree_dup_cursor(
 	tp = cur->bc_tp;
 	mp = cur->bc_mp;
 	/*
 	 * Allocate a new cursor like the old one.
 	 */
-	new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp,
-		cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip,
-		cur->bc_private.b.whichfork);
+	new = cur->bc_ops->dup_cursor(cur);
 	/*
 	 * Copy the record currently in the cursor.
 	 */
 	new->bc_rec = cur->bc_rec;
 	/*
 	 * For each level current, re-get the buffer and copy the ptr value.
 	 */
@@ -416,15 +417,6 @@ xfs_btree_dup_cursor(
 		} else
 			new->bc_bufs[i] = NULL;
 	}
-	/*
-	 * For bmap btrees, copy the firstblock, flist, and flags values,
-	 * since init cursor doesn't get them.
-	 */
-	if (new->bc_btnum == XFS_BTNUM_BMAP) {
-		new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
-		new->bc_private.b.flist = cur->bc_private.b.flist;
-		new->bc_private.b.flags = cur->bc_private.b.flags;
-	}
 	*ncur = new;
 	return 0;
 }
@@ -504,97 +496,6 @@ xfs_btree_get_bufs(
 	return bp;
 }
 
-/*
- * Allocate a new btree cursor.
- * The cursor is either for allocation (A) or bmap (B) or inodes (I).
- */
-xfs_btree_cur_t *			/* new btree cursor */
-xfs_btree_init_cursor(
-	xfs_mount_t	*mp,		/* file system mount point */
-	xfs_trans_t	*tp,		/* transaction pointer */
-	xfs_buf_t	*agbp,		/* (A only) buffer for agf structure */
-					/* (I only) buffer for agi structure */
-	xfs_agnumber_t	agno,		/* (AI only) allocation group number */
-	xfs_btnum_t	btnum,		/* btree identifier */
-	xfs_inode_t	*ip,		/* (B only) inode owning the btree */
-	int		whichfork)	/* (B only) data or attr fork */
-{
-	xfs_agf_t	*agf;		/* (A) allocation group freespace */
-	xfs_agi_t	*agi;		/* (I) allocation group inodespace */
-	xfs_btree_cur_t	*cur;		/* return value */
-	xfs_ifork_t	*ifp;		/* (I) inode fork pointer */
-	int		nlevels=0;	/* number of levels in the btree */
-
-	ASSERT(xfs_btree_cur_zone != NULL);
-	/*
-	 * Allocate a new cursor.
-	 */
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
-	/*
-	 * Deduce the number of btree levels from the arguments.
-	 */
-	switch (btnum) {
-	case XFS_BTNUM_BNO:
-	case XFS_BTNUM_CNT:
-		agf = XFS_BUF_TO_AGF(agbp);
-		nlevels = be32_to_cpu(agf->agf_levels[btnum]);
-		break;
-	case XFS_BTNUM_BMAP:
-		ifp = XFS_IFORK_PTR(ip, whichfork);
-		nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
-		break;
-	case XFS_BTNUM_INO:
-		agi = XFS_BUF_TO_AGI(agbp);
-		nlevels = be32_to_cpu(agi->agi_level);
-		break;
-	default:
-		ASSERT(0);
-	}
-	/*
-	 * Fill in the common fields.
-	 */
-	cur->bc_tp = tp;
-	cur->bc_mp = mp;
-	cur->bc_nlevels = nlevels;
-	cur->bc_btnum = btnum;
-	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-	/*
-	 * Fill in private fields.
-	 */
-	switch (btnum) {
-	case XFS_BTNUM_BNO:
-	case XFS_BTNUM_CNT:
-		/*
-		 * Allocation btree fields.
-		 */
-		cur->bc_private.a.agbp = agbp;
-		cur->bc_private.a.agno = agno;
-		break;
-	case XFS_BTNUM_INO:
-		/*
-		 * Inode allocation btree fields.
-		 */
-		cur->bc_private.a.agbp = agbp;
-		cur->bc_private.a.agno = agno;
-		break;
-	case XFS_BTNUM_BMAP:
-		/*
-		 * Bmap btree fields.
-		 */
-		cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
-		cur->bc_private.b.ip = ip;
-		cur->bc_private.b.firstblock = NULLFSBLOCK;
-		cur->bc_private.b.flist = NULL;
-		cur->bc_private.b.allocated = 0;
-		cur->bc_private.b.flags = 0;
-		cur->bc_private.b.whichfork = whichfork;
-		break;
-	default:
-		ASSERT(0);
-	}
-	return cur;
-}
-
 /*
  * Check for the cursor referring to the last block at the given level.
  */
@@ -131,6 +131,11 @@ extern const __uint32_t xfs_magics[];
 
 #define	XFS_BTREE_MAXLEVELS	8	/* max of all btrees */
 
+struct xfs_btree_ops {
+	/* cursor operations */
+	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
+};
+
 /*
  * Btree cursor structure.
  * This collects all information needed by the btree code in one place.
@@ -139,6 +144,7 @@ typedef struct xfs_btree_cur
 {
 	struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
 	struct xfs_mount	*bc_mp;	/* file system mount struct */
+	const struct xfs_btree_ops *bc_ops;
 	union {
 		xfs_alloc_rec_incore_t	a;
 		xfs_bmbt_irec_t		b;
@@ -307,20 +313,6 @@ xfs_btree_get_bufs(
 	xfs_agblock_t		agbno,	/* allocation group block number */
 	uint			lock);	/* lock flags for get_buf */
 
-/*
- * Allocate a new btree cursor.
- * The cursor is either for allocation (A) or bmap (B).
- */
-xfs_btree_cur_t *			/* new btree cursor */
-xfs_btree_init_cursor(
-	struct xfs_mount	*mp,	/* file system mount point */
-	struct xfs_trans	*tp,	/* transaction pointer */
-	struct xfs_buf		*agbp,	/* (A only) buffer for agf structure */
-	xfs_agnumber_t		agno,	/* (A only) allocation group number */
-	xfs_btnum_t		btnum,	/* btree identifier */
-	struct xfs_inode	*ip,	/* (B only) inode owning the btree */
-	int			whichfork); /* (B only) data/attr fork */
-
 /*
  * Check for the cursor referring to the last block at the given level.
  */
@@ -335,8 +335,7 @@ xfs_ialloc_ag_alloc(
 	/*
 	 * Insert records describing the new inode chunk into the btree.
 	 */
-	cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno,
-			XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+	cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno);
 	for (thisino = newino;
 	     thisino < newino + newlen;
 	     thisino += XFS_INODES_PER_CHUNK) {
@@ -676,8 +675,7 @@ xfs_dialloc(
 	 */
 	agno = tagno;
 	*IO_agbp = NULL;
-	cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno),
-		XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+	cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
 	/*
 	 * If pagino is 0 (this is the root inode allocation) use newino.
 	 * This must work because we've just allocated some.
@@ -1022,8 +1020,7 @@ xfs_difree(
 	/*
 	 * Initialize the cursor.
 	 */
-	cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
-		(xfs_inode_t *)0, 0);
+	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
 #ifdef DEBUG
 	if (cur->bc_nlevels == 1) {
 		int freecount = 0;
@@ -1259,8 +1256,7 @@ xfs_dilocate(
 #endif /* DEBUG */
 		return error;
 	}
-	cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO,
-		(xfs_inode_t *)0, 0);
+	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
 	if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) {
 #ifdef DEBUG
 		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: "
@@ -2076,3 +2076,44 @@ xfs_inobt_update(
 	}
 	return 0;
 }
+
+STATIC struct xfs_btree_cur *
+xfs_inobt_dup_cursor(
+	struct xfs_btree_cur	*cur)
+{
+	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
+			cur->bc_private.a.agbp, cur->bc_private.a.agno);
+}
+
+static const struct xfs_btree_ops xfs_inobt_ops = {
+	.dup_cursor		= xfs_inobt_dup_cursor,
+};
+
+/*
+ * Allocate a new inode btree cursor.
+ */
+struct xfs_btree_cur *			/* new inode btree cursor */
+xfs_inobt_init_cursor(
+	struct xfs_mount	*mp,	/* file system mount point */
+	struct xfs_trans	*tp,	/* transaction pointer */
+	struct xfs_buf		*agbp,	/* buffer for agi structure */
+	xfs_agnumber_t		agno)	/* allocation group number */
+{
+	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
+	struct xfs_btree_cur	*cur;
+
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_nlevels = be32_to_cpu(agi->agi_level);
+	cur->bc_btnum = XFS_BTNUM_INO;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+	cur->bc_ops = &xfs_inobt_ops;
+
+	cur->bc_private.a.agbp = agbp;
+	cur->bc_private.a.agno = agno;
+
+	return cur;
+}
@@ -175,4 +175,8 @@ extern int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino,
 extern int xfs_inobt_update(struct xfs_btree_cur *cur, xfs_agino_t ino,
 		__int32_t fcnt, xfs_inofree_t free);
 
+extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
+		struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
+
 #endif	/* __XFS_IALLOC_BTREE_H__ */
@@ -416,8 +416,7 @@ xfs_bulkstat(
 		/*
 		 * Allocate and initialize a btree cursor for ialloc btree.
 		 */
-		cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO,
-				(xfs_inode_t *)0, 0);
+		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
 		irbp = irbuf;
 		irbufend = irbuf + nirbuf;
 		end_of_ag = 0;
@@ -842,8 +841,7 @@ xfs_inumbers(
 				agino = 0;
 				continue;
 			}
-			cur = xfs_btree_init_cursor(mp, NULL, agbp, agno,
-				XFS_BTNUM_INO, (xfs_inode_t *)0, 0);
+			cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno);
 			error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp);
 			if (error) {
 				xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);