Commit 83dcdb44 authored by Darrick J. Wong

xfs: precalculate inodes and blocks per inode cluster

Store the number of inodes and blocks per inode cluster in the mount
data so that we don't have to keep recalculating them.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
parent 43004b2a
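
For context, both cached fields are simple functions of mount-time geometry. The standalone sketch below works through what the two helpers compute for a common configuration; the numeric values are illustrative mkfs-style choices, not taken from this commit:

	#include <stdio.h>

	/*
	 * Worked example of the cached cluster geometry, using illustrative
	 * values: 4096-byte blocks, 8192-byte inode cluster buffers,
	 * 512-byte inodes.  Not kernel code.
	 */
	int
	main(void)
	{
		unsigned int	blocksize = 4096;	/* sb_blocksize */
		unsigned int	cluster_size = 8192;	/* m_inode_cluster_size */
		unsigned int	inodesize = 512;	/* sb_inodesize */

		/* roughly what xfs_icluster_size_fsb() yields here */
		unsigned int	blocks_per_cluster = cluster_size / blocksize;

		/* roughly what XFS_FSB_TO_INO() yields: blocks * inodes/block */
		unsigned int	inodes_per_cluster =
				blocks_per_cluster * (blocksize / inodesize);

		printf("m_blocks_per_cluster = %u\n", blocks_per_cluster); /* 2 */
		printf("m_inodes_per_cluster = %u\n", inodes_per_cluster); /* 16 */
		return 0;
	}
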
@@ -288,7 +288,7 @@ xfs_ialloc_inode_init(
 {
 	struct xfs_buf		*fbuf;
 	struct xfs_dinode	*free;
-	int			nbufs, blks_per_cluster, inodes_per_cluster;
+	int			nbufs;
 	int			version;
 	int			i, j;
 	xfs_daddr_t		d;
@@ -299,9 +299,7 @@ xfs_ialloc_inode_init(
 	 * sizes, manipulate the inodes in buffers which are multiples of the
 	 * blocks size.
 	 */
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	inodes_per_cluster = XFS_FSB_TO_INO(mp, blks_per_cluster);
-	nbufs = length / blks_per_cluster;
+	nbufs = length / mp->m_blocks_per_cluster;

 	/*
 	 * Figure out what version number to use in the inodes we create.  If
@@ -344,9 +342,10 @@ xfs_ialloc_inode_init(
 		/*
 		 * Get the block.
 		 */
-		d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
+		d = XFS_AGB_TO_DADDR(mp, agno, agbno +
+				(j * mp->m_blocks_per_cluster));
 		fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-					 mp->m_bsize * blks_per_cluster,
+					 mp->m_bsize * mp->m_blocks_per_cluster,
 					 XBF_UNMAPPED);
 		if (!fbuf)
 			return -ENOMEM;
@@ -354,7 +353,7 @@ xfs_ialloc_inode_init(
 		/* Initialize the inode buffers and log them appropriately. */
 		fbuf->b_ops = &xfs_inode_buf_ops;
 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
-		for (i = 0; i < inodes_per_cluster; i++) {
+		for (i = 0; i < mp->m_inodes_per_cluster; i++) {
 			int	ioffset = i << mp->m_sb.sb_inodelog;
 			uint	isize = xfs_dinode_size(version);
@@ -2289,7 +2288,6 @@ xfs_imap(
 	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
 	xfs_agino_t	agino;	/* inode number within alloc group */
 	xfs_agnumber_t	agno;	/* allocation group number */
-	int		blks_per_cluster; /* num blocks per inode cluster */
 	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
 	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
 	int		error;	/* error code */
@@ -2335,8 +2333,6 @@ xfs_imap(
 		return -EINVAL;
 	}

-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-
 	/*
 	 * For bulkstat and handle lookups, we have an untrusted inode number
 	 * that we have to verify is valid.  We cannot do this just by reading
@@ -2356,7 +2352,7 @@ xfs_imap(
 	 * If the inode cluster size is the same as the blocksize or
 	 * smaller we get to the buffer by simple arithmetics.
 	 */
-	if (blks_per_cluster == 1) {
+	if (mp->m_blocks_per_cluster == 1) {
 		offset = XFS_INO_TO_OFFSET(mp, ino);
 		ASSERT(offset < mp->m_sb.sb_inopblock);
@@ -2385,12 +2381,13 @@ xfs_imap(
 out_map:
 	ASSERT(agbno >= chunk_agbno);
 	cluster_agbno = chunk_agbno +
-		((offset_agbno / blks_per_cluster) * blks_per_cluster);
+		((offset_agbno / mp->m_blocks_per_cluster) *
+		 mp->m_blocks_per_cluster);
 	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
 		XFS_INO_TO_OFFSET(mp, ino);

 	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
-	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+	imap->im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
 	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);

 	/*
...
@@ -193,18 +193,16 @@ xchk_iallocbt_check_freemask(
 	xfs_agino_t		chunkino;
 	xfs_agino_t		clusterino;
 	xfs_agblock_t		agbno;
-	int			blks_per_cluster;
 	uint16_t		holemask;
 	uint16_t		ir_holemask;
 	int			error = 0;

 	/* Make sure the freemask matches the inode records. */
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	nr_inodes = XFS_FSB_TO_INO(mp, blks_per_cluster);
+	nr_inodes = mp->m_inodes_per_cluster;

 	for (agino = irec->ir_startino;
 	     agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
-	     agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
+	     agino += mp->m_inodes_per_cluster) {
 		fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
 		chunkino = agino - irec->ir_startino;
 		agbno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -225,18 +223,18 @@ xchk_iallocbt_check_freemask(
 		/* If any part of this is a hole, skip it. */
 		if (ir_holemask) {
 			xchk_xref_is_not_owned_by(bs->sc, agbno,
-					blks_per_cluster,
+					mp->m_blocks_per_cluster,
 					&XFS_RMAP_OINFO_INODES);
 			continue;
 		}

-		xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
+		xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
 				&XFS_RMAP_OINFO_INODES);

 		/* Grab the inode cluster buffer. */
 		imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
 				agbno);
-		imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+		imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
 		imap.im_boffset = 0;

 		error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
@@ -303,7 +301,7 @@ xchk_iallocbt_rec(
 	/* Make sure this record is aligned to cluster and inoalignmnt size. */
 	agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
 	if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
-	    (agbno & (xfs_icluster_size_fsb(mp) - 1)))
+	    (agbno & (mp->m_blocks_per_cluster - 1)))
 		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

 	*inode_blocks += XFS_B_TO_FSB(mp,
...
@@ -2184,8 +2184,6 @@ xfs_ifree_cluster(
 	struct xfs_icluster	*xic)
 {
 	xfs_mount_t		*mp = free_ip->i_mount;
-	int			blks_per_cluster;
-	int			inodes_per_cluster;
 	int			nbufs;
 	int			i, j;
 	int			ioffset;
@@ -2199,11 +2197,9 @@ xfs_ifree_cluster(
 	inum = xic->first_ino;
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	inodes_per_cluster = XFS_FSB_TO_INO(mp, blks_per_cluster);
-	nbufs = mp->m_ialloc_blks / blks_per_cluster;
+	nbufs = mp->m_ialloc_blks / mp->m_blocks_per_cluster;

-	for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
+	for (j = 0; j < nbufs; j++, inum += mp->m_inodes_per_cluster) {
 		/*
 		 * The allocation bitmap tells us which inodes of the chunk were
 		 * physically allocated. Skip the cluster if an inode falls into
@@ -2211,7 +2207,7 @@ xfs_ifree_cluster(
 		 */
 		ioffset = inum - xic->first_ino;
 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
-			ASSERT(ioffset % inodes_per_cluster == 0);
+			ASSERT(ioffset % mp->m_inodes_per_cluster == 0);
 			continue;
 		}
@@ -2227,7 +2223,7 @@ xfs_ifree_cluster(
 		 * to mark all the active inodes on the buffer stale.
 		 */
 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
-					mp->m_bsize * blks_per_cluster,
+					mp->m_bsize * mp->m_blocks_per_cluster,
 					XBF_UNMAPPED);
 		if (!bp)
@@ -2274,7 +2270,7 @@ xfs_ifree_cluster(
 		 * transaction stale above, which means there is no point in
 		 * even trying to lock them.
 		 */
-		for (i = 0; i < inodes_per_cluster; i++) {
+		for (i = 0; i < mp->m_inodes_per_cluster; i++) {
retry:
 			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
...
@@ -167,19 +167,17 @@ xfs_bulkstat_ichunk_ra(
 {
 	xfs_agblock_t		agbno;
 	struct blk_plug		plug;
-	int			blks_per_cluster;
-	int			inodes_per_cluster;
 	int			i;	/* inode chunk index */

 	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	inodes_per_cluster = XFS_FSB_TO_INO(mp, blks_per_cluster);

 	blk_start_plug(&plug);
 	for (i = 0; i < XFS_INODES_PER_CHUNK;
-	     i += inodes_per_cluster, agbno += blks_per_cluster) {
-		if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
-			xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
+	     i += mp->m_inodes_per_cluster, agbno += mp->m_blocks_per_cluster) {
+		if (xfs_inobt_maskn(i, mp->m_inodes_per_cluster) &
+				~irec->ir_free) {
+			xfs_btree_reada_bufs(mp, agno, agbno,
+					mp->m_blocks_per_cluster,
 					&xfs_inode_buf_ops);
 		}
 	}
...
@@ -3850,7 +3850,6 @@ xlog_recover_do_icreate_pass2(
 	unsigned int		count;
 	unsigned int		isize;
 	xfs_agblock_t		length;
-	int			blks_per_cluster;
 	int			bb_per_cluster;
 	int			cancel_count;
 	int			nbufs;
@@ -3918,14 +3917,13 @@ xlog_recover_do_icreate_pass2(
 	 * buffers for cancellation so we don't overwrite anything written after
 	 * a cancellation.
 	 */
-	blks_per_cluster = xfs_icluster_size_fsb(mp);
-	bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
-	nbufs = length / blks_per_cluster;
+	bb_per_cluster = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+	nbufs = length / mp->m_blocks_per_cluster;
 	for (i = 0, cancel_count = 0; i < nbufs; i++) {
 		xfs_daddr_t	daddr;

 		daddr = XFS_AGB_TO_DADDR(mp, agno,
-				agbno + i * blks_per_cluster);
+				agbno + i * mp->m_blocks_per_cluster);
 		if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
 			cancel_count++;
 	}
...
@@ -798,6 +798,8 @@ xfs_mountfs(
 		if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
 			mp->m_inode_cluster_size = new_size;
 	}
+	mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
+	mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);

 	/*
 	 * If enabled, sparse inode chunk alignment is expected to match the
...
@@ -101,6 +101,8 @@ typedef struct xfs_mount {
 	uint8_t			m_agno_log;	/* log #ag's */
 	uint8_t			m_agino_log;	/* #bits for agino in inum */
 	uint			m_inode_cluster_size;/* min inode buf size */
+	unsigned int		m_inodes_per_cluster;
+	unsigned int		m_blocks_per_cluster;
 	uint			m_blockmask;	/* sb_blocksize-1 */
 	uint			m_blockwsize;	/* sb_blocksize in words */
 	uint			m_blockwmask;	/* blockwsize-1 */
...
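
The converted call sites all share the same stride when walking the inode clusters of a chunk: inodes advance by m_inodes_per_cluster while blocks advance by m_blocks_per_cluster. A minimal sketch of that pattern, assuming the kernel types above and a hypothetical process_cluster() callback standing in for the per-cluster work:

	/*
	 * Illustrative walk over the inode clusters backing one inode chunk;
	 * process_cluster() is a hypothetical callback, not a kernel function.
	 */
	static void
	example_walk_chunk_clusters(
		struct xfs_mount	*mp,
		xfs_agblock_t		agbno,
		void			(*process_cluster)(xfs_agblock_t agbno,
							   unsigned int blocks))
	{
		unsigned int		i;

		/* step one cluster at a time: inodes in 'i', blocks in 'agbno' */
		for (i = 0; i < XFS_INODES_PER_CHUNK;
		     i += mp->m_inodes_per_cluster,
		     agbno += mp->m_blocks_per_cluster)
			process_cluster(agbno, mp->m_blocks_per_cluster);
	}
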