Commit 0703a8e1 authored by Dave Chinner, committed by Darrick J. Wong

xfs: replace do_mod with native operations

do_mod() is a hold-over from when we had different sizes for file
offsets and other internal values for 40 bit XFS filesystems.
Hence, depending on build flags, variables passed to do_mod() could
change size. We no longer support those small format filesystems,
and hence everything is of fixed size these days, even on 32 bit
platforms.

As such, we can convert all the do_mod() callers to platform
optimised modulus operations as defined by linux/math64.h.
Individual conversions depend on the types of variables being used.
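
As a hedged illustration of the conversion pattern (a userspace model, not
kernel code; the real div_u64_rem() lives in include/linux/math64.h, returns
the quotient, and stores the 32 bit remainder through its third argument;
the offset and extent size values below are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the math64.h helper. */
static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
			    uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

int main(void)
{
	uint64_t orig_off = 1000003;	/* hypothetical file offset, in fsblocks */
	uint32_t extsz = 4096;		/* hypothetical extent size hint */
	uint32_t temp;

	/* Old style: temp = do_mod(orig_off, extsz), dispatching on
	 * sizeof(temp). New style: the caller states the types explicitly. */
	div_u64_rem(orig_off, extsz, &temp);
	if (temp)
		printf("misaligned by %u blocks\n", temp);
	return 0;
}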
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent bb3d48dc
@@ -2923,7 +2923,7 @@ xfs_bmap_extsize_align(
 	 * perform this alignment, or if a truncate shot us in the
 	 * foot.
 	 */
-	temp = do_mod(orig_off, extsz);
+	div_u64_rem(orig_off, extsz, &temp);
 	if (temp) {
 		align_alen += temp;
 		align_off -= temp;
@@ -3497,15 +3497,17 @@ xfs_bmap_btalloc(
 	/* apply extent size hints if obtained earlier */
 	if (align) {
 		args.prod = align;
-		if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
-			args.mod = (xfs_extlen_t)(args.prod - args.mod);
+		div_u64_rem(ap->offset, args.prod, &args.mod);
+		if (args.mod)
+			args.mod = args.prod - args.mod;
 	} else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
 		args.prod = 1;
 		args.mod = 0;
 	} else {
 		args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
-		if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
-			args.mod = (xfs_extlen_t)(args.prod - args.mod);
+		div_u64_rem(ap->offset, args.prod, &args.mod);
+		if (args.mod)
+			args.mod = args.prod - args.mod;
 	}
 	/*
 	 * If we are not low on available data blocks, and the
@@ -4953,13 +4955,15 @@ xfs_bmap_del_extent_real(
 	if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
 		xfs_fsblock_t	bno;
 		xfs_filblks_t	len;
+		xfs_extlen_t	mod;
+
+		bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
+				  &mod);
+		ASSERT(mod == 0);
+		len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
+				  &mod);
+		ASSERT(mod == 0);
 
-		ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
-		ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
-		bno = del->br_startblock;
-		len = del->br_blockcount;
-		do_div(bno, mp->m_sb.sb_rextsize);
-		do_div(len, mp->m_sb.sb_rextsize);
 		error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
 		if (error)
 			goto done;
@@ -5296,9 +5300,12 @@ __xfs_bunmapi(
 			del.br_blockcount = max_len;
 		}
 
+		if (!isrt)
+			goto delete;
+
 		sum = del.br_startblock + del.br_blockcount;
-		if (isrt &&
-		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
+		div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
+		if (mod) {
 			/*
 			 * Realtime extent not lined up at the end.
 			 * The extent could have been split into written
@@ -5345,7 +5352,8 @@ __xfs_bunmapi(
 				goto error0;
 			goto nodelete;
 		}
-		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
+		div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
+		if (mod) {
 			/*
 			 * Realtime extent is lined up at the end but not
 			 * at the front. We'll get rid of full extents if
@@ -5414,6 +5422,7 @@ __xfs_bunmapi(
 			}
 		}
 
+delete:
 		if (wasdel) {
 			error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
 					&got, &del);
...
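A quick note on the xfs_bmap_del_extent_real() hunk above: div_u64_rem()
returns the quotient, so a single call replaces the old do_div()/do_mod()
pair. A minimal userspace sketch of that shape (the helper model and the
extent values are illustrative):

#include <assert.h>
#include <stdint.h>

/* Userspace model of div_u64_rem(): quotient returned, remainder stored. */
static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor,
			    uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

int main(void)
{
	uint32_t rextsize = 16;	/* hypothetical rt extent size, in blocks */
	uint32_t mod;

	/* As in xfs_bmap_del_extent_real(): one call yields the rtextent
	 * number and lets the caller assert the extent was aligned. */
	uint64_t bno = div_u64_rem(4096, rextsize, &mod);
	assert(mod == 0 && bno == 256);
	return 0;
}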
@@ -80,6 +80,7 @@ xfs_bmap_rtalloc(
 	int		error;		/* error return value */
 	xfs_mount_t	*mp;		/* mount point structure */
 	xfs_extlen_t	prod = 0;	/* product factor for allocators */
+	xfs_extlen_t	mod = 0;	/* product factor for allocators */
 	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
 	xfs_extlen_t	align;		/* minimum allocation alignment */
 	xfs_rtblock_t	rtb;
@@ -99,7 +100,8 @@ xfs_bmap_rtalloc(
 	 * If the offset & length are not perfectly aligned
 	 * then kill prod, it will just get us in trouble.
 	 */
-	if (do_mod(ap->offset, align) || ap->length % align)
+	div_u64_rem(ap->offset, align, &mod);
+	if (mod || ap->length % align)
 		prod = 1;
 	/*
 	 * Set ralen to be the actual requested length in rtextents.
@@ -936,9 +938,11 @@ xfs_alloc_file_space(
 			do_div(s, extsz);
 			s *= extsz;
 			e = startoffset_fsb + allocatesize_fsb;
-			if ((temp = do_mod(startoffset_fsb, extsz)))
+			div_u64_rem(startoffset_fsb, extsz, &temp);
+			if (temp)
 				e += temp;
-			if ((temp = do_mod(e, extsz)))
+			div_u64_rem(e, extsz, &temp);
+			if (temp)
 				e += extsz - temp;
 		} else {
 			s = 0;
@@ -1099,7 +1103,7 @@ xfs_adjust_extent_unmap_boundaries(
 	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
 		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
-		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
+		div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
 		if (mod)
 			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
 	}
...
@@ -2258,7 +2258,7 @@ xfs_ifree_cluster(
 		 */
 		ioffset = inum - xic->first_ino;
 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
-			ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
+			ASSERT(ioffset % inodes_per_cluster == 0);
 			continue;
 		}
...
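The xfs_ifree_cluster() hunk above is the one caller that became a plain C
'%': presumably the operands there are native width, so no helper is needed.
As a rule of thumb (an illustration, not kernel code), a 64 bit '%' on a
32 bit kernel would expand to a libgcc call, which is why wide remainders go
through the math64.h helpers instead:

#include <assert.h>
#include <stdint.h>

/* Fine everywhere: both operands fit a native 32 bit type. */
static int cluster_aligned(uint32_t ioffset, uint32_t inodes_per_cluster)
{
	return ioffset % inodes_per_cluster == 0;
}

int main(void)
{
	assert(cluster_aligned(64, 32));
	assert(!cluster_aligned(65, 32));
	return 0;
}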
@@ -30,10 +30,10 @@ xfs_aligned_fsb_count(
 	if (extsz) {
 		xfs_extlen_t	align;
 
-		align = do_mod(offset_fsb, extsz);
+		div_u64_rem(offset_fsb, extsz, &align);
 		if (align)
 			count_fsb += align;
-		align = do_mod(count_fsb, extsz);
+		div_u64_rem(count_fsb, extsz, &align);
 		if (align)
 			count_fsb += extsz - align;
 	}
...
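The xfs_aligned_fsb_count() hunk above is a round-to-extent-size calculation:
extend the count by the start's misalignment, then round the total up to a
whole number of extsz units. A simplified userspace sketch (types flattened
for illustration; the kernel uses div_u64_rem() where this uses plain '%'):

#include <assert.h>
#include <stdint.h>

static uint64_t aligned_fsb_count(uint64_t offset_fsb, uint64_t count_fsb,
				  uint32_t extsz)
{
	if (extsz) {
		uint32_t align = offset_fsb % extsz;

		if (align)	/* pull the start back to a boundary */
			count_fsb += align;
		align = count_fsb % extsz;
		if (align)	/* round the length up to a boundary */
			count_fsb += extsz - align;
	}
	return count_fsb;
}

int main(void)
{
	/* offset 5, count 6, extsz 4: aligned range [4, 12) = 8 blocks */
	assert(aligned_fsb_count(5, 6, 4) == 8);
	return 0;
}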
@@ -207,25 +207,6 @@ static inline xfs_dev_t linux_to_xfs_dev_t(dev_t dev)
 #define xfs_sort(a,n,s,fn)	sort(a,n,s,fn,NULL)
 #define xfs_stack_trace()	dump_stack()
 
-/* Side effect free 64 bit mod operation */
-static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
-{
-	switch (n) {
-	case 4:
-		return *(__u32 *)a % b;
-	case 8:
-		{
-		__u64	c = *(__u64 *)a;
-		return do_div(c, b);
-		}
-	}
-
-	/* NOTREACHED */
-	return 0;
-}
-#define do_mod(a, b)	xfs_do_mod(&(a), (b), sizeof(a))
-
 static inline uint64_t roundup_64(uint64_t x, uint32_t y)
 {
 	x += y - 1;
...
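Why the removed helper took its argument by address: the kernel's
do_div(n, base) macro divides in place, overwriting n with the quotient and
evaluating to the remainder, whereas xfs_do_mod() copied the value first so
callers got a side-effect-free remainder. A userspace model of the hazard
(the real macro takes the lvalue directly; a pointer is used here to mimic
the in-place update):

#include <assert.h>
#include <stdint.h>

/* Userspace model of do_div(): quotient replaces *n, remainder returned. */
static uint32_t model_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = *n % base;

	*n /= base;	/* side effect: caller's variable now holds quotient */
	return rem;
}

int main(void)
{
	uint64_t len = 1000;

	assert(model_do_div(&len, 16) == 8);	/* remainder */
	assert(len == 62);			/* len was clobbered */
	return 0;
}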
@@ -1235,6 +1235,25 @@ xlog_verify_head(
 				     be32_to_cpu((*rhead)->h_size));
 }
 
+/*
+ * We need to make sure we handle log wrapping properly, so we can't use the
+ * calculated logbno directly. Make sure it wraps to the correct bno inside the
+ * log.
+ *
+ * The log is limited to 32 bit sizes, so we use the appropriate modulus
+ * operation here and cast it back to a 64 bit daddr on return.
+ */
+static inline xfs_daddr_t
+xlog_wrap_logbno(
+	struct xlog		*log,
+	xfs_daddr_t		bno)
+{
+	int			mod;
+
+	div_s64_rem(bno, log->l_logBBsize, &mod);
+	return mod;
+}
+
 /*
  * Check whether the head of the log points to an unmount record. In other
  * words, determine whether the log is clean. If so, update the in-core state
@@ -1283,12 +1302,13 @@ xlog_check_unmount_rec(
 	} else {
 		hblks = 1;
 	}
-	after_umount_blk = rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len));
-	after_umount_blk = do_mod(after_umount_blk, log->l_logBBsize);
+
+	after_umount_blk = xlog_wrap_logbno(log,
+			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
+
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
-		umount_data_blk = rhead_blk + hblks;
-		umount_data_blk = do_mod(umount_data_blk, log->l_logBBsize);
+		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
 		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
 		if (error)
 			return error;
@@ -5459,9 +5479,7 @@ xlog_do_recovery_pass(
 			 */
 			if (blk_no + bblks <= log->l_logBBsize ||
 			    blk_no >= log->l_logBBsize) {
-				/* mod blk_no in case the header wrapped and
-				 * pushed it beyond the end of the log */
-				rblk_no = do_mod(blk_no, log->l_logBBsize);
+				rblk_no = xlog_wrap_logbno(log, blk_no);
 				error = xlog_bread(log, rblk_no, bblks, dbp,
 						   &offset);
 				if (error)
...
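To see what the new xlog_wrap_logbno() helper does with those block numbers,
here is a userspace model (the kernel version uses div_s64_rem(), since the
log size fits in 32 bits while daddrs are 64 bits; the log size below is
hypothetical):

#include <assert.h>
#include <stdint.h>

/* Userspace model of xlog_wrap_logbno(): wrap a computed block number
 * back into [0, logBBsize), returning it as a 64 bit daddr. */
static int64_t wrap_logbno(int32_t logBBsize, int64_t bno)
{
	int32_t mod = bno % logBBsize;	/* div_s64_rem() in the kernel */

	return mod;
}

int main(void)
{
	int32_t logBBsize = 8192;	/* hypothetical log size in basic blocks */

	/* A record computed past the physical end wraps to the start. */
	assert(wrap_logbno(logBBsize, 8192 + 100) == 100);
	/* An in-range block number is unchanged. */
	assert(wrap_logbno(logBBsize, 4000) == 4000);
	return 0;
}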
@@ -301,8 +301,12 @@ xfs_rtallocate_extent_block(
 	/*
 	 * If size should be a multiple of prod, make that so.
 	 */
-	if (prod > 1 && (p = do_mod(bestlen, prod)))
-		bestlen -= p;
+	if (prod > 1) {
+		div_u64_rem(bestlen, prod, &p);
+		if (p)
+			bestlen -= p;
+	}
+
 	/*
 	 * Allocate besti for bestlen & return that.
 	 */
@@ -1263,7 +1267,7 @@ xfs_rtpick_extent(
 		b = (mp->m_sb.sb_rextents * ((resid << 1) + 1ULL)) >>
 			(log2 + 1);
 		if (b >= mp->m_sb.sb_rextents)
-			b = do_mod(b, mp->m_sb.sb_rextents);
+			div64_u64_rem(b, mp->m_sb.sb_rextents, &b);
 		if (b + len > mp->m_sb.sb_rextents)
 			b = mp->m_sb.sb_rextents - len;
 	}
...
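One last hedged note on helper selection in the realtime hunks: div_u64_rem()
takes a 32 bit divisor, while xfs_rtpick_extent() divides by sb_rextents,
which is 64 bits wide, hence the div64_u64_rem() variant. Passing &b as both
dividend and remainder is safe because the dividend is taken by value; a
userspace model with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Userspace model of div64_u64_rem(): full 64 bit divisor. */
static uint64_t div64_u64_rem(uint64_t dividend, uint64_t divisor,
			      uint64_t *remainder)
{
	*remainder = dividend % divisor;	/* dividend already copied */
	return dividend / divisor;
}

int main(void)
{
	uint64_t rextents = 1ULL << 40;	/* hypothetical 64 bit extent count */
	uint64_t b = (1ULL << 41) + 7;

	/* As in xfs_rtpick_extent(): the remainder lands back in b. */
	if (b >= rextents)
		div64_u64_rem(b, rextents, &b);
	assert(b == 7);
	return 0;
}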