Commit ab108d99 authored by David Sterba

btrfs: use precalculated sectorsize_bits from fs_info

We do a lot of calculations where we divide or multiply by sectorsize.
We also know and make sure that sectorsize is a power of two, so all of
those divisions can be turned into shifts, avoiding e.g. expensive
u64/u32 divisions.
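
As an illustration, a minimal standalone sketch (plain userspace C, not
btrfs code; the helper and variable names are made up) of why a
power-of-two divisor lets the division be replaced by a shift:

#include <assert.h>
#include <stdint.h>

/* Count the log2 of a power-of-two value, like the kernel's ilog2(). */
static uint32_t log2_of(uint32_t v)
{
	uint32_t bits = 0;

	while (v > 1) {
		v >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	const uint32_t sectorsize = 4096;	/* must be a power of two */
	const uint32_t sectorsize_bits = log2_of(sectorsize);	/* 12 */
	const uint64_t bytes = 123456789ULL;

	/* The shift gives the same result as the division. */
	assert((bytes / sectorsize) == (bytes >> sectorsize_bits));
	return 0;
}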

The type is u32 as it's more register friendly on x86_64 compared to u8
and the resulting assembly is smaller (movzbl vs movl).
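
As a rough sketch of that trade-off (the struct and helper below are
hypothetical and only mirror the cached fields, they are not the kernel
definitions):

#include <stdint.h>

struct example_fs_info {
	uint32_t sectorsize;		/* e.g. 4096 */
	/*
	 * A u8 could also hold ilog2(4096) == 12, but on x86_64 loading a
	 * byte field for a 64-bit shift needs a zero-extending movzbl,
	 * while a u32 field loads with a plain movl.
	 */
	uint32_t sectorsize_bits;	/* ilog2(sectorsize) */
};

/* Shifting by the cached bit count avoids a u64/u32 division. */
static inline uint64_t bytes_to_sectors(const struct example_fs_info *fi,
					uint64_t bytes)
{
	return bytes >> fi->sectorsize_bits;
}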

There's also the superblock's s_blocksize_bits, but it's usually one
more pointer dereference away than fs_info.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent e940e9a7
@@ -935,6 +935,8 @@ struct btrfs_fs_info {
 	/* Cached block sizes */
 	u32 nodesize;
 	u32 sectorsize;
+	/* ilog2 of sectorsize, use to avoid 64bit division */
+	u32 sectorsize_bits;
 	u32 stripesize;
 	/* Block groups and devices containing active swapfiles. */
@@ -2814,6 +2814,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	/* Usable values until the real ones are cached from the superblock */
 	fs_info->nodesize = 4096;
 	fs_info->sectorsize = 4096;
+	fs_info->sectorsize_bits = ilog2(4096);
 	fs_info->stripesize = 4096;
 	spin_lock_init(&fs_info->swapfile_pins_lock);
@@ -3078,6 +3079,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	/* Cache block sizes */
 	fs_info->nodesize = nodesize;
 	fs_info->sectorsize = sectorsize;
+	fs_info->sectorsize_bits = ilog2(sectorsize);
 	fs_info->stripesize = stripesize;
 	/*
@@ -2145,7 +2145,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
 	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
 	num_csums_per_leaf = div64_u64(csum_size,
 			(u64)btrfs_super_csum_size(fs_info->super_copy));
-	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
+	num_csums = csum_bytes >> fs_info->sectorsize_bits;
 	num_csums += num_csums_per_leaf - 1;
 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
 	return num_csums;
@@ -369,7 +369,7 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 		 * a single leaf so it will also fit inside a u32
 		 */
 		diff = disk_bytenr - item_start_offset;
-		diff = diff / fs_info->sectorsize;
+		diff = diff >> fs_info->sectorsize_bits;
 		diff = diff * csum_size;
 		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
 					    inode->i_sb->s_blocksize_bits);
@@ -416,7 +416,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
-	nrbits = div_u64(block_group->length, block_group->fs_info->sectorsize);
+	nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
 	start_bit = find_next_bit_le(bitmap, nrbits, 0);
 	while (start_bit < nrbits) {
@@ -540,8 +540,8 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
 		end = found_end;
 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
-	first = div_u64(*start - found_start, fs_info->sectorsize);
-	last = div_u64(end - found_start, fs_info->sectorsize);
+	first = (*start - found_start) >> fs_info->sectorsize_bits;
+	last = (end - found_start) >> fs_info->sectorsize_bits;
 	if (bit)
 		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
 	else
@@ -868,7 +868,6 @@ int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
 	struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
 	unsigned long num_sectors;
 	unsigned long i;
-	u32 sectorsize = btrfs_inode_sectorsize(inode);
 	const u8 blocksize_bits = inode->vfs_inode.i_sb->s_blocksize_bits;
 	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	int index = 0;
@@ -890,7 +889,7 @@ int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
 			index += (int)num_sectors * csum_size;
 			if (index == len)
 				goto out;
-			disk_bytenr += num_sectors * sectorsize;
+			disk_bytenr += num_sectors * fs_info->sectorsize;
 		}
 	}
 out:
@@ -2300,7 +2300,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 	u64 offset;
 	u64 nsectors64;
 	u32 nsectors;
-	int sectorsize = sparity->sctx->fs_info->sectorsize;
+	u32 sectorsize_bits = sparity->sctx->fs_info->sectorsize_bits;
 	if (len >= sparity->stripe_len) {
 		bitmap_set(bitmap, 0, sparity->nsectors);
@@ -2309,8 +2309,8 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 	start -= sparity->logic_start;
 	start = div64_u64_rem(start, sparity->stripe_len, &offset);
-	offset = div_u64(offset, sectorsize);
-	nsectors64 = div_u64(len, sectorsize);
+	offset = offset >> sectorsize_bits;
+	nsectors64 = len >> sectorsize_bits;
 	ASSERT(nsectors64 < UINT_MAX);
 	nsectors = (u32)nsectors64;
@@ -2386,10 +2386,10 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
 	if (!sum)
 		return 0;
-	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
+	index = (logical - sum->bytenr) >> sctx->fs_info->sectorsize_bits;
 	ASSERT(index < UINT_MAX);
-	num_sectors = sum->len / sctx->fs_info->sectorsize;
+	num_sectors = sum->len >> sctx->fs_info->sectorsize_bits;
 	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
 	if (index == num_sectors - 1) {
 		list_del(&sum->list);
@@ -2776,7 +2776,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 	int extent_mirror_num;
 	int stop_loop = 0;
-	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
+	nsectors = map->stripe_len >> fs_info->sectorsize_bits;
 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
 			  GFP_NOFS);
@@ -134,6 +134,7 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
 	fs_info->nodesize = nodesize;
 	fs_info->sectorsize = sectorsize;
+	fs_info->sectorsize_bits = ilog2(sectorsize);
 	set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
 	test_mnt->mnt_sb->s_fs_info = fs_info;