Commit b3470b5d authored by David Sterba

btrfs: add dedicated members for start and length of a block group

The on-disk format of the block group item makes use of the key, which
stores the offset and length. The same key is further used in the code,
although this makes things harder to understand. The key is also packed,
so the offset/length are not properly aligned as u64.

Add start (key.objectid) and length (key.offset) members to the block
group and remove the embedded key. When the item is searched for or
written, a local key variable is used.
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 0222dfdd
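
For illustration, the new pattern looks roughly like the sketch below. The
helper name lookup_block_group_item is hypothetical and not taken from the
patch; only the shape of the code is the point:

	/* Sketch only: assemble a temporary key from the dedicated members
	 * whenever the block group item has to be located on disk. */
	static int lookup_block_group_item(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_block_group_cache *bg,
					   struct btrfs_path *path)
	{
		struct btrfs_key key;	/* local copy, properly aligned */

		key.objectid = bg->start;	/* was bg->key.objectid */
		key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
		key.offset = bg->length;	/* was bg->key.offset */

		return btrfs_search_slot(trans, root, &key, path, 0, 1);
	}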
This diff is collapsed.
@@ -43,10 +43,11 @@ struct btrfs_caching_control {
 #define CACHING_CTL_WAKE_UP	SZ_2M
 
 struct btrfs_block_group_cache {
-	struct btrfs_key key;
 	struct btrfs_fs_info *fs_info;
 	struct inode *inode;
 	spinlock_t lock;
+	u64 start;
+	u64 length;
 	u64 pinned;
 	u64 reserved;
 	u64 used;
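
The hunks below all apply the same mechanical substitution. As a sketch of
the recurring pattern (block_group_end is an illustrative helper, not part
of the patch):

	/* The exclusive end of a block group used to be computed as
	 * cache->key.objectid + cache->key.offset; with the dedicated
	 * members it becomes the naturally aligned form below. */
	static u64 block_group_end(const struct btrfs_block_group_cache *cache)
	{
		return cache->start + cache->length;
	}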
@@ -75,8 +75,8 @@ void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache)
 	struct btrfs_fs_info *fs_info = cache->fs_info;
 	u64 start, end;
 
-	start = cache->key.objectid;
-	end = start + cache->key.offset - 1;
+	start = cache->start;
+	end = start + cache->length - 1;
 
 	clear_extent_bits(&fs_info->freed_extents[0],
 			  start, end, EXTENT_UPTODATE);
@@ -2560,7 +2560,7 @@ static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
 	if (!cache)
 		return 0;
 
-	bytenr = cache->key.objectid;
+	bytenr = cache->start;
 	btrfs_put_block_group(cache);
 
 	return bytenr;
@@ -2796,7 +2796,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
 	while (start <= end) {
 		readonly = false;
 		if (!cache ||
-		    start >= cache->key.objectid + cache->key.offset) {
+		    start >= cache->start + cache->length) {
 			if (cache)
 				btrfs_put_block_group(cache);
 			total_unpinned = 0;
@@ -2809,7 +2809,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
 			empty_cluster <<= 1;
 		}
 
-		len = cache->key.objectid + cache->key.offset - start;
+		len = cache->start + cache->length - start;
 		len = min(len, end + 1 - start);
 
 		if (start < cache->last_byte_to_unpin) {
@@ -2925,8 +2925,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
 			ret = -EROFS;
 		if (!trans->aborted)
 			ret = btrfs_discard_extent(fs_info,
-						   block_group->key.objectid,
-						   block_group->key.offset,
+						   block_group->start,
+						   block_group->length,
 						   &trimmed);
 
 		list_del_init(&block_group->bg_list);
@@ -3492,7 +3492,7 @@ static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
 			goto release_cluster;
 
 		offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
-				ffe_ctl->num_bytes, cluster_bg->key.objectid,
+				ffe_ctl->num_bytes, cluster_bg->start,
 				&ffe_ctl->max_extent_size);
 		if (offset) {
 			/* We have a block, we're done */
@@ -3903,7 +3903,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 			continue;
 
 		btrfs_grab_block_group(block_group, delalloc);
-		ffe_ctl.search_start = block_group->key.objectid;
+		ffe_ctl.search_start = block_group->start;
 
 		/*
 		 * this can happen if we end up cycling through all the
@@ -3983,7 +3983,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
 
 		/* move on to the next group */
 		if (ffe_ctl.search_start + num_bytes >
-		    block_group->key.objectid + block_group->key.offset) {
+		    block_group->start + block_group->length) {
 			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
 					     num_bytes);
 			goto loop;
@@ -5497,7 +5497,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 		}
 
 		factor = btrfs_bg_type_to_factor(block_group->flags);
-		free_bytes += (block_group->key.offset -
+		free_bytes += (block_group->length -
 			       block_group->used) * factor;
 
 		spin_unlock(&block_group->lock);
@@ -5645,13 +5645,13 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = btrfs_next_block_group(cache)) {
-		if (cache->key.objectid >= range_end) {
+		if (cache->start >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
-		start = max(range->start, cache->key.objectid);
-		end = min(range_end, cache->key.objectid + cache->key.offset);
+		start = max(range->start, cache->start);
+		end = min(range_end, cache->start + cache->length);
 
 		if (end - start >= range->minlen) {
 			if (!btrfs_block_group_cache_done(cache)) {
@@ -107,7 +107,7 @@ struct inode *lookup_free_space_inode(
 		return inode;
 
 	inode = __lookup_free_space_inode(fs_info->tree_root, path,
-					  block_group->key.objectid);
+					  block_group->start);
 	if (IS_ERR(inode))
 		return inode;
 
@@ -201,7 +201,7 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
 		return ret;
 
 	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
-					 ino, block_group->key.objectid);
+					 ino, block_group->start);
 }
 
 int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
@@ -882,13 +882,13 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
 	spin_unlock(&block_group->lock);
 
 	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
-				      path, block_group->key.objectid);
+				      path, block_group->start);
 	btrfs_free_path(path);
 	if (ret <= 0)
 		goto out;
 
 	spin_lock(&ctl->tree_lock);
-	matched = (ctl->free_space == (block_group->key.offset - used -
+	matched = (ctl->free_space == (block_group->length - used -
 				       block_group->bytes_super));
 	spin_unlock(&ctl->tree_lock);
 
@@ -896,7 +896,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
 		__btrfs_remove_free_space_cache(ctl);
 		btrfs_warn(fs_info,
 			   "block group %llu has wrong amount of free space",
-			   block_group->key.objectid);
+			   block_group->start);
 		ret = -1;
 	}
 out:
@@ -909,7 +909,7 @@ int load_free_space_cache(struct btrfs_block_group_cache *block_group)
 		btrfs_warn(fs_info,
 			   "failed to load free space cache for block group %llu, rebuilding it now",
-			   block_group->key.objectid);
+			   block_group->start);
 	}
 
 	iput(inode);
@@ -1067,9 +1067,9 @@ static noinline_for_stack int write_pinned_extent_entries(
 	 */
 	unpin = block_group->fs_info->pinned_extents;
 
-	start = block_group->key.objectid;
+	start = block_group->start;
 
-	while (start < block_group->key.objectid + block_group->key.offset) {
+	while (start < block_group->start + block_group->length) {
 		ret = find_first_extent_bit(unpin, start,
 					    &extent_start, &extent_end,
 					    EXTENT_DIRTY, NULL);
@@ -1077,13 +1077,12 @@ static noinline_for_stack int write_pinned_extent_entries(
 			return 0;
 
 		/* This pinned extent is out of our range */
-		if (extent_start >= block_group->key.objectid +
-		    block_group->key.offset)
+		if (extent_start >= block_group->start + block_group->length)
 			return 0;
 
 		extent_start = max(extent_start, start);
-		extent_end = min(block_group->key.objectid +
-				 block_group->key.offset, extent_end + 1);
+		extent_end = min(block_group->start + block_group->length,
				 extent_end + 1);
 		len = extent_end - extent_start;
 
 		*entries += 1;
@@ -1174,7 +1173,7 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
 #ifdef DEBUG
 			btrfs_err(root->fs_info,
 				  "failed to write free space cache for block group %llu",
-				  block_group->key.objectid);
+				  block_group->start);
 #endif
 		}
 	}
@@ -1221,7 +1220,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
 {
 	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
 				     block_group, &block_group->io_ctl,
-				     path, block_group->key.objectid);
+				     path, block_group->start);
 }
 
 /**
@@ -1400,7 +1399,7 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
 #ifdef DEBUG
 		btrfs_err(fs_info,
 			  "failed to write free space cache for block group %llu",
-			  block_group->key.objectid);
+			  block_group->start);
 #endif
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1657,7 +1656,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
-	u64 size = block_group->key.offset;
+	u64 size = block_group->length;
 	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
 	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
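
For the last line above: div64_u64(size + bytes_per_bg - 1, bytes_per_bg) is
a round-up division. Assuming one bitmap covers 128 MiB (4 KiB pages and
sectors), a 1 GiB block group yields (1024 + 128 - 1) / 128 = 8 bitmaps,
while 1025 MiB would round up to 9.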
@@ -2034,7 +2033,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	 * so allow those block groups to still be allowed to have a bitmap
 	 * entry.
 	 */
-	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
+	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
 		return false;
 
 	return true;
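
To put a number on this check (assuming 4 KiB pages and a 4 KiB sectorsize,
so BITS_PER_BITMAP = PAGE_SIZE * 8 = 32768 and ctl->unit = 4096): one bitmap
covers 32768 * 4096 bytes = 128 MiB, half of that is 64 MiB, so block groups
smaller than 64 MiB never get bitmap entries.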
@@ -2516,7 +2515,7 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 
 	spin_lock_init(&ctl->tree_lock);
 	ctl->unit = fs_info->sectorsize;
-	ctl->start = block_group->key.objectid;
+	ctl->start = block_group->start;
 	ctl->private = block_group;
 	ctl->op = &free_space_op;
 	INIT_LIST_HEAD(&ctl->trimming_ranges);
@@ -3379,7 +3378,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
 		mutex_lock(&fs_info->chunk_mutex);
 		em_tree = &fs_info->mapping_tree;
 		write_lock(&em_tree->lock);
-		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
+		em = lookup_extent_mapping(em_tree, block_group->start,
 					   1);
 		BUG_ON(!em); /* logic error, can't happen */
 		remove_extent_mapping(em_tree, em);
This diff is collapsed.
@@ -4038,7 +4038,7 @@ static void get_block_group_info(struct list_head *groups_list,
 	space->flags = 0;
 	list_for_each_entry(block_group, groups_list, list) {
 		space->flags = block_group->flags;
-		space->total_bytes += block_group->key.offset;
+		space->total_bytes += block_group->length;
 		space->used_bytes += block_group->used;
 	}
 }
@@ -248,8 +248,8 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
 	if (!cache)
 		return NULL;
 
-	start = cache->key.objectid;
-	end = start + cache->key.offset - 1;
+	start = cache->start;
+	end = start + cache->length - 1;
 	btrfs_put_block_group(cache);
 
 	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
@@ -1563,8 +1563,8 @@ static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
 static int in_block_group(u64 bytenr,
 			  struct btrfs_block_group_cache *block_group)
 {
-	if (bytenr >= block_group->key.objectid &&
-	    bytenr < block_group->key.objectid + block_group->key.offset)
+	if (bytenr >= block_group->start &&
+	    bytenr < block_group->start + block_group->length)
 		return 1;
 	return 0;
 }
@@ -3863,7 +3863,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 	u64 start, end, last;
 	int ret;
 
-	last = rc->block_group->key.objectid + rc->block_group->key.offset;
+	last = rc->block_group->start + rc->block_group->length;
 	while (1) {
 		cond_resched();
 		if (rc->search_start >= last) {
@@ -3980,7 +3980,7 @@ int prepare_to_relocate(struct reloc_control *rc)
 		return -ENOMEM;
 
 	memset(&rc->cluster, 0, sizeof(rc->cluster));
-	rc->search_start = rc->block_group->key.objectid;
+	rc->search_start = rc->block_group->start;
 	rc->extents_found = 0;
 	rc->nodes_relocated = 0;
 	rc->merging_rsv_size = 0;
@@ -4248,7 +4248,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
 	key.offset = 0;
 	inode = btrfs_iget(fs_info->sb, &key, root);
 	BUG_ON(IS_ERR(inode));
-	BTRFS_I(inode)->index_cnt = group->key.objectid;
+	BTRFS_I(inode)->index_cnt = group->start;
 
 	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
@@ -4291,7 +4291,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
 
 	btrfs_info(fs_info,
 		   "relocating block group %llu flags %s",
-		   block_group->key.objectid, buf);
+		   block_group->start, buf);
 }
 
 /*
@@ -4364,8 +4364,8 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 	btrfs_wait_block_group_reservations(rc->block_group);
 	btrfs_wait_nocow_writers(rc->block_group);
 	btrfs_wait_ordered_roots(fs_info, U64_MAX,
-				 rc->block_group->key.objectid,
-				 rc->block_group->key.offset);
+				 rc->block_group->start,
+				 rc->block_group->length);
 
 	while (1) {
 		mutex_lock(&fs_info->cleaner_mutex);
@@ -404,8 +404,8 @@ static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache,
 	 * round_down() can only handle power of 2, while RAID56 full
 	 * stripe length can be 64KiB * n, so we need to manually round down.
 	 */
-	ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) *
-	      cache->full_stripe_len + cache->key.objectid;
+	ret = div64_u64(bytenr - cache->start, cache->full_stripe_len) *
+	      cache->full_stripe_len + cache->start;
 	return ret;
 }
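
A worked example of the manual round-down, with invented numbers: for
full_stripe_len = 192 KiB (64 KiB stripes across three data devices) and a
bytenr 500 KiB past cache->start, div64_u64(500 KiB, 192 KiB) = 2, so the
containing full stripe starts at cache->start + 2 * 192 KiB = cache->start +
384 KiB. The generic round_down() cannot compute this because 192 KiB is not
a power of two.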
@@ -3583,8 +3583,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		btrfs_wait_block_group_reservations(cache);
 		btrfs_wait_nocow_writers(cache);
 		ret = btrfs_wait_ordered_roots(fs_info, U64_MAX,
-					       cache->key.objectid,
-					       cache->key.offset);
+					       cache->start,
+					       cache->length);
 		if (ret > 0) {
 			struct btrfs_trans_handle *trans;
 
@@ -301,8 +301,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
 		spin_lock(&cache->lock);
 		btrfs_info(fs_info,
 			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
-			cache->key.objectid, cache->key.offset,
-			cache->used, cache->pinned,
+			cache->start, cache->length, cache->used, cache->pinned,
 			cache->reserved, cache->ro ? "[readonly]" : "");
 		btrfs_dump_free_space(cache, bytes);
 		spin_unlock(&cache->lock);
@@ -402,7 +402,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
 	down_read(&sinfo->groups_sem);
 	list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
 		if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
-			val += block_group->key.offset;
+			val += block_group->length;
 		else
 			val += block_group->used;
 	}
@@ -218,9 +218,8 @@ btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
 		return NULL;
 	}
 
-	cache->key.objectid = 0;
-	cache->key.offset = length;
-	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+	cache->start = 0;
+	cache->length = length;
 	cache->full_stripe_len = fs_info->sectorsize;
 	cache->fs_info = fs_info;
@@ -48,7 +48,7 @@ static int __check_free_space_extents(struct btrfs_trans_handle *trans,
 	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
 		if (path->slots[0] != 0)
 			goto invalid;
-		end = cache->key.objectid + cache->key.offset;
+		end = cache->start + cache->length;
 		i = 0;
 		while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
 			btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -155,7 +155,7 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans,
 				  u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, cache->key.offset},
+		{cache->start, cache->length},
 	};
 
 	return check_free_space_extents(trans, fs_info, cache, path,
@@ -172,8 +172,8 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid,
-					    cache->key.offset);
+					    cache->start,
+					    cache->length);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
@@ -190,13 +190,12 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
 				 u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid + alignment,
-		 cache->key.offset - alignment},
+		{cache->start + alignment, cache->length - alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid, alignment);
+					    cache->start, alignment);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
@@ -214,14 +213,13 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
 			   u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, cache->key.offset - alignment},
+		{cache->start, cache->length - alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid +
-					    cache->key.offset - alignment,
-					    alignment);
+					    cache->start + cache->length - alignment,
+					    alignment);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
@@ -238,14 +236,13 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
 			      u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, alignment},
-		{cache->key.objectid + 2 * alignment,
-		 cache->key.offset - 2 * alignment},
+		{cache->start, alignment},
+		{cache->start + 2 * alignment, cache->length - 2 * alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid + alignment,
+					    cache->start + alignment,
 					    alignment);
 	if (ret) {
 		test_err("could not remove free space");
@@ -263,19 +260,18 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
 			   u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, 2 * alignment},
+		{cache->start, 2 * alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid,
-					    cache->key.offset);
+					    cache->start, cache->length);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
 	}
 
-	ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+	ret = __add_to_free_space_tree(trans, cache, path, cache->start,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -283,7 +279,7 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + alignment,
+				       cache->start + alignment,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -301,20 +297,19 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
 			    u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid + alignment, 2 * alignment},
+		{cache->start + alignment, 2 * alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid,
-					    cache->key.offset);
+					    cache->start, cache->length);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + 2 * alignment,
+				       cache->start + 2 * alignment,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -322,7 +317,7 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + alignment,
+				       cache->start + alignment,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -340,19 +335,18 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
 			   u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, 3 * alignment},
+		{cache->start, 3 * alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid,
-					    cache->key.offset);
+					    cache->start, cache->length);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
 	}
 
-	ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+	ret = __add_to_free_space_tree(trans, cache, path, cache->start,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -360,16 +354,14 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + 2 * alignment,
-				       alignment);
+				       cache->start + 2 * alignment, alignment);
 	if (ret) {
 		test_err("could not add free space");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + alignment,
-				       alignment);
+				       cache->start + alignment, alignment);
 	if (ret) {
 		test_err("could not add free space");
 		return ret;
@@ -386,21 +378,20 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
 			   u32 alignment)
 {
 	const struct free_space_extent extents[] = {
-		{cache->key.objectid, alignment},
-		{cache->key.objectid + 2 * alignment, alignment},
-		{cache->key.objectid + 4 * alignment, alignment},
+		{cache->start, alignment},
+		{cache->start + 2 * alignment, alignment},
+		{cache->start + 4 * alignment, alignment},
 	};
 	int ret;
 
 	ret = __remove_from_free_space_tree(trans, cache, path,
-					    cache->key.objectid,
-					    cache->key.offset);
+					    cache->start, cache->length);
 	if (ret) {
 		test_err("could not remove free space");
 		return ret;
 	}
 
-	ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid,
+	ret = __add_to_free_space_tree(trans, cache, path, cache->start,
 				       alignment);
 	if (ret) {
 		test_err("could not add free space");
@@ -408,16 +399,14 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + 4 * alignment,
-				       alignment);
+				       cache->start + 4 * alignment, alignment);
 	if (ret) {
 		test_err("could not add free space");
 		return ret;
 	}
 
 	ret = __add_to_free_space_tree(trans, cache, path,
-				       cache->key.objectid + 2 * alignment,
-				       alignment);
+				       cache->start + 2 * alignment, alignment);
 	if (ret) {
 		test_err("could not add free space");
 		return ret;
@@ -3194,16 +3194,16 @@ static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_off
 	if (bargs->usage_min == 0)
 		user_thresh_min = 0;
 	else
-		user_thresh_min = div_factor_fine(cache->key.offset,
-						  bargs->usage_min);
+		user_thresh_min = div_factor_fine(cache->length,
+						  bargs->usage_min);
 
 	if (bargs->usage_max == 0)
 		user_thresh_max = 1;
 	else if (bargs->usage_max > 100)
-		user_thresh_max = cache->key.offset;
+		user_thresh_max = cache->length;
 	else
-		user_thresh_max = div_factor_fine(cache->key.offset,
-						  bargs->usage_max);
+		user_thresh_max = div_factor_fine(cache->length,
+						  bargs->usage_max);
 
 	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
 		ret = 0;
@@ -3225,10 +3225,9 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
 	if (bargs->usage_min == 0)
 		user_thresh = 1;
 	else if (bargs->usage > 100)
-		user_thresh = cache->key.offset;
+		user_thresh = cache->length;
 	else
-		user_thresh = div_factor_fine(cache->key.offset,
-					      bargs->usage);
+		user_thresh = div_factor_fine(cache->length, bargs->usage);
 
 	if (chunk_used < user_thresh)
 		ret = 0;
@@ -713,8 +713,8 @@ TRACE_EVENT(btrfs_add_block_group,
 	),
 
 	TP_fast_assign_btrfs(fs_info,
-		__entry->offset		= block_group->key.objectid;
-		__entry->size		= block_group->key.offset;
+		__entry->offset		= block_group->start;
+		__entry->size		= block_group->length;
 		__entry->flags		= block_group->flags;
 		__entry->bytes_used	= block_group->used;
 		__entry->bytes_super	= block_group->bytes_super;
@@ -1197,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 	),
 
 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid	= block_group->key.objectid;
+		__entry->bg_objectid	= block_group->start;
 		__entry->flags		= block_group->flags;
 		__entry->start		= start;
 		__entry->len		= len;
@@ -1245,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster,
 	),
 
 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid	= block_group->key.objectid;
+		__entry->bg_objectid	= block_group->start;
 		__entry->flags		= block_group->flags;
 		__entry->start		= start;
 		__entry->bytes		= bytes;
@@ -1272,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
 	),
 
 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid	= block_group->key.objectid;
+		__entry->bg_objectid	= block_group->start;
 	),
 
 	TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
@@ -1296,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster,
 	),

 	TP_fast_assign_btrfs(block_group->fs_info,
-		__entry->bg_objectid	= block_group->key.objectid;
+		__entry->bg_objectid	= block_group->start;
 		__entry->flags		= block_group->flags;
 		__entry->start		= cluster->window_start;
 		__entry->max_size	= cluster->max_size;
@@ -1856,8 +1856,8 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
 	),

 	TP_fast_assign_btrfs(bg_cache->fs_info,
-		__entry->bytenr		= bg_cache->key.objectid,
-		__entry->len		= bg_cache->key.offset,
+		__entry->bytenr		= bg_cache->start,
+		__entry->len		= bg_cache->length,
 		__entry->used		= bg_cache->used;
 		__entry->flags		= bg_cache->flags;
 	),