Commit 98173255 authored by Naohiro Aota, committed by David Sterba

btrfs: zoned: calculate free space from zone capacity

Now that we have introduced capacity in a block group, we need to calculate free
space using the capacity instead of the length. Thus, we account
capacity - alloc_offset as free space, and account the bytes in the range
[capacity, length] as zone unusable.
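To make the arithmetic concrete, here is a minimal userspace C sketch (not kernel
code; all sizes below are hypothetical) that applies the same accounting to a single
zoned block group: free space is measured against zone_capacity rather than length,
and the bytes between capacity and length are counted as zone_unusable.

/*
 * Illustrative userspace sketch of the zoned free-space accounting.
 * Not kernel code; the sizes are made-up example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical zoned block group: 256 MiB zone, 192 MiB capacity. */
	uint64_t length        = 256ULL << 20;	/* zone size */
	uint64_t zone_capacity = 192ULL << 20;	/* writable part of the zone */
	uint64_t alloc_offset  =  64ULL << 20;	/* write pointer progress */
	uint64_t used          =  48ULL << 20;	/* live (referenced) data */

	/* Before this change, free space was counted against the length. */
	uint64_t old_free = length - alloc_offset;

	/*
	 * After this change, free space is capacity - alloc_offset, and both
	 * the already-written-but-freed bytes and the [capacity, length]
	 * tail of the zone are accounted as zone_unusable.
	 */
	uint64_t free = zone_capacity - alloc_offset;
	uint64_t zone_unusable = (alloc_offset - used) +
				 (length - zone_capacity);

	printf("old free:      %llu MiB\n", (unsigned long long)(old_free >> 20));
	printf("free:          %llu MiB\n", (unsigned long long)(free >> 20));
	printf("zone_unusable: %llu MiB\n", (unsigned long long)(zone_unusable >> 20));
	return 0;
}

With these example numbers the reported free space drops from 192 MiB to 128 MiB,
and 80 MiB is accounted as zone_unusable, matching the calculations introduced in
the hunks below.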
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent c46c4247
@@ -2484,7 +2484,8 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 	 */
 	trace_btrfs_add_block_group(fs_info, cache, 1);
 	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
-				cache->bytes_super, 0, &cache->space_info);
+				cache->bytes_super, cache->zone_unusable,
+				&cache->space_info);
 	btrfs_update_global_block_rsv(fs_info);
 
 	link_block_group(cache);
@@ -2599,7 +2600,9 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
 	if (!--cache->ro) {
 		if (btrfs_is_zoned(cache->fs_info)) {
 			/* Migrate zone_unusable bytes back */
-			cache->zone_unusable = cache->alloc_offset - cache->used;
+			cache->zone_unusable =
+				(cache->alloc_offset - cache->used) +
+				(cache->length - cache->zone_capacity);
 			sinfo->bytes_zone_unusable += cache->zone_unusable;
 			sinfo->bytes_readonly -= cache->zone_unusable;
 		}
...
@@ -3796,7 +3796,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 		goto out;
 	}
 
-	avail = block_group->length - block_group->alloc_offset;
+	WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
+	avail = block_group->zone_capacity - block_group->alloc_offset;
 	if (avail < num_bytes) {
 		if (ffe_ctl->max_extent_size < avail) {
 			/*
...
@@ -2539,10 +2539,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
+	bool initial = (size == block_group->length);
+
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
 	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
+	else if (initial)
+		to_free = block_group->zone_capacity;
 	else if (offset >= block_group->alloc_offset)
 		to_free = size;
 	else if (offset + size <= block_group->alloc_offset)
@@ -2755,7 +2760,7 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
 	 */
 	if (btrfs_is_zoned(fs_info)) {
 		btrfs_info(fs_info, "free space %llu",
-			   block_group->length - block_group->alloc_offset);
+			   block_group->zone_capacity - block_group->alloc_offset);
 		return;
 	}
...
@@ -1265,8 +1265,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
 		return;
 
 	WARN_ON(cache->bytes_super != 0);
-	unusable = cache->alloc_offset - cache->used;
-	free = cache->length - cache->alloc_offset;
+	unusable = (cache->alloc_offset - cache->used) +
+		   (cache->length - cache->zone_capacity);
+	free = cache->zone_capacity - cache->alloc_offset;
 
 	/* We only need ->free_space in ALLOC_SEQ block groups */
 	cache->last_byte_to_unpin = (u64)-1;
...