Commit 3349b57f authored by Josef Bacik, committed by David Sterba

btrfs: convert block group bit field to use bit helpers

We use a bit field in the btrfs_block_group for different flags. However,
this is awkward because we have to hold the block_group->lock for any
modification of any of these fields, and it makes the code clunky for a
few of these flags.  Convert these to a proper flags setup so we can
utilize the bit helpers.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 723de71d
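
The whole patch applies one mechanical pattern: each former one-bit field
becomes an enum value naming a bit position in a single unsigned long
runtime_flags word, and accesses go through the generic bit helpers from
<linux/bitops.h>. Below is a minimal sketch of that pattern; the demo_*
names are illustrative stand-ins, not code from the patch.

	#include <linux/bitops.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Each enumerator is a bit number, not a mask. */
	enum demo_flags {
		DEMO_FLAG_REMOVED,
		DEMO_FLAG_IREF,
	};

	struct demo_group {
		spinlock_t lock;		/* init with spin_lock_init() */
		unsigned long runtime_flags;	/* replaces the :1 bit fields */
	};

	static void demo_mark_removed(struct demo_group *dg)
	{
		/* The lock still guards the surrounding state, as in the patch. */
		spin_lock(&dg->lock);
		set_bit(DEMO_FLAG_REMOVED, &dg->runtime_flags);
		spin_unlock(&dg->lock);
	}

	static bool demo_drop_iref(struct demo_group *dg)
	{
		/* One atomic op replaces "if (dg->iref) { dg->iref = 0; ... }". */
		return test_and_clear_bit(DEMO_FLAG_IREF, &dg->runtime_flags);
	}

Because set_bit() and clear_bit() are atomic read-modify-write operations on
the word, the test_and_set_bit()/test_and_clear_bit() forms also let the
patch fold a check plus an assignment into one call, as in
lookup_free_space_inode() and btrfs_repair_one_zone() below.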
@@ -772,7 +772,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
 	WARN_ON(cache->caching_ctl);
 	cache->caching_ctl = caching_ctl;
 	cache->cached = BTRFS_CACHE_STARTED;
-	cache->has_caching_ctl = 1;
+	set_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	write_lock(&fs_info->block_group_cache_lock);
@@ -988,11 +988,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		kobject_put(kobj);
 	}
 
-	if (block_group->has_caching_ctl)
+	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags))
 		caching_ctl = btrfs_get_caching_control(block_group);
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		btrfs_wait_block_group_cache_done(block_group);
-	if (block_group->has_caching_ctl) {
+
+	if (test_bit(BLOCK_GROUP_FLAG_HAS_CACHING_CTL, &block_group->runtime_flags)) {
 		write_lock(&fs_info->block_group_cache_lock);
 		if (!caching_ctl) {
 			struct btrfs_caching_control *ctl;
@@ -1034,12 +1035,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 			< block_group->zone_unusable);
 		WARN_ON(block_group->space_info->disk_total
 			< block_group->length * factor);
-		WARN_ON(block_group->zone_is_active &&
+		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				 &block_group->runtime_flags) &&
 			block_group->space_info->active_total_bytes
 			< block_group->length);
 	}
 	block_group->space_info->total_bytes -= block_group->length;
-	if (block_group->zone_is_active)
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
 		block_group->space_info->active_total_bytes -= block_group->length;
 	block_group->space_info->bytes_readonly -=
 			(block_group->length - block_group->zone_unusable);
@@ -1069,7 +1071,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		goto out;
 
 	spin_lock(&block_group->lock);
-	block_group->removed = 1;
+	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
+
 	/*
 	 * At this point trimming or scrub can't start on this block group,
 	 * because we removed the block group from the rbtree
@@ -2409,7 +2412,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 		ret = insert_block_group_item(trans, block_group);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		if (!block_group->chunk_item_inserted) {
+		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+			      &block_group->runtime_flags)) {
 			mutex_lock(&fs_info->chunk_mutex);
 			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
 			mutex_unlock(&fs_info->chunk_mutex);
@@ -3955,7 +3959,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		while (block_group) {
 			btrfs_wait_block_group_cache_done(block_group);
 			spin_lock(&block_group->lock);
-			if (block_group->iref)
+			if (test_bit(BLOCK_GROUP_FLAG_IREF,
+				     &block_group->runtime_flags))
 				break;
 			spin_unlock(&block_group->lock);
 			block_group = btrfs_next_block_group(block_group);
@@ -3968,7 +3973,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		}
 
 		inode = block_group->inode;
-		block_group->iref = 0;
+		clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags);
 		block_group->inode = NULL;
 		spin_unlock(&block_group->lock);
 		ASSERT(block_group->io_ctl.inode == NULL);
@@ -4110,7 +4115,7 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
 
 	spin_lock(&block_group->lock);
 	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
-		   block_group->removed);
+		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
 	spin_unlock(&block_group->lock);
 
 	if (cleanup) {
......
@@ -46,6 +46,18 @@ enum btrfs_chunk_alloc_enum {
 	CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
+/* Block group flags set at runtime */
+enum btrfs_block_group_flags {
+	BLOCK_GROUP_FLAG_IREF,
+	BLOCK_GROUP_FLAG_HAS_CACHING_CTL,
+	BLOCK_GROUP_FLAG_REMOVED,
+	BLOCK_GROUP_FLAG_TO_COPY,
+	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
+	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+};
+
 struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
@@ -95,16 +107,9 @@ struct btrfs_block_group {
 	/* For raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
+	unsigned long runtime_flags;
 
 	unsigned int ro;
-	unsigned int iref:1;
-	unsigned int has_caching_ctl:1;
-	unsigned int removed:1;
-	unsigned int to_copy:1;
-	unsigned int relocating_repair:1;
-	unsigned int chunk_item_inserted:1;
-	unsigned int zone_is_active:1;
-	unsigned int zoned_data_reloc_ongoing:1;
 
 	int disk_cache_state;
......
@@ -546,7 +546,7 @@ static int mark_block_group_to_copy(struct btrfs_fs_info *fs_info,
 			continue;
 
 		spin_lock(&cache->lock);
-		cache->to_copy = 1;
+		set_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 		spin_unlock(&cache->lock);
 
 		btrfs_put_block_group(cache);
@@ -577,7 +577,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 		return true;
 
 	spin_lock(&cache->lock);
-	if (cache->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
 		spin_unlock(&cache->lock);
 		return true;
 	}
@@ -611,7 +611,7 @@ bool btrfs_finish_block_group_to_copy(struct btrfs_device *srcdev,
 
 	/* Last stripe on this device */
 	spin_lock(&cache->lock);
-	cache->to_copy = 0;
+	clear_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	return true;
......
@@ -3804,7 +3804,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 	       block_group->start == fs_info->data_reloc_bg ||
 	       fs_info->data_reloc_bg == 0);
 
-	if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
+	if (block_group->ro ||
+	    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
 		ret = 1;
 		goto out;
 	}
@@ -3881,7 +3882,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 		 * regular extents) at the same time to the same zone, which
 		 * easily break the write pointer.
 		 */
-		block_group->zoned_data_reloc_ongoing = 1;
+		set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
 		fs_info->data_reloc_bg = 0;
 	}
 	spin_unlock(&fs_info->relocation_bg_lock);
......
@@ -126,10 +126,8 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 	}
 
-	if (!block_group->iref) {
+	if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
 		block_group->inode = igrab(inode);
-		block_group->iref = 1;
-	}
 	spin_unlock(&block_group->lock);
 
 	return inode;
@@ -241,8 +239,7 @@ int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
 	clear_nlink(inode);
 	/* One for the block groups ref */
 	spin_lock(&block_group->lock);
-	if (block_group->iref) {
-		block_group->iref = 0;
+	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
 		block_group->inode = NULL;
 		spin_unlock(&block_group->lock);
 		iput(inode);
@@ -2876,7 +2873,8 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
 
 	if (btrfs_is_zoned(fs_info)) {
 		btrfs_info(fs_info, "free space %llu active %d",
 			   block_group->zone_capacity - block_group->alloc_offset,
-			   block_group->zone_is_active);
+			   test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				    &block_group->runtime_flags));
 		return;
 	}
@@ -4008,7 +4006,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -4038,7 +4036,7 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -4060,7 +4058,7 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
 	*trimmed = 0;
 
 	spin_lock(&block_group->lock);
-	if (block_group->removed) {
+	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
......
@@ -3257,7 +3257,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
 		}
 		/* Block group removed? */
 		spin_lock(&bg->lock);
-		if (bg->removed) {
+		if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
 			spin_unlock(&bg->lock);
 			ret = 0;
 			break;
@@ -3597,7 +3597,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 	 * kthread or relocation.
 	 */
 	spin_lock(&bg->lock);
-	if (!bg->removed)
+	if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
 		ret = -EINVAL;
 	spin_unlock(&bg->lock);
 
@@ -3756,7 +3756,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
 		if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
 			spin_lock(&cache->lock);
-			if (!cache->to_copy) {
+			if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
 				spin_unlock(&cache->lock);
 				btrfs_put_block_group(cache);
 				goto skip;
* repair extents. * repair extents.
*/ */
spin_lock(&cache->lock); spin_lock(&cache->lock);
if (cache->removed) { if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
spin_unlock(&cache->lock); spin_unlock(&cache->lock);
btrfs_put_block_group(cache); btrfs_put_block_group(cache);
goto skip; goto skip;
@@ -3933,8 +3933,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * balance is triggered or it becomes used and unused again.
 		 */
 		spin_lock(&cache->lock);
-		if (!cache->removed && !cache->ro && cache->reserved == 0 &&
-		    cache->used == 0) {
+		if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
+		    !cache->ro && cache->reserved == 0 && cache->used == 0) {
 			spin_unlock(&cache->lock);
 			if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
 				btrfs_discard_queue_work(&fs_info->discard_ctl,
......
@@ -305,7 +305,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
 	ASSERT(found);
 	spin_lock(&found->lock);
 	found->total_bytes += block_group->length;
-	if (block_group->zone_is_active)
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
 		found->active_total_bytes += block_group->length;
 	found->disk_total += block_group->length * factor;
 	found->bytes_used += block_group->used;
......
@@ -5595,7 +5595,7 @@ int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto out;
 
-	bg->chunk_item_inserted = 1;
+	set_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, &bg->runtime_flags);
 
 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
 		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
@@ -6154,7 +6154,7 @@ static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
 	cache = btrfs_lookup_block_group(fs_info, logical);
 
 	spin_lock(&cache->lock);
-	ret = cache->to_copy;
+	ret = test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags);
 	spin_unlock(&cache->lock);
 
 	btrfs_put_block_group(cache);
@@ -8244,7 +8244,7 @@ static int relocating_repair_kthread(void *data)
 	if (!cache)
 		goto out;
 
-	if (!cache->relocating_repair)
+	if (!test_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags))
 		goto out;
 
 	ret = btrfs_may_alloc_data_chunk(fs_info, target);
@@ -8282,12 +8282,11 @@ bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
 		return true;
 
 	spin_lock(&cache->lock);
-	if (cache->relocating_repair) {
+	if (test_and_set_bit(BLOCK_GROUP_FLAG_RELOCATING_REPAIR, &cache->runtime_flags)) {
 		spin_unlock(&cache->lock);
 		btrfs_put_block_group(cache);
 		return true;
 	}
-	cache->relocating_repair = 1;
 	spin_unlock(&cache->lock);
 
 	kthread_run(relocating_repair_kthread, cache,
......
@@ -1436,7 +1436,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 			goto out;
 		} else if (map->num_stripes == num_conventional) {
 			cache->alloc_offset = last_alloc;
-			cache->zone_is_active = 1;
+			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
 			goto out;
 		}
 	}
@@ -1452,7 +1452,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = caps[0];
-		cache->zone_is_active = test_bit(0, active);
+		if (test_bit(0, active))
+			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
 		break;
 	case BTRFS_BLOCK_GROUP_DUP:
 		if (map->type & BTRFS_BLOCK_GROUP_DATA) {
@@ -1486,7 +1487,9 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 				goto out;
 			}
 		} else {
-			cache->zone_is_active = test_bit(0, active);
+			if (test_bit(0, active))
+				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+					&cache->runtime_flags);
 		}
 		cache->alloc_offset = alloc_offsets[0];
 		cache->zone_capacity = min(caps[0], caps[1]);
@@ -1530,7 +1533,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
 	if (!ret) {
 		cache->meta_write_pointer = cache->alloc_offset + cache->start;
 
-		if (cache->zone_is_active) {
+		if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) {
 			btrfs_get_block_group(cache);
 			spin_lock(&fs_info->zone_active_bgs_lock);
 			list_add_tail(&cache->active_bg_list,
@@ -1871,7 +1874,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 	spin_lock(&space_info->lock);
 	spin_lock(&block_group->lock);
-	if (block_group->zone_is_active) {
+	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
 		ret = true;
 		goto out_unlock;
 	}
@@ -1897,7 +1900,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 	}
 
 	/* Successfully activated all the zones */
-	block_group->zone_is_active = 1;
+	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
 	space_info->active_total_bytes += block_group->length;
 	spin_unlock(&block_group->lock);
 	btrfs_try_granting_tickets(fs_info, space_info);
@@ -1960,7 +1963,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 	int i;
 
 	spin_lock(&block_group->lock);
-	if (!block_group->zone_is_active) {
+	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
@@ -2001,7 +2004,8 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 	 * Bail out if someone already deactivated the block group, or
 	 * allocated space is left in the block group.
 	 */
-	if (!block_group->zone_is_active) {
+	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+		      &block_group->runtime_flags)) {
 		spin_unlock(&block_group->lock);
 		btrfs_dec_block_group_ro(block_group);
 		return 0;
@@ -2014,7 +2018,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
 		}
 	}
 
-	block_group->zone_is_active = 0;
+	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
 	block_group->alloc_offset = block_group->zone_capacity;
 	block_group->free_space_ctl->free_space = 0;
 	btrfs_clear_treelog_bg(block_group);
@@ -2222,13 +2226,14 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
 	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));
 
 	spin_lock(&block_group->lock);
-	if (!block_group->zoned_data_reloc_ongoing)
+	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
 		goto out;
 
 	/* All relocation extents are written. */
 	if (block_group->start + block_group->alloc_offset == logical + length) {
 		/* Now, release this block group for further allocations. */
-		block_group->zoned_data_reloc_ongoing = 0;
+		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+			  &block_group->runtime_flags);
 	}
 
 out:
@@ -2300,7 +2305,9 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
 			    list) {
 			if (!spin_trylock(&bg->lock))
 				continue;
-			if (btrfs_zoned_bg_is_full(bg) || bg->zone_is_active) {
+			if (btrfs_zoned_bg_is_full(bg) ||
+			    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+				     &bg->runtime_flags)) {
 				spin_unlock(&bg->lock);
 				continue;
 			}
......