Commit f66e0209 authored by Filipe Manana, committed by David Sterba

btrfs: stop reserving excessive space for block group item updates

Space for block group item updates, necessary after allocating or
deallocating an extent from a block group, is reserved in the delayed
refs block reserve. Currently we do this by incrementing the transaction
handle's delayed_ref_updates counter and then calling
btrfs_update_delayed_refs_rsv(), which increases the size of the delayed
refs block reserve by the same amount we reserve for a delayed ref, as
given by btrfs_calc_delayed_ref_bytes().

That is excessive because it corresponds to the space needed to insert
one item in a btree (btrfs_calc_insert_metadata_size()), multiplied by 2
when the free space tree feature is enabled. All we need is the amount
given by btrfs_calc_metadata_size(), since we only update an existing
block group item in the extent tree (or in the block group tree, if that
feature is enabled). By using btrfs_calc_metadata_size() we reserve a
quarter of the space when the free space tree is used and half the space
when it is not, putting less pressure on space reservation.

So introduce helpers to reserve and release space for block group item
updates that use btrfs_calc_metadata_size() to calculate the space.
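
As a rough illustration of the numbers involved, below is a minimal
user-space sketch of the arithmetic (not kernel code; the helpers here are
stand-ins for the real btrfs_calc_*() helpers, assuming a metadata
reservation of nodesize * BTRFS_MAX_LEVEL per item, twice that for an
insertion, doubled again for delayed refs when the free space tree is
enabled, with BTRFS_MAX_LEVEL == 8 and a 16K node size):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8

/* Worst-case space to COW one path when updating an existing item. */
static uint64_t calc_metadata_size(uint64_t nodesize, unsigned int num_items)
{
	return nodesize * BTRFS_MAX_LEVEL * num_items;
}

/* Insertions may split nodes, so assume two tree walks per item. */
static uint64_t calc_insert_metadata_size(uint64_t nodesize, unsigned int num_items)
{
	return nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/* What the delayed refs rsv charges per ref, doubled with the free space tree. */
static uint64_t calc_delayed_ref_bytes(uint64_t nodesize, bool free_space_tree)
{
	uint64_t bytes = calc_insert_metadata_size(nodesize, 1);

	return free_space_tree ? bytes * 2 : bytes;
}

int main(void)
{
	const uint64_t nodesize = 16 * 1024;

	/* Old reservation per block group item update. */
	printf("old, free space tree:    %llu\n",
	       (unsigned long long)calc_delayed_ref_bytes(nodesize, true));
	printf("old, no free space tree: %llu\n",
	       (unsigned long long)calc_delayed_ref_bytes(nodesize, false));
	/* New reservation with this change. */
	printf("new:                     %llu\n",
	       (unsigned long long)calc_metadata_size(nodesize, 1));
	return 0;
}

Under those assumptions the old reservation per block group item update is
512K with the free space tree (256K without it), against 128K with this
change.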
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 398fb913
@@ -1286,7 +1286,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
if (remove_rsv)
-btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
btrfs_free_path(path);
return ret;
}
@@ -3369,7 +3369,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
if (should_put)
btrfs_put_block_group(cache);
if (drop_reserve)
-btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
/*
* Avoid blocking other tasks for too long. It might even save
* us from writing caches for block groups that are going to be
@@ -3516,7 +3516,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
/* If its not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
-btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -3545,6 +3545,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_block_group *cache;
u64 old_val;
bool reclaim = false;
+bool bg_already_dirty = true;
int factor;
/* Block accounting for super block */
@@ -3613,7 +3614,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_lock(&trans->transaction->dirty_bgs_lock);
if (list_empty(&cache->dirty_list)) {
list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
-trans->delayed_ref_updates++;
+bg_already_dirty = false;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
@@ -3633,7 +3634,8 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
btrfs_put_block_group(cache);
/* Modified block groups are accounted for in the delayed_refs_rsv. */
-btrfs_update_delayed_refs_rsv(trans);
+if (!bg_already_dirty)
+	btrfs_inc_delayed_refs_rsv_bg_updates(info);
return 0;
}
......
@@ -125,6 +125,41 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
trans->delayed_ref_csum_deletions = 0;
}
+/*
+ * Adjust the size of the delayed refs block reserve for 1 block group item
+ * update.
+ */
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+
+	spin_lock(&delayed_rsv->lock);
+	/*
+	 * Updating a block group item does not result in new nodes/leaves and
+	 * does not require changing the free space tree, only the extent tree
+	 * or the block group tree, so this is all we need.
+	 */
+	delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
+	delayed_rsv->full = false;
+	spin_unlock(&delayed_rsv->lock);
+}
+
+/*
+ * Adjust the size of the delayed refs block reserve to release space for 1
+ * block group item update.
+ */
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+	const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
+	u64 released;
+
+	released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
+	if (released > 0)
+		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+					      0, released, 0);
+}
/*
* Transfer bytes to our delayed refs rsv.
*
......
@@ -436,6 +436,8 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
......
@@ -4774,7 +4774,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
-btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
......