Commit a8fdc051 authored by Filipe Manana, committed by David Sterba

btrfs: remove obsolete delayed ref throttling logic when truncating items

We have this logic encapsulated in btrfs_should_throttle_delayed_refs(),
where we try to estimate whether running the current amount of delayed
references we have will take more than half a second, and if so, the
caller of btrfs_should_throttle_delayed_refs() should do something to
prevent more and more delayed refs from being accumulated.
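
For reference, the estimate at the heart of btrfs_should_throttle_delayed_refs()
boils down to the following (condensed from the code removed in the diff
below):

	u64 num_entries = atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime = trans->fs_info->avg_delayed_ref_runtime;

	/* Throttle if running all queued delayed refs would take >= 0.5s. */
	if (num_entries * avg_runtime >= NSEC_PER_SEC / 2)
		return true;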

This logic was added in commit 0a2b2a84 ("Btrfs: throttle delayed
refs better") and then further refined in commit a79b7d4b ("Btrfs:
async delayed refs"). The idea back then was that the caller of
btrfs_should_throttle_delayed_refs() would release its transaction
handle (by calling btrfs_end_transaction()) when that function returned
true, then btrfs_end_transaction() would trigger an async job to run
delayed references in a workqueue, and later start/join a transaction
again and do more work.

However we don't run delayed references asynchronously anymore, as that
was removed in commit db2462a6 ("btrfs: don't run delayed refs in
the end transaction logic"). This makes the logic at
btrfs_should_throttle_delayed_refs(), which tries to estimate how long
running our current delayed references will take, pointless, since we no
longer take any action there to run delayed references. We do have other
types of throttling, which consist of checking the size and reserved
space of the delayed and global block reserves, as well as whether
flushing delayed references for the current transaction was already
started, etc - this is all done by btrfs_should_end_transaction(), and
the only user of btrfs_should_throttle_delayed_refs() does periodically
call btrfs_should_end_transaction().
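
For context, btrfs_check_space_for_delayed_refs() is purely a space
check. A simplified sketch of its shape (a paraphrase, not the verbatim
kernel code):

	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 reserved;
	bool ret = false;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/* Report pressure when the combined reserved space no longer covers the rsv size. */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);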

So remove btrfs_should_throttle_delayed_refs() and the infrastructure
that keeps track of the average time used for running delayed references,
and adapt btrfs_truncate_inode_items() to call
btrfs_check_space_for_delayed_refs() instead.
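
With that, the throttling in the truncate loop reduces to roughly the
following (a condensed sketch of the new flow shown in the diff below;
the error path is elided, and the 'break' is an assumption based on the
existing "just bail" comment):

	if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
		refill_delayed_refs_rsv = true;
	...
	if (refill_delayed_refs_rsv) {
		/* Flushing here could deadlock, so don't flush. */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_NO_FLUSH);
		if (ret)
			break;	/* short on space, let the caller redo reservations */
	}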
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 4e8313e5
...@@ -53,22 +53,6 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info) ...@@ -53,22 +53,6 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
return ret; return ret;
} }
bool btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
u64 avg_runtime;
u64 val;
smp_mb();
avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
val = num_entries * avg_runtime;
if (val >= NSEC_PER_SEC / 2)
return true;
return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
/* /*
* Release a ref head's reservation. * Release a ref head's reservation.
* *
......
...@@ -385,7 +385,6 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info, ...@@ -385,7 +385,6 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info, void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *src, struct btrfs_block_rsv *src,
u64 num_bytes); u64 num_bytes);
bool btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info); bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
/* /*
......
...@@ -2965,7 +2965,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) ...@@ -2965,7 +2965,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
atomic64_set(&fs_info->free_chunk_space, 0); atomic64_set(&fs_info->free_chunk_space, 0);
fs_info->tree_mod_log = RB_ROOT; fs_info->tree_mod_log = RB_ROOT;
fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
btrfs_init_ref_verify(fs_info); btrfs_init_ref_verify(fs_info);
fs_info->thread_pool_size = min_t(unsigned long, fs_info->thread_pool_size = min_t(unsigned long,
......
...@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( ...@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
} }
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *locked_ref, struct btrfs_delayed_ref_head *locked_ref)
unsigned long *run_refs)
{ {
struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_root *delayed_refs;
...@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, ...@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
return -EAGAIN; return -EAGAIN;
} }
(*run_refs)++;
ref->in_tree = 0; ref->in_tree = 0;
rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree); rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node); RB_CLEAR_NODE(&ref->ref_node);
...@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, ...@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *locked_ref = NULL; struct btrfs_delayed_ref_head *locked_ref = NULL;
ktime_t start = ktime_get();
int ret; int ret;
unsigned long count = 0; unsigned long count = 0;
unsigned long actual_count = 0;
delayed_refs = &trans->transaction->delayed_refs; delayed_refs = &trans->transaction->delayed_refs;
do { do {
...@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, ...@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_lock(&locked_ref->lock); spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref); btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
&actual_count);
if (ret < 0 && ret != -EAGAIN) { if (ret < 0 && ret != -EAGAIN) {
/* /*
* Error, btrfs_run_delayed_refs_for_head already * Error, btrfs_run_delayed_refs_for_head already
...@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, ...@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
cond_resched(); cond_resched();
} while ((nr != -1 && count < nr) || locked_ref); } while ((nr != -1 && count < nr) || locked_ref);
/*
* We don't want to include ref heads since we can have empty ref heads
* and those will drastically skew our runtime down since we just do
* accounting, no actual extent tree updates.
*/
if (actual_count > 0) {
u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
u64 avg;
/*
* We weigh the current average higher than our current runtime
* to avoid large swings in the average.
*/
spin_lock(&delayed_refs->lock);
avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
spin_unlock(&delayed_refs->lock);
}
return 0; return 0;
} }
......
...@@ -407,7 +407,6 @@ struct btrfs_fs_info { ...@@ -407,7 +407,6 @@ struct btrfs_fs_info {
* Must be written and read while holding btrfs_fs_info::commit_root_sem. * Must be written and read while holding btrfs_fs_info::commit_root_sem.
*/ */
u64 last_reloc_trans; u64 last_reloc_trans;
u64 avg_delayed_ref_runtime;
/* /*
* This is updated to the current trans every time a full commit is * This is updated to the current trans every time a full commit is
......
...@@ -527,7 +527,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -527,7 +527,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
while (1) { while (1) {
u64 clear_start = 0, clear_len = 0, extent_start = 0; u64 clear_start = 0, clear_len = 0, extent_start = 0;
bool should_throttle = false; bool refill_delayed_refs_rsv = false;
fi = NULL; fi = NULL;
leaf = path->nodes[0]; leaf = path->nodes[0];
...@@ -685,10 +685,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -685,10 +685,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, ret); btrfs_abort_transaction(trans, ret);
break; break;
} }
if (be_nice) { if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
if (btrfs_should_throttle_delayed_refs(trans)) refill_delayed_refs_rsv = true;
should_throttle = true;
}
} }
if (found_type == BTRFS_INODE_ITEM_KEY) if (found_type == BTRFS_INODE_ITEM_KEY)
...@@ -696,7 +694,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -696,7 +694,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0 || if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot || path->slots[0] != pending_del_slot ||
should_throttle) { refill_delayed_refs_rsv) {
if (pending_del_nr) { if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path, ret = btrfs_del_items(trans, root, path,
pending_del_slot, pending_del_slot,
...@@ -719,7 +717,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, ...@@ -719,7 +717,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* actually allocate, so just bail if we're short and * actually allocate, so just bail if we're short and
* let the normal reservation dance happen higher up. * let the normal reservation dance happen higher up.
*/ */
if (should_throttle) { if (refill_delayed_refs_rsv) {
ret = btrfs_delayed_refs_rsv_refill(fs_info, ret = btrfs_delayed_refs_rsv_refill(fs_info,
BTRFS_RESERVE_NO_FLUSH); BTRFS_RESERVE_NO_FLUSH);
if (ret) { if (ret) {
......