Commit 66b53bae authored by Josef Bacik, committed by David Sterba

btrfs: cleanup btrfs_discard_update_discardable usage

This passes in both the block_group and the free_space_ctl, but we can
get the ctl from the block group itself.  Part of the reason for the
extra argument is that we call this from __load_free_space_cache, which
can be called for the inode cache as well.

Move that call into the block group specific load section and wrap it
in the lock that the assertion requires (otherwise the call is safe
without the lock because it happens in single-threaded context).

Fix up the arguments to only take the block group.  Add a lockdep_assert
as well for good measure to make sure we don't mess up the locking
again.
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2ca08c56
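
For intuition, here is a minimal userspace sketch of the convention the
commit moves to: the function takes only the block group, derives the
free_space_ctl from it, and asserts that the caller already holds the
tree lock.  Every type and name below is a simplified stand-in invented
for illustration, not the kernel's code, and the bool flag merely models
what lockdep tracks for real (compile with -lpthread):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins only; the real kernel structs are far richer. */
struct free_space_ctl {
	pthread_mutex_t tree_lock;
	bool tree_lock_held;	/* models lockdep's notion of "lock held" */
};

struct block_group {
	struct free_space_ctl *free_space_ctl;	/* ctl reachable from group */
};

/* After the cleanup: one argument, ctl derived inside, lock asserted. */
static void update_discardable(struct block_group *bg)
{
	struct free_space_ctl *ctl = bg->free_space_ctl;

	assert(ctl->tree_lock_held);	/* stands in for lockdep_assert_held() */
	/* ... propagate counter deltas up to the global stats here ... */
}

static void load_path(struct block_group *bg)
{
	struct free_space_ctl *ctl = bg->free_space_ctl;

	/* Callers take the tree lock around the update, as the
	 * load_free_space_cache hunk below does with spin_lock(). */
	pthread_mutex_lock(&ctl->tree_lock);
	ctl->tree_lock_held = true;
	update_discardable(bg);
	ctl->tree_lock_held = false;
	pthread_mutex_unlock(&ctl->tree_lock);
}

int main(void)
{
	struct free_space_ctl ctl = { .tree_lock = PTHREAD_MUTEX_INITIALIZER };
	struct block_group bg = { .free_space_ctl = &ctl };

	load_path(&bg);	/* passing a mismatched ctl is now impossible */
	return 0;
}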
--- a/fs/btrfs/discard.c
+++ b/fs/btrfs/discard.c
@@ -563,15 +563,14 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
 /**
  * btrfs_discard_update_discardable - propagate discard counters
  * @block_group: block_group of interest
- * @ctl: free_space_ctl of @block_group
  *
  * This propagates deltas of counters up to the discard_ctl. It maintains a
  * current counter and a previous counter passing the delta up to the global
  * stat. Then the current counter value becomes the previous counter value.
  */
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-				      struct btrfs_free_space_ctl *ctl)
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
 {
+	struct btrfs_free_space_ctl *ctl;
 	struct btrfs_discard_ctl *discard_ctl;
 	s32 extents_delta;
 	s64 bytes_delta;
@@ -581,8 +580,10 @@ void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
 	    !btrfs_is_block_group_data_only(block_group))
 		return;
 
+	ctl = block_group->free_space_ctl;
 	discard_ctl = &block_group->fs_info->discard_ctl;
 
+	lockdep_assert_held(&ctl->tree_lock);
 	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
 			ctl->discardable_extents[BTRFS_STAT_PREV];
 	if (extents_delta) {
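
The current/previous counter scheme described in the comment above is
simple but easy to misread.  A self-contained sketch of the idea follows;
the names and types are illustrative only, not the kernel's:

#include <stdio.h>

enum { STAT_CURR, STAT_PREV };

/* Per-group stat with a current value and a previous snapshot. */
struct group_stats {
	long discardable_extents[2];
};

/* Global counter that aggregates all groups. */
struct global_stats {
	long discardable_extents;
};

/* Push the delta since the last call up to the global counter, then
 * remember the current value as the new previous value. */
static void propagate(struct group_stats *g, struct global_stats *gl)
{
	long delta = g->discardable_extents[STAT_CURR] -
		     g->discardable_extents[STAT_PREV];

	if (delta) {
		gl->discardable_extents += delta;
		g->discardable_extents[STAT_PREV] =
			g->discardable_extents[STAT_CURR];
	}
}

int main(void)
{
	struct group_stats g = { { 5, 0 } };
	struct global_stats gl = { 0 };

	propagate(&g, &gl);	/* delta +5, global: 0 -> 5 */
	g.discardable_extents[STAT_CURR] = 3;
	propagate(&g, &gl);	/* delta -2, global: 5 -> 3 */
	printf("global discardable extents: %ld\n", gl.discardable_extents);
	return 0;
}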
--- a/fs/btrfs/discard.h
+++ b/fs/btrfs/discard.h
@@ -28,8 +28,7 @@ bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl);
 
 /* Update operations */
 void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl);
-void btrfs_discard_update_discardable(struct btrfs_block_group *block_group,
-				      struct btrfs_free_space_ctl *ctl);
+void btrfs_discard_update_discardable(struct btrfs_block_group *block_group);
 
 /* Setup/cleanup operations */
 void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info);
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -828,7 +828,6 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	merge_space_tree(ctl);
 	ret = 1;
 out:
-	btrfs_discard_update_discardable(ctl->private, ctl);
 	io_ctl_free(&io_ctl);
 	return ret;
 free_cache:
@@ -929,6 +928,9 @@ int load_free_space_cache(struct btrfs_block_group *block_group)
 			   block_group->start);
 	}
 
+	spin_lock(&ctl->tree_lock);
+	btrfs_discard_update_discardable(block_group);
+	spin_unlock(&ctl->tree_lock);
 	iput(inode);
 	return ret;
 }
@@ -2508,7 +2510,7 @@ int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
 	if (ret)
 		kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
 	if (ret) {
@@ -2643,7 +2645,7 @@ int btrfs_remove_free_space(struct btrfs_block_group *block_group,
 		goto again;
 	}
 out_lock:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 out:
 	return ret;
@@ -2779,7 +2781,7 @@ void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
 	spin_lock(&ctl->tree_lock);
 	__btrfs_remove_free_space_cache_locked(ctl);
 	if (ctl->private)
-		btrfs_discard_update_discardable(ctl->private, ctl);
+		btrfs_discard_update_discardable(ctl->private);
 	spin_unlock(&ctl->tree_lock);
 }
 
@@ -2801,7 +2803,7 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
 		cond_resched_lock(&ctl->tree_lock);
 	}
 	__btrfs_remove_free_space_cache_locked(ctl);
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 }
 
@@ -2885,7 +2887,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
 		link_free_space(ctl, entry);
 	}
 out:
-	btrfs_discard_update_discardable(block_group, ctl);
+	btrfs_discard_update_discardable(block_group);
 	spin_unlock(&ctl->tree_lock);
 
 	if (align_gap_len)