Commit ea6a478e authored by Josef Bacik, committed by Chris Mason

Btrfs: Fix for lockdep warnings with alloc_mutex and pinned_mutex

This fixes the lockdep complaint by having a different mutex to guard caching the
block group, so you don't end up with this backwards dependency.  Thank you,

Signed-off-by: Josef Bacik <jbacik@redhat.com>
parent 0e6bd956
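As far as the ordering can be reconstructed from the hunks below, find_free_extent()
used to call cache_block_group() while holding alloc_mutex, and caching takes
pinned_mutex (see add_new_free_space()); the free/pin paths take pinned_mutex first
and then alloc_mutex inside btrfs_add_free_space(), which is the backwards dependency
lockdep reports. Below is a minimal sketch of the caching step as it works after this
patch; prepare_block_group() is a hypothetical helper for illustration, not a function
added by the patch:

/*
 * Inferred lock ordering (sketch, not kernel source):
 *
 *   before:  find_free_extent():  alloc_mutex -> pinned_mutex
 *            free/pin paths:      pinned_mutex -> alloc_mutex   (AB-BA inversion)
 *
 *   after:   caching runs under its own cache_mutex, so alloc_mutex is
 *            never held while pinned_mutex is acquired.
 */
static int prepare_block_group(struct btrfs_root *root,
			       struct btrfs_block_group_cache *cache)
{
	int ret = 0;

	/* cache_block_group() may take pinned_mutex internally */
	if (unlikely(!cache->cached)) {
		mutex_lock(&cache->cache_mutex);
		ret = cache_block_group(root, cache);
		mutex_unlock(&cache->cache_mutex);
	}
	return ret;
}

Since caching no longer runs with alloc_mutex held, add_new_free_space() also switches
from btrfs_add_free_space_lock() (which expects the caller to hold alloc_mutex) to
btrfs_add_free_space() (which takes alloc_mutex itself).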
@@ -580,6 +580,7 @@ struct btrfs_block_group_cache {
 	struct btrfs_block_group_item item;
 	spinlock_t lock;
 	struct mutex alloc_mutex;
+	struct mutex cache_mutex;
 	u64 pinned;
 	u64 reserved;
 	u64 flags;
@@ -170,7 +170,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
-			ret = btrfs_add_free_space_lock(block_group, start,
+			ret = btrfs_add_free_space(block_group, start,
 						   size);
 			BUG_ON(ret);
 			start = extent_end + 1;
@@ -181,7 +181,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group,
 	if (start < end) {
 		size = end - start;
-		ret = btrfs_add_free_space_lock(block_group, start, size);
+		ret = btrfs_add_free_space(block_group, start, size);
 		BUG_ON(ret);
 	}
 	mutex_unlock(&info->pinned_mutex);
@@ -2842,17 +2842,19 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
 		if (!block_group)
 			goto new_group_no_lock;
 
-		mutex_lock(&block_group->alloc_mutex);
-		if (unlikely(!block_group_bits(block_group, data)))
-			goto new_group;
-
-		ret = cache_block_group(root, block_group);
-		if (ret) {
-			mutex_unlock(&block_group->alloc_mutex);
-			break;
+		if (unlikely(!block_group->cached)) {
+			mutex_lock(&block_group->cache_mutex);
+			ret = cache_block_group(root, block_group);
+			mutex_unlock(&block_group->cache_mutex);
+			if (ret)
+				break;
 		}
 
-		if (block_group->ro)
+		mutex_lock(&block_group->alloc_mutex);
+		if (unlikely(!block_group_bits(block_group, data)))
+			goto new_group;
+
+		if (unlikely(block_group->ro))
 			goto new_group;
 
 		free_space = btrfs_find_free_space(block_group, search_start,
@@ -3273,12 +3275,12 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
 	struct btrfs_block_group_cache *block_group;
 
 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	mutex_lock(&block_group->alloc_mutex);
+	mutex_lock(&block_group->cache_mutex);
 	cache_block_group(root, block_group);
+	mutex_unlock(&block_group->cache_mutex);
 
-	ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
-					   ins->offset);
-	mutex_unlock(&block_group->alloc_mutex);
+	ret = btrfs_remove_free_space(block_group, ins->objectid,
+				      ins->offset);
 	BUG_ON(ret);
 	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
 					    ref_generation, owner, ins);
@@ -5801,6 +5803,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		spin_lock_init(&cache->lock);
 		mutex_init(&cache->alloc_mutex);
+		mutex_init(&cache->cache_mutex);
 		INIT_LIST_HEAD(&cache->list);
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -5854,6 +5857,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->key.offset = size;
 	spin_lock_init(&cache->lock);
 	mutex_init(&cache->alloc_mutex);
+	mutex_init(&cache->cache_mutex);
 	INIT_LIST_HEAD(&cache->list);
 	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);