Commit bf77467a authored by Josef Bacik, committed by David Sterba

btrfs: introduce BTRFS_NESTING_LEFT/BTRFS_NESTING_RIGHT

Our lockdep maps are based on rootid+level; however, in some cases we
will lock adjacent blocks on the same level, namely when searching
forward or during a split/balance.  Because of this, lockdep will
complain, so we need a separate subclass to indicate to lockdep that
these are different locks.

lock leaf -> BTRFS_NESTING_NORMAL
  cow leaf -> BTRFS_NESTING_COW
    split leaf
       lock left -> BTRFS_NESTING_LEFT
       lock right -> BTRFS_NESTING_RIGHT

The above graph illustrates the need for these new nesting subclasses.
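
As a concrete illustration of the mechanism, here is a minimal sketch
of how a nesting enum can be forwarded to lockdep as a subclass. It
uses a simplified rwsem-based stand-in for the tree lock; the struct
and helper names are hypothetical, not the actual btrfs locking code:

    #include <linux/rwsem.h>

    enum btrfs_lock_nesting {
            BTRFS_NESTING_NORMAL,
            BTRFS_NESTING_COW,
            BTRFS_NESTING_LEFT,
            BTRFS_NESTING_RIGHT,
    };

    /* Simplified, hypothetical stand-in for the real extent_buffer. */
    struct example_buffer {
            struct rw_semaphore lock;
    };

    static void example_tree_lock(struct example_buffer *eb,
                                  enum btrfs_lock_nesting nest)
    {
            /*
             * down_write_nested() hands 'nest' to lockdep as the
             * subclass, so a sibling taken with BTRFS_NESTING_LEFT is
             * tracked apart from the BTRFS_NESTING_NORMAL lock already
             * held on a block of the same class, and the second
             * acquisition is not flagged as a deadlock.
             */
            down_write_nested(&eb->lock, nest);
    }

A split path would then hold the searched node under
BTRFS_NESTING_NORMAL and take example_tree_lock(left,
BTRFS_NESTING_LEFT) or example_tree_lock(right, BTRFS_NESTING_RIGHT)
on the adjacent siblings.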
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9631e4cc
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1896,7 +1896,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		left = NULL;
 
 	if (left) {
-		btrfs_tree_lock(left);
+		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
 		btrfs_set_lock_blocking_write(left);
 		wret = btrfs_cow_block(trans, root, left,
 				       parent, pslot - 1, &left,
@@ -1912,7 +1912,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		right = NULL;
 
 	if (right) {
-		btrfs_tree_lock(right);
+		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
 		btrfs_set_lock_blocking_write(right);
 		wret = btrfs_cow_block(trans, root, right,
 				       parent, pslot + 1, &right,
@@ -2076,7 +2076,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 	if (left) {
 		u32 left_nr;
 
-		btrfs_tree_lock(left);
+		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
 		btrfs_set_lock_blocking_write(left);
 
 		left_nr = btrfs_header_nritems(left);
@@ -2131,7 +2131,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 	if (right) {
 		u32 right_nr;
 
-		btrfs_tree_lock(right);
+		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
 		btrfs_set_lock_blocking_write(right);
 
 		right_nr = btrfs_header_nritems(right);
@@ -3806,7 +3806,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (IS_ERR(right))
 		return 1;
 
-	btrfs_tree_lock(right);
+	__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
 	btrfs_set_lock_blocking_write(right);
 
 	free_space = btrfs_leaf_free_space(right);
@@ -4045,7 +4045,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (IS_ERR(left))
 		return 1;
 
-	btrfs_tree_lock(left);
+	__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
 	btrfs_set_lock_blocking_write(left);
 
 	free_space = btrfs_leaf_free_space(left);
@@ -5467,7 +5467,7 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 		if (!ret) {
 			btrfs_set_path_blocking(path);
 			__btrfs_tree_read_lock(next,
-					       BTRFS_NESTING_NORMAL,
+					       BTRFS_NESTING_RIGHT,
					       path->recurse);
 		}
 		next_rw_lock = BTRFS_READ_LOCK;
@@ -5504,7 +5504,7 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 		if (!ret) {
 			btrfs_set_path_blocking(path);
 			__btrfs_tree_read_lock(next,
-					       BTRFS_NESTING_NORMAL,
+					       BTRFS_NESTING_RIGHT,
					       path->recurse);
 		}
 		next_rw_lock = BTRFS_READ_LOCK;
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -32,6 +32,18 @@ enum btrfs_lock_nesting {
 	 */
 	BTRFS_NESTING_COW,
 
+	/*
+	 * Oftentimes we need to lock adjacent nodes on the same level while
+	 * still holding the lock on the original node we searched to, such as
+	 * for searching forward or for split/balance.
+	 *
+	 * Because of this we need to indicate to lockdep that this is
+	 * acceptable by having a different subclass for each of these
+	 * operations.
+	 */
+	BTRFS_NESTING_LEFT,
+	BTRFS_NESTING_RIGHT,
+
 	/*
 	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
 	 * add this in here and add a static_assert to keep us from going over
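
The comment in the last hunk mentions capping the enum with a
static_assert. A hedged sketch of what such a guard can look like
follows; the BTRFS_NESTING_MAX terminator entry is an assumption here,
while MAX_LOCKDEP_SUBCLASSES is lockdep's real per-class subclass
limit:

    #include <linux/build_bug.h>
    #include <linux/lockdep.h>

    enum btrfs_lock_nesting {
            BTRFS_NESTING_NORMAL,
            BTRFS_NESTING_COW,
            BTRFS_NESTING_LEFT,
            BTRFS_NESTING_RIGHT,
            /* Hypothetical terminator, used only by the assertion. */
            BTRFS_NESTING_MAX,
    };

    /*
     * Fail the build if the enum ever outgrows the number of
     * subclasses lockdep can track for a single lock class.
     */
    static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
                  "too many lock subclasses defined");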