Commit 3074bc0f authored by Kent Overstreet, committed by Kent Overstreet

Revert "bcachefs: Add more assertions for locking btree iterators out of order"

Figured out the bug we were chasing, and it had nothing to do with
locking btree iterators/paths out of order.

This reverts commit ff08733dd298c969aec7c7828095458f73fd5374.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent aae4eea6
@@ -157,7 +157,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
 	    (btree_node_lock_seq_matches(path, b, level) &&
 	     btree_node_lock_increment(trans, b, level, want))) {
-		mark_btree_node_locked(trans, path, level, want);
+		mark_btree_node_locked(path, level, want);
 		return true;
 	} else {
 		return false;
@@ -193,7 +193,7 @@ static bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	return false;
 success:
-	mark_btree_node_intent_locked(trans, path, level);
+	mark_btree_node_intent_locked(path, level);
 	return true;
 }
@@ -1045,7 +1045,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
 		    t != BTREE_NODE_UNLOCKED) {
 			btree_node_unlock(path, b->c.level);
 			six_lock_increment(&b->c.lock, (enum six_lock_type) t);
-			mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
+			mark_btree_node_locked(path, b->c.level, (enum six_lock_type) t);
 		}
 
 		btree_path_level_init(trans, path, b);
@@ -1122,7 +1122,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
 			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
 				path->l[i].b = NULL;
 
-			mark_btree_node_locked(trans, path, path->level, lock_type);
+			mark_btree_node_locked(path, path->level, lock_type);
 			btree_path_level_init(trans, path, b);
 			return 0;
 		}
@@ -1214,7 +1214,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
 	if (unlikely(ret))
 		goto err;
 
-	mark_btree_node_locked(trans, path, level, lock_type);
+	mark_btree_node_locked(path, level, lock_type);
 	btree_path_level_init(trans, path, b);
 
 	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
@@ -1295,9 +1295,6 @@ static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
 		path = trans->paths + trans->sorted[i];
 
 		EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
-#ifdef CONFIG_BCACHEFS_DEBUG
-		trans->traverse_all_idx = path->idx;
-#endif
 
 		ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
 		if (ret)
@@ -1319,11 +1316,6 @@ static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
 out:
 	bch2_btree_cache_cannibalize_unlock(c);
 
-#ifdef CONFIG_BCACHEFS_DEBUG
-	trans->traverse_all_idx = U8_MAX;
-#endif
-	trans->in_traverse_all = false;
-
 	trace_trans_traverse_all(trans->ip, trace_ip);
 	return ret;
 }
...
@@ -297,7 +297,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 		if (!ck)
 			goto retry;
 
-		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
+		mark_btree_node_locked(path, 0, SIX_LOCK_intent);
 		path->locks_want = 1;
 	} else {
 		enum six_lock_type lock_want = __btree_lock_want(path, 0);
@@ -319,7 +319,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 			goto retry;
 		}
 
-		mark_btree_node_locked(trans, path, 0, lock_want);
+		mark_btree_node_locked(path, 0, lock_want);
 	}
 
 	path->l[0].lock_seq = ck->c.lock.state.seq;
...
@@ -57,8 +57,7 @@ static inline void mark_btree_node_unlocked(struct btree_path *path,
 	path->nodes_intent_locked &= ~(1 << level);
 }
 
-static inline void mark_btree_node_locked(struct btree_trans *trans,
-					  struct btree_path *path,
+static inline void mark_btree_node_locked(struct btree_path *path,
 					  unsigned level,
 					  enum six_lock_type type)
 {
@@ -68,20 +67,12 @@ static inline void mark_btree_node_locked(struct btree_trans *trans,
 	path->nodes_locked |= 1 << level;
 	path->nodes_intent_locked |= type << level;
-#ifdef CONFIG_BCACHEFS_DEBUG
-	path->ip_locked = _RET_IP_;
-	btree_trans_sort_paths(trans);
-	BUG_ON(trans->in_traverse_all &&
-	       trans->traverse_all_idx != U8_MAX &&
-	       path->sorted_idx > trans->paths[trans->traverse_all_idx].sorted_idx);
-#endif
 }
 
-static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
-						 struct btree_path *path,
+static inline void mark_btree_node_intent_locked(struct btree_path *path,
 						 unsigned level)
 {
-	mark_btree_node_locked(trans, path, level, SIX_LOCK_intent);
+	mark_btree_node_locked(path, level, SIX_LOCK_intent);
 }
 
 static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
@@ -120,9 +111,6 @@ static inline void __bch2_btree_path_unlock(struct btree_path *path)
 	while (path->nodes_locked)
 		btree_node_unlock(path, __ffs(path->nodes_locked));
-#ifdef CONFIG_BCACHEFS_DEBUG
-	path->ip_locked = 0;
-#endif
 }
 
 static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
...
@@ -255,7 +255,6 @@ struct btree_path {
 	} l[BTREE_MAX_DEPTH];
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 	unsigned long		ip_allocated;
-	unsigned long		ip_locked;
 #endif
 };
@@ -369,7 +368,6 @@ struct btree_trans {
 	struct bpos		locking_pos;
 	u8			locking_btree_id;
 	u8			locking_level;
-	u8			traverse_all_idx;
 	pid_t			pid;
 #endif
 	unsigned long		ip;
...
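For context on what was reverted: the debug-only check removed from mark_btree_node_locked() enforced that, while __btree_path_traverse_all() walks the transaction's paths in sorted order, no path later in that order takes a node lock before the path currently being traversed. A minimal sketch of that invariant, using only fields visible in the hunks above (the standalone helper and its name are illustrative, not part of the original code):

/*
 * Illustrative sketch only: the reverted code open-coded this test as a
 * BUG_ON() inside mark_btree_node_locked() (see the btree_locking.h hunk).
 */
static inline bool traverse_order_violated(struct btree_trans *trans,
					   struct btree_path *path)
{
	/* Only meaningful while __btree_path_traverse_all() is running: */
	return trans->in_traverse_all &&
	       trans->traverse_all_idx != U8_MAX &&
	       path->sorted_idx > trans->paths[trans->traverse_all_idx].sorted_idx;
}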