Commit f13f5a8c authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: move some checks to expensive_debug_checks

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 76a0537b
@@ -257,6 +257,8 @@ do { \
 	BCH_DEBUG_PARAM(expensive_debug_checks,				\
 		"Enables various runtime debugging checks that "	\
 		"significantly affect performance")			\
+	BCH_DEBUG_PARAM(debug_check_iterators,				\
+		"Enables extra verification for btree iterators")	\
 	BCH_DEBUG_PARAM(debug_check_bkeys,				\
 		"Run bkey_debugcheck (primarily checking GC/allocation "\
 		"information) when iterating over keys")		\
...
@@ -1023,7 +1023,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		k = p;
 	}

-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+	if (btree_keys_expensive_checks(b)) {
 		BUG_ON(ret >= orig_k);

 		for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
@@ -1644,10 +1644,11 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
 				  struct btree *b)
 {
-#ifdef CONFIG_BCACHEFS_DEBUG
-	bch2_btree_node_iter_verify(iter, b);
-	bch2_btree_node_iter_next_check(iter, b);
-#endif
+	if (btree_keys_expensive_checks(b)) {
+		bch2_btree_node_iter_verify(iter, b);
+		bch2_btree_node_iter_next_check(iter, b);
+	}
+
 	__bch2_btree_node_iter_advance(iter, b);
 }
@@ -1710,7 +1711,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *ite
 	iter->data[0].k		= __btree_node_key_to_offset(b, prev);
 	iter->data[0].end	= end;
 out:
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+	if (btree_keys_expensive_checks(b)) {
 		struct btree_node_iter iter2 = *iter;

 		if (prev)
...
@@ -429,6 +429,9 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	struct btree_node_iter tmp	= l->iter;
 	struct bkey_packed *k;

+	if (!debug_check_iterators(iter->trans->c))
+		return;
+
 	if (iter->uptodate > BTREE_ITER_NEED_PEEK)
 		return;
@@ -475,6 +478,9 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
 {
 	struct btree_iter *linked;

+	if (!debug_check_iterators(iter->trans->c))
+		return;
+
 	trans_for_each_iter_with_node(iter->trans, b, linked)
 		__bch2_btree_iter_verify(linked, b);
 }
...
@@ -788,7 +788,8 @@ static bool bch2_extent_merge_inline(struct bch_fs *,
 					struct bkey_packed *,
 					bool);

-static void verify_extent_nonoverlapping(struct btree *b,
+static void verify_extent_nonoverlapping(struct bch_fs *c,
+					 struct btree *b,
 					 struct btree_node_iter *_iter,
 					 struct bkey_i *insert)
 {
@@ -797,6 +798,9 @@ static void verify_extent_nonoverlapping(struct btree *b,
 	struct bkey_packed *k;
 	struct bkey uk;

+	if (!expensive_debug_checks(c))
+		return;
+
 	iter = *_iter;
 	k = bch2_btree_node_iter_prev_filter(&iter, b, KEY_TYPE_discard);
 	BUG_ON(k &&
@@ -847,7 +851,7 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
 	BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, l->b));

 	EBUG_ON(bkey_deleted(&insert->k) || !insert->k.size);
-	verify_extent_nonoverlapping(l->b, &l->iter, insert);
+	verify_extent_nonoverlapping(c, l->b, &l->iter, insert);

 	node_iter = l->iter;
 	k = bch2_btree_node_iter_prev_filter(&node_iter, l->b, KEY_TYPE_discard);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment