Commit e879389f authored by Kent Overstreet

bcachefs: Fix bch2_btree_node_fill() for !path

We shouldn't be doing the unlock/relock dance when we're not using a
path - this fixes an assertion pop when called from btree node scan.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8cf2036e
...@@ -709,7 +709,6 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, ...@@ -709,7 +709,6 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b; struct btree *b;
u32 seq;
if (unlikely(level >= BTREE_MAX_DEPTH)) { if (unlikely(level >= BTREE_MAX_DEPTH)) {
int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u", int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
...@@ -775,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, ...@@ -775,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
} }
set_btree_node_read_in_flight(b); set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock); six_unlock_write(&b->c.lock);
seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */ if (path) {
if (path && sync) u32 seq = six_lock_seq(&b->c.lock);
bch2_trans_unlock_noassert(trans);
bch2_btree_node_read(trans, b, sync);
if (!sync) /* Unlock before doing IO: */
return NULL; six_unlock_intent(&b->c.lock);
bch2_trans_unlock_noassert(trans);
if (path) { bch2_btree_node_read(trans, b, sync);
int ret = bch2_trans_relock(trans) ?:
bch2_btree_path_relock_intent(trans, path);
if (ret) {
BUG_ON(!trans->restarted);
return ERR_PTR(ret);
}
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) { if (!sync)
BUG_ON(!path); return NULL;
trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path); if (!six_relock_type(&b->c.lock, lock_type, seq))
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill)); b = NULL;
} else {
bch2_btree_node_read(trans, b, sync);
if (lock_type == SIX_LOCK_read)
six_lock_downgrade(&b->c.lock);
} }
return b; return b;
...@@ -1135,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans, ...@@ -1135,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b;
BUG_ON(path && !btree_node_locked(path, level + 1)); BUG_ON(path && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH); BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k); struct btree *b = btree_cache_find(bc, k);
if (b) if (b)
return 0; return 0;
b = bch2_btree_node_fill(trans, path, k, btree_id, b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false); level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b); if (!IS_ERR_OR_NULL(b))
six_unlock_read(&b->c.lock);
return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
} }
void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k) void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment