Commit f21566f1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill BTREE_ITER_NODES

We really only need to distinguish between btree iterators and btree key
cache iterators - this is more prep work for btree_path.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent deb0e573
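
The change boils down to this: the three-value iterator type that used to live in the low bits of iter->flags goes away, replaced by a single cached bit on the iterator, and node iterators become ordinary btree iterators that simply run at a nonzero level. A rough before/after sketch, drawn from the diff below (the trimmed struct and the helper at the end are illustrative only, not code from this patch):

/* Before: the iterator type was packed into the low bits of iter->flags. */
enum btree_iter_type {
	BTREE_ITER_KEYS,
	BTREE_ITER_NODES,
	BTREE_ITER_CACHED,
};
#define BTREE_ITER_TYPE		((1 << 2) - 1)

/* After: one bitfield distinguishes key cache iterators from btree iterators. */
struct btree_iter_excerpt {		/* hypothetical trimmed-down view */
	bool		cached:1;	/* set from BTREE_ITER_CACHED at creation */
	unsigned	level;		/* nonzero for what used to be BTREE_ITER_NODES */
};

/* Checks like btree_iter_type(iter) == BTREE_ITER_KEYS become: */
static inline bool iter_is_leaf_key_iter(const struct btree_iter_excerpt *iter)
{
	return !iter->cached && !iter->level;	/* hypothetical helper */
}
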
@@ -28,15 +28,14 @@ static void btree_iter_copy(struct btree_trans *, struct btree_iter *, struct bt
 static inline int btree_iter_cmp(const struct btree_iter *l,
 				 const struct btree_iter *r)
 {
 	return cmp_int(l->btree_id, r->btree_id) ?:
-		-cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
-		 bkey_cmp(l->real_pos, r->real_pos);
+		-cmp_int(l->cached, r->cached) ?:
+		 bkey_cmp(l->real_pos, r->real_pos) ?:
+		-cmp_int(l->level, r->level);
 }
 
 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
 {
-	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
-
 	/* Are we iterating over keys in all snapshots? */
 	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
 		p = bpos_successor(p);
@@ -50,8 +49,6 @@ static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
 {
-	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
-
 	/* Are we iterating over keys in all snapshots? */
 	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
 		p = bpos_predecessor(p);
@@ -210,7 +207,7 @@ static inline bool btree_iter_get_locks(struct btree_trans *trans,
 			(upgrade
 			 ? trace_node_upgrade_fail
 			 : trace_node_relock_fail)(trans->ip, trace_ip,
-					btree_iter_type(iter) == BTREE_ITER_CACHED,
+					iter->cached,
 					iter->btree_id, &iter->real_pos,
 					l, iter->l[l].lock_seq,
 					is_btree_node(iter, l)
@@ -246,9 +243,9 @@ static inline bool btree_iter_get_locks(struct btree_trans *trans,
 }
 
 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
-				  enum btree_iter_type type)
+				  bool cached)
 {
-	return type != BTREE_ITER_CACHED
+	return !cached
 		? container_of(_b, struct btree, c)->key.k.p
 		: container_of(_b, struct bkey_cached, c)->key.pos;
 }
@@ -301,8 +298,8 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
 			 * Within the same btree, cached iterators come before non
 			 * cached iterators:
 			 */
-			if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
-				if (btree_iter_is_cached(iter)) {
+			if (linked->cached != iter->cached) {
+				if (iter->cached) {
 					deadlock_iter = linked;
 					reason = 4;
 				}
@@ -322,7 +319,7 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
 		/* Must lock btree nodes in key order: */
 		if (btree_node_locked(linked, level) &&
 		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
-						 btree_iter_type(linked))) <= 0) {
+						 linked->cached)) <= 0) {
 			deadlock_iter = linked;
 			reason = 7;
 		}
@@ -332,10 +329,10 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
 		trace_trans_restart_would_deadlock(trans->ip, ip,
 				trans->in_traverse_all, reason,
 				deadlock_iter->btree_id,
-				btree_iter_type(deadlock_iter),
+				deadlock_iter->cached,
 				&deadlock_iter->real_pos,
 				iter->btree_id,
-				btree_iter_type(iter),
+				iter->cached,
 				&pos);
 		btree_trans_restart(trans);
 		return false;
@@ -404,7 +401,7 @@ bool bch2_btree_iter_relock_intent(struct btree_trans *trans,
 	     l++) {
 		if (!bch2_btree_node_relock(trans, iter, l)) {
 			trace_node_relock_fail(trans->ip, _RET_IP_,
-					btree_iter_type(iter) == BTREE_ITER_CACHED,
+					iter->cached,
 					iter->btree_id, &iter->real_pos,
 					l, iter->l[l].lock_seq,
 					is_btree_node(iter, l)
@@ -467,7 +464,7 @@ bool __bch2_btree_iter_upgrade(struct btree_trans *trans,
 	 */
 	trans_for_each_iter(trans, linked)
 		if (linked != iter &&
-		    btree_iter_type(linked) == btree_iter_type(iter) &&
+		    linked->cached == iter->cached &&
 		    linked->btree_id == iter->btree_id &&
 		    linked->locks_want < new_locks_want) {
 			linked->locks_want = new_locks_want;
@@ -584,7 +581,7 @@ static void bch2_btree_iter_verify_level(struct btree_trans *trans,
 	tmp = l->iter;
 	locked = btree_node_locked(iter, level);
 
-	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+	if (iter->cached) {
 		if (!level)
 			bch2_btree_iter_verify_cached(trans, iter);
 		return;
@@ -600,13 +597,6 @@ static void bch2_btree_iter_verify_level(struct btree_trans *trans,
 	BUG_ON(!btree_iter_pos_in_node(iter, l->b));
 
-	/*
-	 * node iterators don't use leaf node iterator:
-	 */
-	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
-	    level <= iter->min_depth)
-		goto unlock;
-
 	bch2_btree_node_iter_verify(&l->iter, l->b);
 
 	/*
@@ -630,7 +620,7 @@ static void bch2_btree_iter_verify_level(struct btree_trans *trans,
 		msg = "after";
 		goto err;
 	}
-unlock:
+
 	if (!locked)
 		btree_node_unlock(iter, level);
 	return;
@@ -661,7 +651,6 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 {
 	struct btree_trans *trans = iter->trans;
 	struct bch_fs *c = trans->c;
-	enum btree_iter_type type = btree_iter_type(iter);
 	unsigned i;
 
 	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
@@ -672,14 +661,11 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
 	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
 
-	BUG_ON(type == BTREE_ITER_NODES &&
-	       !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
-
-	BUG_ON(type != BTREE_ITER_NODES &&
+	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
 	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       !btree_type_has_snapshots(iter->btree_id));
 
-	for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
+	for (i = 0; i < (!iter->cached ? BTREE_MAX_DEPTH : 1); i++) {
 		if (!iter->l[i].b) {
 			BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
 			break;
@@ -693,15 +679,11 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 
 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
 {
-	enum btree_iter_type type = btree_iter_type(iter);
-
 	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       iter->pos.snapshot != iter->snapshot);
 
-	BUG_ON((type == BTREE_ITER_KEYS ||
-		type == BTREE_ITER_CACHED) &&
-	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
-		bkey_cmp(iter->pos, iter->k.p) > 0));
+	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
+	       bkey_cmp(iter->pos, iter->k.p) > 0);
 }
 
 void bch2_trans_verify_iters(struct btree_trans *trans, struct btree *b)
@@ -1036,7 +1018,7 @@ static inline void btree_iter_level_init(struct btree_trans *trans,
 					  struct btree_iter *iter,
 					  struct btree *b)
 {
-	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+	BUG_ON(iter->cached);
 
 	btree_iter_verify_new_node(trans, iter, b);
@@ -1058,7 +1040,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
 	struct btree_iter *iter;
 
 	trans_for_each_iter(trans, iter)
-		if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
+		if (!iter->cached &&
 		    btree_iter_pos_in_node(iter, b)) {
 			/*
 			 * bch2_trans_node_drop() has already been called -
@@ -1417,7 +1399,7 @@ static int btree_iter_traverse_one(struct btree_trans *trans,
 		goto out;
 	}
 
-	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+	if (iter->cached) {
 		ret = bch2_btree_iter_traverse_cached(trans, iter);
 		goto out;
 	}
@@ -1451,8 +1433,8 @@ static int btree_iter_traverse_one(struct btree_trans *trans,
 	if (unlikely(ret)) {
 		if (ret == 1) {
 			/*
-			 * Got to the end of the btree (in
-			 * BTREE_ITER_NODES mode)
+			 * No nodes at this level - got to the end of
+			 * the btree:
 			 */
 			ret = 0;
 			goto out;
@@ -1477,7 +1459,7 @@ static int btree_iter_traverse_one(struct btree_trans *trans,
 out:
 	BUG_ON((ret == -EINTR) != !!trans->restarted);
 	trace_iter_traverse(trans->ip, trace_ip,
-			    btree_iter_type(iter) == BTREE_ITER_CACHED,
+			    iter->cached,
 			    iter->btree_id, &iter->real_pos, ret);
 	bch2_btree_iter_verify(iter);
 	return ret;
@@ -1533,42 +1515,44 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 {
-	struct btree *b;
+	struct btree *b = NULL;
 	int ret;
 
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+	EBUG_ON(iter->cached);
 	bch2_btree_iter_verify(iter);
 
 	ret = btree_iter_traverse(iter);
 	if (ret)
-		return NULL;
+		goto out;
 
 	b = btree_iter_node(iter, iter->level);
 	if (!b)
-		return NULL;
+		goto out;
 
 	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
 
-	iter->pos = iter->real_pos = b->key.k.p;
+	bkey_init(&iter->k);
+	iter->k.p = iter->pos = iter->real_pos = b->key.k.p;
 	iter->trans->iters_sorted = false;
-
-	bch2_btree_iter_verify(iter);
 	iter->should_be_locked = true;
+out:
+	bch2_btree_iter_verify_entry_exit(iter);
+	bch2_btree_iter_verify(iter);
 
 	return b;
 }
 
 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 {
-	struct btree *b;
+	struct btree *b = NULL;
 	int ret;
 
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+	EBUG_ON(iter->cached);
 	bch2_btree_iter_verify(iter);
 
 	/* already got to end? */
 	if (!btree_iter_node(iter, iter->level))
-		return NULL;
+		goto out;
 
 	bch2_trans_cond_resched(iter->trans);
@@ -1579,12 +1563,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
 	ret = btree_iter_traverse(iter);
 	if (ret)
-		return NULL;
+		goto out;
 
 	/* got to end? */
 	b = btree_iter_node(iter, iter->level);
 	if (!b)
-		return NULL;
+		goto out;
 
 	if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
 		/*
@@ -1601,17 +1585,21 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 		bch2_btree_iter_verify(iter);
 
 		ret = btree_iter_traverse(iter);
-		if (ret)
-			return NULL;
+		if (ret) {
+			b = NULL;
+			goto out;
+		}
 
 		b = iter->l[iter->level].b;
 	}
 
-	iter->pos = iter->real_pos = b->key.k.p;
+	bkey_init(&iter->k);
+	iter->k.p = iter->pos = iter->real_pos = b->key.k.p;
 	iter->trans->iters_sorted = false;
-
-	bch2_btree_iter_verify(iter);
 	iter->should_be_locked = true;
+out:
+	bch2_btree_iter_verify_entry_exit(iter);
+	bch2_btree_iter_verify(iter);
 
 	return b;
 }
@@ -1636,7 +1624,7 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_p
 	iter->should_be_locked = false;
 	trans->iters_sorted = false;
 
-	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
+	if (unlikely(iter->cached)) {
 		btree_node_unlock(iter, 0);
 		iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
 		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
@@ -1734,7 +1722,7 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	EBUG_ON(iter->cached || iter->level);
 	bch2_btree_iter_verify(iter);
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -1824,7 +1812,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	EBUG_ON(iter->cached || iter->level);
 	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
 	bch2_btree_iter_verify(iter);
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -1891,8 +1879,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
-		btree_iter_type(iter) != BTREE_ITER_CACHED);
+	EBUG_ON(iter->level);
 	bch2_btree_iter_verify(iter);
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -1912,28 +1899,21 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 	if (unlikely(ret))
 		return bkey_s_c_err(ret);
 
-	if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
-	    !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
+	if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
 		struct bkey_i *next_update;
-		struct bkey_cached *ck;
 
 		next_update = btree_trans_peek_updates(iter);
 
-		switch (btree_iter_type(iter)) {
-		case BTREE_ITER_KEYS:
+		if (!iter->cached) {
 			k = btree_iter_level_peek_all(iter, &iter->l[0]);
 			EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0);
-			break;
-		case BTREE_ITER_CACHED:
-			ck = (void *) iter->l[0].b;
+		} else {
+			struct bkey_cached *ck = (void *) iter->l[0].b;
 			EBUG_ON(iter->btree_id != ck->key.btree_id ||
 				bkey_cmp(iter->pos, ck->key.pos));
 			BUG_ON(!ck->valid);
 			k = bkey_i_to_s_c(ck->k);
-			break;
-		case BTREE_ITER_NODES:
-			BUG();
 		}
 
 		if (next_update &&
@@ -2345,14 +2325,12 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	EBUG_ON(trans->restarted);
 
-	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
-	    btree_node_type_is_extents(btree_id) &&
-	    !(flags & BTREE_ITER_NOT_EXTENTS) &&
-	    !(flags & BTREE_ITER_ALL_SNAPSHOTS))
+	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+	    btree_node_type_is_extents(btree_id))
 		flags |= BTREE_ITER_IS_EXTENTS;
 
-	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
-	    !btree_type_has_snapshots(btree_id))
+	if (!btree_type_has_snapshots(btree_id) &&
+	    !(flags & __BTREE_ITER_ALL_SNAPSHOTS))
 		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
 
 	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
@@ -2366,7 +2344,7 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 		real_pos = bpos_nosnap_successor(pos);
 
 	trans_for_each_iter(trans, iter) {
-		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
+		if (iter->cached != (flags & BTREE_ITER_CACHED))
 			continue;
 
 		if (iter->btree_id != btree_id)
@@ -2397,9 +2375,9 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	trans->iters_live |= 1ULL << iter->idx;
 	trans->iters_touched |= 1ULL << iter->idx;
 
-	iter->flags = flags;
-
+	iter->cached = flags & BTREE_ITER_CACHED;
+	iter->flags = flags;
 	iter->snapshot = pos.snapshot;
 
 	/*
	 * If the iterator has locks_want greater than requested, we explicitly
@@ -2450,8 +2428,8 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
 	struct btree_iter *iter =
 		__bch2_trans_get_iter(trans, btree_id, pos,
 				      locks_want, depth,
-				      BTREE_ITER_NODES|
 				      BTREE_ITER_NOT_EXTENTS|
+				      __BTREE_ITER_ALL_SNAPSHOTS|
 				      BTREE_ITER_ALL_SNAPSHOTS|
 				      flags);
@@ -2705,21 +2683,20 @@ int bch2_trans_exit(struct btree_trans *trans)
 static void __maybe_unused
 bch2_btree_iter_node_to_text(struct printbuf *out,
 			     struct btree_bkey_cached_common *_b,
-			     enum btree_iter_type type)
+			     bool cached)
 {
 	pr_buf(out, " l=%u %s:",
 	       _b->level, bch2_btree_ids[_b->btree_id]);
-	bch2_bpos_to_text(out, btree_node_pos(_b, type));
+	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
 }
 
 #ifdef CONFIG_BCACHEFS_DEBUG
-static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
+static bool trans_has_locks(struct btree_trans *trans)
 {
 	struct btree_iter *iter;
 
 	trans_for_each_iter(trans, iter)
-		if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
-		    iter->nodes_locked)
+		if (iter->nodes_locked)
 			return true;
 	return false;
 }
@@ -2735,7 +2712,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 	mutex_lock(&c->btree_trans_lock);
 	list_for_each_entry(trans, &c->btree_trans_list, list) {
-		if (!trans_has_btree_nodes_locked(trans))
+		if (!trans_has_locks(trans))
 			continue;
 
 		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
@@ -2746,7 +2723,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 			pr_buf(out, " iter %u %c %s:",
 			       iter->idx,
-			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
+			       iter->cached ? 'c' : 'b',
 			       bch2_btree_ids[iter->btree_id]);
 			bch2_bpos_to_text(out, iter->pos);
 			pr_buf(out, "\n");
@@ -2757,7 +2734,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 					       btree_node_intent_locked(iter, l) ? "i" : "r", l);
 				bch2_btree_iter_node_to_text(out,
 					(void *) iter->l[l].b,
-					btree_iter_type(iter));
+					iter->cached);
 				pr_buf(out, "\n");
 			}
 		}
@@ -2768,7 +2745,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 		iter = &trans->iters[trans->locking_iter_idx];
 		pr_buf(out, " locking iter %u %c l=%u %s:",
 		       trans->locking_iter_idx,
-		       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
+		       iter->cached ? 'c' : 'b',
 		       trans->locking_level,
 		       bch2_btree_ids[trans->locking_btree_id]);
 		bch2_bpos_to_text(out, trans->locking_pos);
@@ -2776,7 +2753,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 			pr_buf(out, " node ");
 			bch2_btree_iter_node_to_text(out,
 				(void *) b,
-				btree_iter_type(iter));
+				iter->cached);
 			pr_buf(out, "\n");
 		}
 	}
......
@@ -176,44 +176,38 @@ struct btree_node_iter {
 	} data[MAX_BSETS];
 };
 
-enum btree_iter_type {
-	BTREE_ITER_KEYS,
-	BTREE_ITER_NODES,
-	BTREE_ITER_CACHED,
-};
-
-#define BTREE_ITER_TYPE			((1 << 2) - 1)
-
 /*
  * Iterate over all possible positions, synthesizing deleted keys for holes:
  */
-#define BTREE_ITER_SLOTS		(1 << 2)
+#define BTREE_ITER_SLOTS		(1 << 0)
 /*
  * Indicates that intent locks should be taken on leaf nodes, because we expect
  * to be doing updates:
  */
-#define BTREE_ITER_INTENT		(1 << 3)
+#define BTREE_ITER_INTENT		(1 << 1)
 /*
  * Causes the btree iterator code to prefetch additional btree nodes from disk:
  */
-#define BTREE_ITER_PREFETCH		(1 << 4)
+#define BTREE_ITER_PREFETCH		(1 << 2)
 /*
  * Indicates that this iterator should not be reused until transaction commit,
  * either because a pending update references it or because the update depends
  * on that particular key being locked (e.g. by the str_hash code, for hash
  * table consistency)
  */
-#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 5)
+#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 3)
 /*
  * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
  * @pos or the first key strictly greater than @pos
  */
-#define BTREE_ITER_IS_EXTENTS		(1 << 6)
-#define BTREE_ITER_NOT_EXTENTS		(1 << 7)
-#define BTREE_ITER_ERROR		(1 << 8)
-#define BTREE_ITER_CACHED_NOFILL	(1 << 9)
-#define BTREE_ITER_CACHED_NOCREATE	(1 << 10)
-#define BTREE_ITER_WITH_UPDATES		(1 << 11)
+#define BTREE_ITER_IS_EXTENTS		(1 << 4)
+#define BTREE_ITER_NOT_EXTENTS		(1 << 5)
+#define BTREE_ITER_ERROR		(1 << 6)
+#define BTREE_ITER_CACHED		(1 << 7)
+#define BTREE_ITER_CACHED_NOFILL	(1 << 8)
+#define BTREE_ITER_CACHED_NOCREATE	(1 << 9)
+#define BTREE_ITER_WITH_UPDATES		(1 << 10)
+#define __BTREE_ITER_ALL_SNAPSHOTS	(1 << 11)
 #define BTREE_ITER_ALL_SNAPSHOTS	(1 << 12)
 
 enum btree_iter_uptodate {
@@ -256,7 +250,8 @@ struct btree_iter {
 	struct bpos		real_pos;
 	enum btree_id		btree_id:4;
-	enum btree_iter_uptodate uptodate:3;
+	bool			cached:1;
+	enum btree_iter_uptodate uptodate:2;
 	/*
 	 * True if we've returned a key (and thus are expected to keep it
 	 * locked), false after set_pos - for avoiding spurious transaction
@@ -282,17 +277,6 @@ struct btree_iter {
 	struct bkey		k;
 };
 
-static inline enum btree_iter_type
-btree_iter_type(const struct btree_iter *iter)
-{
-	return iter->flags & BTREE_ITER_TYPE;
-}
-
-static inline bool btree_iter_is_cached(const struct btree_iter *iter)
-{
-	return btree_iter_type(iter) == BTREE_ITER_CACHED;
-}
-
 static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
 {
 	return iter->l + iter->level;
......
@@ -56,7 +56,7 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
 	bch2_btree_node_lock_write(trans, iter, b);
 
-	if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+	if (iter->cached)
 		return;
 
 	if (unlikely(btree_node_just_written(b)) &&
@@ -509,10 +509,10 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 	trans_for_each_update(trans, i) {
 		/*
-		 * peek_slot() doesn't work on a BTREE_ITER_NODES iter; those
-		 * iterator types should probably go away
+		 * peek_slot() doesn't yet work on iterators that point to
+		 * interior nodes:
 		 */
-		if (btree_iter_type(i->iter) != BTREE_ITER_KEYS)
+		if (i->cached || i->level)
 			continue;
 
 		old = bch2_btree_iter_peek_slot(i->iter);
@@ -1005,7 +1005,7 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
 		.bkey_type = __btree_node_type(iter->level, iter->btree_id),
 		.btree_id = iter->btree_id,
 		.level = iter->level,
-		.cached = btree_iter_is_cached(iter),
+		.cached = iter->cached,
 		.iter = iter,
 		.k = k,
 		.ip_allocated = _RET_IP_,