Commit f21566f1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill BTREE_ITER_NODES

We really only need to distinguish between btree iterators and btree key
cache iterators - this is more prep work for btree_path.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent deb0e573
This diff is collapsed.
...@@ -176,44 +176,38 @@ struct btree_node_iter { ...@@ -176,44 +176,38 @@ struct btree_node_iter {
} data[MAX_BSETS]; } data[MAX_BSETS];
}; };
enum btree_iter_type {
BTREE_ITER_KEYS,
BTREE_ITER_NODES,
BTREE_ITER_CACHED,
};
#define BTREE_ITER_TYPE ((1 << 2) - 1)
/* /*
* Iterate over all possible positions, synthesizing deleted keys for holes: * Iterate over all possible positions, synthesizing deleted keys for holes:
*/ */
#define BTREE_ITER_SLOTS (1 << 2) #define BTREE_ITER_SLOTS (1 << 0)
/* /*
* Indicates that intent locks should be taken on leaf nodes, because we expect * Indicates that intent locks should be taken on leaf nodes, because we expect
* to be doing updates: * to be doing updates:
*/ */
#define BTREE_ITER_INTENT (1 << 3) #define BTREE_ITER_INTENT (1 << 1)
/* /*
* Causes the btree iterator code to prefetch additional btree nodes from disk: * Causes the btree iterator code to prefetch additional btree nodes from disk:
*/ */
#define BTREE_ITER_PREFETCH (1 << 4) #define BTREE_ITER_PREFETCH (1 << 2)
/* /*
* Indicates that this iterator should not be reused until transaction commit, * Indicates that this iterator should not be reused until transaction commit,
* either because a pending update references it or because the update depends * either because a pending update references it or because the update depends
* on that particular key being locked (e.g. by the str_hash code, for hash * on that particular key being locked (e.g. by the str_hash code, for hash
* table consistency) * table consistency)
*/ */
#define BTREE_ITER_KEEP_UNTIL_COMMIT (1 << 5) #define BTREE_ITER_KEEP_UNTIL_COMMIT (1 << 3)
/* /*
* Used in bch2_btree_iter_traverse(), to indicate whether we're searching for * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
* @pos or the first key strictly greater than @pos * @pos or the first key strictly greater than @pos
*/ */
#define BTREE_ITER_IS_EXTENTS (1 << 6) #define BTREE_ITER_IS_EXTENTS (1 << 4)
#define BTREE_ITER_NOT_EXTENTS (1 << 7) #define BTREE_ITER_NOT_EXTENTS (1 << 5)
#define BTREE_ITER_ERROR (1 << 8) #define BTREE_ITER_ERROR (1 << 6)
#define BTREE_ITER_CACHED_NOFILL (1 << 9) #define BTREE_ITER_CACHED (1 << 7)
#define BTREE_ITER_CACHED_NOCREATE (1 << 10) #define BTREE_ITER_CACHED_NOFILL (1 << 8)
#define BTREE_ITER_WITH_UPDATES (1 << 11) #define BTREE_ITER_CACHED_NOCREATE (1 << 9)
#define BTREE_ITER_WITH_UPDATES (1 << 10)
#define __BTREE_ITER_ALL_SNAPSHOTS (1 << 11)
#define BTREE_ITER_ALL_SNAPSHOTS (1 << 12) #define BTREE_ITER_ALL_SNAPSHOTS (1 << 12)
enum btree_iter_uptodate { enum btree_iter_uptodate {
...@@ -256,7 +250,8 @@ struct btree_iter { ...@@ -256,7 +250,8 @@ struct btree_iter {
struct bpos real_pos; struct bpos real_pos;
enum btree_id btree_id:4; enum btree_id btree_id:4;
enum btree_iter_uptodate uptodate:3; bool cached:1;
enum btree_iter_uptodate uptodate:2;
/* /*
* True if we've returned a key (and thus are expected to keep it * True if we've returned a key (and thus are expected to keep it
* locked), false after set_pos - for avoiding spurious transaction * locked), false after set_pos - for avoiding spurious transaction
...@@ -282,17 +277,6 @@ struct btree_iter { ...@@ -282,17 +277,6 @@ struct btree_iter {
struct bkey k; struct bkey k;
}; };
static inline enum btree_iter_type
btree_iter_type(const struct btree_iter *iter)
{
return iter->flags & BTREE_ITER_TYPE;
}
static inline bool btree_iter_is_cached(const struct btree_iter *iter)
{
return btree_iter_type(iter) == BTREE_ITER_CACHED;
}
static inline struct btree_iter_level *iter_l(struct btree_iter *iter) static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
{ {
return iter->l + iter->level; return iter->l + iter->level;
......
...@@ -56,7 +56,7 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans, ...@@ -56,7 +56,7 @@ inline void bch2_btree_node_lock_for_insert(struct btree_trans *trans,
bch2_btree_node_lock_write(trans, iter, b); bch2_btree_node_lock_write(trans, iter, b);
if (btree_iter_type(iter) == BTREE_ITER_CACHED) if (iter->cached)
return; return;
if (unlikely(btree_node_just_written(b)) && if (unlikely(btree_node_just_written(b)) &&
...@@ -509,10 +509,10 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, ...@@ -509,10 +509,10 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
trans_for_each_update(trans, i) { trans_for_each_update(trans, i) {
/* /*
* peek_slot() doesn't work on a BTREE_ITER_NODES iter; those * peek_slot() doesn't yet work on iterators that point to
* iterator types should probably go away * interior nodes:
*/ */
if (btree_iter_type(i->iter) != BTREE_ITER_KEYS) if (i->cached || i->level)
continue; continue;
old = bch2_btree_iter_peek_slot(i->iter); old = bch2_btree_iter_peek_slot(i->iter);
...@@ -1005,7 +1005,7 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter, ...@@ -1005,7 +1005,7 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
.bkey_type = __btree_node_type(iter->level, iter->btree_id), .bkey_type = __btree_node_type(iter->level, iter->btree_id),
.btree_id = iter->btree_id, .btree_id = iter->btree_id,
.level = iter->level, .level = iter->level,
.cached = btree_iter_is_cached(iter), .cached = iter->cached,
.iter = iter, .iter = iter,
.k = k, .k = k,
.ip_allocated = _RET_IP_, .ip_allocated = _RET_IP_,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment