Commit 14599cce authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Switch btree locking code to struct btree_bkey_cached_common

This is just some type safety cleanup.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 616928c3
...@@ -886,7 +886,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * ...@@ -886,7 +886,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
if (btree_node_read_locked(path, level + 1)) if (btree_node_read_locked(path, level + 1))
btree_node_unlock(trans, path, level + 1); btree_node_unlock(trans, path, level + 1);
ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type, ret = btree_node_lock(trans, path, &b->c, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip); lock_node_check_fn, (void *) k, trace_ip);
if (unlikely(ret)) { if (unlikely(ret)) {
if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused)) if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
......
...@@ -787,7 +787,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans, ...@@ -787,7 +787,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
} }
lock_type = __btree_lock_want(path, path->level); lock_type = __btree_lock_want(path, path->level);
ret = btree_node_lock(trans, path, b, SPOS_MAX, ret = btree_node_lock(trans, path, &b->c, SPOS_MAX,
path->level, lock_type, path->level, lock_type,
lock_root_check_fn, rootp, lock_root_check_fn, rootp,
trace_ip); trace_ip);
......
...@@ -18,7 +18,7 @@ static inline void six_lock_readers_add(struct six_lock *lock, int nr) ...@@ -18,7 +18,7 @@ static inline void six_lock_readers_add(struct six_lock *lock, int nr)
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans, struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
struct btree_path *skip, struct btree_path *skip,
struct btree *b, struct btree_bkey_cached_common *b,
unsigned level) unsigned level)
{ {
struct btree_path *path; struct btree_path *path;
...@@ -30,7 +30,7 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans, ...@@ -30,7 +30,7 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
return ret; return ret;
trans_for_each_path(trans, path) trans_for_each_path(trans, path)
if (path != skip && path->l[level].b == b) { if (path != skip && &path->l[level].b->c == b) {
int t = btree_node_locked_type(path, level); int t = btree_node_locked_type(path, level);
if (t != BTREE_NODE_UNLOCKED) if (t != BTREE_NODE_UNLOCKED)
...@@ -52,7 +52,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans, ...@@ -52,7 +52,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b) void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{ {
int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).n[SIX_LOCK_read]; int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
/* /*
* Must drop our read locks before calling six_lock_write() - * Must drop our read locks before calling six_lock_write() -
...@@ -78,7 +78,7 @@ static inline bool path_has_read_locks(struct btree_path *path) ...@@ -78,7 +78,7 @@ static inline bool path_has_read_locks(struct btree_path *path)
/* Slowpath: */ /* Slowpath: */
int __bch2_btree_node_lock(struct btree_trans *trans, int __bch2_btree_node_lock(struct btree_trans *trans,
struct btree_path *path, struct btree_path *path,
struct btree *b, struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level, struct bpos pos, unsigned level,
enum six_lock_type type, enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p, six_lock_should_sleep_fn should_sleep_fn, void *p,
...@@ -142,7 +142,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans, ...@@ -142,7 +142,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
/* Must lock btree nodes in key order: */ /* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) && if (btree_node_locked(linked, level) &&
bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b, bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c,
linked->cached)) <= 0) { linked->cached)) <= 0) {
reason = 7; reason = 7;
goto deadlock; goto deadlock;
...@@ -216,7 +216,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans, ...@@ -216,7 +216,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) || if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) && (btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) { btree_node_lock_increment(trans, &b->c, level, want))) {
mark_btree_node_locked(trans, path, level, want); mark_btree_node_locked(trans, path, level, want);
return true; return true;
} }
...@@ -260,7 +260,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans, ...@@ -260,7 +260,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
goto success; goto success;
if (btree_node_lock_seq_matches(path, b, level) && if (btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) { btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(trans, path, level); btree_node_unlock(trans, path, level);
goto success; goto success;
} }
......
...@@ -193,7 +193,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *, ...@@ -193,7 +193,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *,
static inline int btree_node_lock_type(struct btree_trans *trans, static inline int btree_node_lock_type(struct btree_trans *trans,
struct btree_path *path, struct btree_path *path,
struct btree *b, struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level, struct bpos pos, unsigned level,
enum six_lock_type type, enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p) six_lock_should_sleep_fn should_sleep_fn, void *p)
...@@ -202,7 +202,7 @@ static inline int btree_node_lock_type(struct btree_trans *trans, ...@@ -202,7 +202,7 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
u64 start_time; u64 start_time;
int ret; int ret;
if (six_trylock_type(&b->c.lock, type)) if (six_trylock_type(&b->lock, type))
return 0; return 0;
start_time = local_clock(); start_time = local_clock();
...@@ -212,8 +212,8 @@ static inline int btree_node_lock_type(struct btree_trans *trans, ...@@ -212,8 +212,8 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
trans->locking_btree_id = path->btree_id; trans->locking_btree_id = path->btree_id;
trans->locking_level = level; trans->locking_level = level;
trans->locking_lock_type = type; trans->locking_lock_type = type;
trans->locking = &b->c; trans->locking = b;
ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p); ret = six_lock_type(&b->lock, type, should_sleep_fn, p);
trans->locking = NULL; trans->locking = NULL;
if (ret) if (ret)
...@@ -228,15 +228,16 @@ static inline int btree_node_lock_type(struct btree_trans *trans, ...@@ -228,15 +228,16 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
* iterators: * iterators:
*/ */
static inline bool btree_node_lock_increment(struct btree_trans *trans, static inline bool btree_node_lock_increment(struct btree_trans *trans,
struct btree *b, unsigned level, struct btree_bkey_cached_common *b,
unsigned level,
enum btree_node_locked_type want) enum btree_node_locked_type want)
{ {
struct btree_path *path; struct btree_path *path;
trans_for_each_path(trans, path) trans_for_each_path(trans, path)
if (path->l[level].b == b && if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) { btree_node_locked_type(path, level) >= want) {
six_lock_increment(&b->c.lock, want); six_lock_increment(&b->lock, want);
return true; return true;
} }
...@@ -244,14 +245,16 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans, ...@@ -244,14 +245,16 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
} }
int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *, int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
struct btree *, struct bpos, unsigned, struct btree_bkey_cached_common *,
struct bpos, unsigned,
enum six_lock_type, enum six_lock_type,
six_lock_should_sleep_fn, void *, six_lock_should_sleep_fn, void *,
unsigned long); unsigned long);
static inline int btree_node_lock(struct btree_trans *trans, static inline int btree_node_lock(struct btree_trans *trans,
struct btree_path *path, struct btree_path *path,
struct btree *b, struct bpos pos, unsigned level, struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level,
enum six_lock_type type, enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p, six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip) unsigned long ip)
...@@ -261,12 +264,12 @@ static inline int btree_node_lock(struct btree_trans *trans, ...@@ -261,12 +264,12 @@ static inline int btree_node_lock(struct btree_trans *trans,
EBUG_ON(level >= BTREE_MAX_DEPTH); EBUG_ON(level >= BTREE_MAX_DEPTH);
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx))); EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->c.lock, type)) || if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, type) || btree_node_lock_increment(trans, b, level, type) ||
!(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type, !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
should_sleep_fn, p, ip))) { should_sleep_fn, p, ip))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->c.level].lock_taken_time = ktime_get_ns(); path->l[b->level].lock_taken_time = ktime_get_ns();
#endif #endif
} }
...@@ -361,7 +364,9 @@ static inline void btree_path_set_level_up(struct btree_trans *trans, ...@@ -361,7 +364,9 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
/* debug */ /* debug */
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *, struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
struct btree_path *, struct btree *, unsigned); struct btree_path *,
struct btree_bkey_cached_common *b,
unsigned);
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
......
...@@ -822,7 +822,7 @@ static inline int trans_lock_write(struct btree_trans *trans) ...@@ -822,7 +822,7 @@ static inline int trans_lock_write(struct btree_trans *trans)
goto fail; goto fail;
ret = btree_node_lock_type(trans, i->path, ret = btree_node_lock_type(trans, i->path,
insert_l(i)->b, &insert_l(i)->b->c,
i->path->pos, i->level, i->path->pos, i->level,
SIX_LOCK_write, NULL, NULL); SIX_LOCK_write, NULL, NULL);
BUG_ON(ret); BUG_ON(ret);
......
...@@ -452,7 +452,7 @@ TRACE_EVENT(btree_node_upgrade_fail, ...@@ -452,7 +452,7 @@ TRACE_EVENT(btree_node_upgrade_fail,
TRACE_BPOS_assign(pos, path->pos); TRACE_BPOS_assign(pos, path->pos);
__entry->locked = btree_node_locked(path, level); __entry->locked = btree_node_locked(path, level);
c = bch2_btree_node_lock_counts(trans, NULL, path->l[level].b, level), c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
__entry->self_read_count = c.n[SIX_LOCK_read]; __entry->self_read_count = c.n[SIX_LOCK_read];
__entry->self_intent_count = c.n[SIX_LOCK_intent]; __entry->self_intent_count = c.n[SIX_LOCK_intent];
c = six_lock_counts(&path->l[level].b->c.lock); c = six_lock_counts(&path->l[level].b->c.lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment