Commit 1fb4fe63 authored by Kent Overstreet

six locks: Kill six_lock_state union

As suggested by Linus, this drops the six_lock_state union in favor of
raw bitmasks.

On the one hand, bitfields give more type-level structure to the code.
However, a significant amount of the code was working with
six_lock_state as a u64/atomic64_t, and the conversions from the
bitfields to the u64 were deemed a bit too out-there.

More significantly, because bitfield order is poorly defined (#ifdef
__LITTLE_ENDIAN_BITFIELD can be used, but is gross), incrementing the
sequence number would overflow into the rest of the bitfield if the
compiler didn't put the sequence number at the high end of the word.
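With the state held in one u64, the field layout is written out as explicit shifts and masks instead of being left to the compiler. A minimal sketch of the idea, assuming only what the six_lock_seq() helper added below implies (the sequence number occupies the high 32 bits); the constant and function names here are illustrative, not the actual six.c definitions:

  #include <linux/atomic.h>

  /* Illustrative layout, not the real six.c constants: seq in bits 32..63,
   * matching six_lock_seq()'s "state >> 32". */
  #define EX_STATE_SEQ_SHIFT	32

  /* Hypothetical helper: bumping the sequence number is a plain 64-bit add.
   * Any carry falls off the top of the word, so it can never spill into a
   * neighbouring field whose position the compiler happened to choose. */
  static inline void ex_seq_bump(atomic64_t *state)
  {
  	atomic64_add(1ULL << EX_STATE_SEQ_SHIFT, state);
  }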

The new code is a bit saner when we're on an architecture without real
atomic64_t support - all accesses to lock->state now go through
atomic64_*() operations.

On architectures with real atomic64_t support, we additionally use
atomic bit ops for setting/clearing individual bits.
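A rough sketch of how that split can look; the helper name and the exact #ifdef test are assumptions for illustration (not lifted from six.c), and the bit-op path further assumes a 64-bit architecture where an unsigned long covers the whole state word:

  #include <linux/atomic.h>
  #include <linux/bitops.h>

  /* Hypothetical helper: set one flag bit in the lock state.  With the
   * spinlock-based atomic64 fallback (CONFIG_GENERIC_ATOMIC64) every access
   * stays inside the atomic64_*() API; with real atomic64_t support the
   * cheaper atomic bit ops can operate on the state word directly. */
  static inline void ex_state_set_bit(atomic64_t *state, unsigned int bit)
  {
  #ifdef CONFIG_GENERIC_ATOMIC64
  	atomic64_or(1ULL << bit, state);
  #else
  	set_bit(bit, (unsigned long *) state);
  #endif
  }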

Text size: 7467 bytes -> 4649 bytes - compilers still suck at
bitfields.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c4bd3491
@@ -735,7 +735,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock);
- seq = b->c.lock.state.seq;
+ seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
@@ -859,7 +859,7 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
@@ -957,7 +957,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
@@ -483,7 +483,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
struct btree_node_entry *bne;
bool reinit_iter = false;
- EBUG_ON(!(b->c.lock.state.seq & 1));
+ EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
BUG_ON(bset_written(b, bset(b, &b->set[1])));
BUG_ON(btree_node_just_written(b));
@@ -652,9 +652,9 @@ void bch2_btree_path_level_init(struct btree_trans *trans,
BUG_ON(path->cached);
EBUG_ON(!btree_path_pos_in_node(path, b));
- EBUG_ON(b->c.lock.state.seq & 1);
+ EBUG_ON(six_lock_seq(&b->c.lock) & 1);
- path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
path->l[b->c.level].b = b;
__btree_path_level_init(path, b->c.level);
}
@@ -49,7 +49,7 @@ static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
* write lock. The lock sequence number is incremented by taking and
* releasing write locks and is even when unlocked:
*/
- return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+ return path->l[level].lock_seq >> 1 == six_lock_seq(&b->c.lock) >> 1;
}
static inline struct btree *btree_node_parent(struct btree_path *path,
@@ -251,7 +251,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
}
path->l[0].b = (void *) ck;
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
ret = bch2_btree_node_lock_write(trans, path, &ck->c);
@@ -506,7 +506,7 @@ bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree
mark_btree_node_locked(trans, path, 0, lock_want);
}
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
path->l[0].b = (void *) ck;
fill:
path->uptodate = BTREE_ITER_UPTODATE;
@@ -588,7 +588,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
mark_btree_node_locked(trans, path, 0, lock_want);
}
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
path->l[0].b = (void *) ck;
fill:
if (!ck->valid)
@@ -175,7 +175,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
struct btree_path *linked;
EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+ EBUG_ON(path->l[b->c.level].lock_seq + 1 != six_lock_seq(&b->c.lock));
EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
@@ -283,7 +283,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
bool lock_may_not_fail)
{
EBUG_ON(&path->l[b->level].b->c != b);
- EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
EBUG_ON(!btree_node_intent_locked(path, b->level));
/*
@@ -688,7 +688,7 @@ static void btree_update_nodes_written(struct btree_update *as)
bch2_trans_unlock(&trans);
btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent);
- path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
path->l[b->c.level].b = b;
bch2_btree_node_lock_write_nofail(&trans, path, &b->c);
This diff is collapsed.
@@ -68,39 +68,6 @@
#define SIX_LOCK_SEPARATE_LOCKFNS
- union six_lock_state {
- 	struct {
- 		atomic64_t	counter;
- 	};
- 	struct {
- 		u64		v;
- 	};
- 	struct {
- 		/* for waitlist_bitnr() */
- 		unsigned long	l;
- 	};
- 	struct {
- 		unsigned	read_lock:26;
- 		unsigned	write_locking:1;
- 		unsigned	intent_lock:1;
- 		unsigned	nospin:1;
- 		unsigned	waiters:3;
- 		/*
- 		 * seq works much like in seqlocks: it's incremented every time
- 		 * we lock and unlock for write.
- 		 *
- 		 * If it's odd write lock is held, even unlocked.
- 		 *
- 		 * Thus readers can unlock, and then lock again later iff it
- 		 * hasn't been modified in the meantime.
- 		 */
- 		u32		seq;
- 	};
- };
enum six_lock_type {
SIX_LOCK_read,
SIX_LOCK_intent,
@@ -108,7 +75,7 @@ enum six_lock_type {
};
struct six_lock {
- union six_lock_state state;
+ atomic64_t state;
unsigned intent_lock_recurse;
struct task_struct *owner;
unsigned __percpu *readers;
@@ -148,6 +115,11 @@ do { \
__six_lock_init((lock), #lock, &__key, flags); \
} while (0)
+ static inline u32 six_lock_seq(const struct six_lock *lock)
+ {
+ 	return atomic64_read(&lock->state) >> 32;
+ }
bool six_trylock_ip_type(struct six_lock *lock, enum six_lock_type type,
unsigned long ip);
@@ -420,7 +420,9 @@ TRACE_EVENT(btree_path_relock_fail,
else
scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
__entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ 	? six_lock_seq(&path->l[level].b->c.lock)
+ 	: 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
@@ -475,7 +477,9 @@ TRACE_EVENT(btree_path_upgrade_fail,
__entry->read_count = c.n[SIX_LOCK_read];
__entry->intent_count = c.n[SIX_LOCK_read];
__entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ 	? six_lock_seq(&path->l[level].b->c.lock)
+ 	: 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",