/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

static inline int btree_node_locked_type(struct btree_iter *iter,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
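	/*
	 * I.e., given the enum values above:
	 *
	 *   unlocked:      -1 + 0 + 0 == BTREE_NODE_UNLOCKED
	 *   read locked:   -1 + 1 + 0 == BTREE_NODE_READ_LOCKED
	 *   intent locked: -1 + 1 + 1 == BTREE_NODE_INTENT_LOCKED
	 */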
	return BTREE_NODE_UNLOCKED +
		((iter->nodes_locked >> level) & 1) +
		((iter->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_iter *iter,
					    unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_iter *iter,
					  unsigned level)
{
	return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
{
	return iter->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_iter *iter,
					    unsigned level)
{
	iter->nodes_locked &= ~(1 << level);
	iter->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked(struct btree_iter *iter,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

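	/*
	 * With SIX_LOCK_read == 0 and SIX_LOCK_intent == 1, "type << level"
	 * sets the per-level bit in nodes_intent_locked only for intent
	 * locks and is a no-op for read locks:
	 */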
	iter->nodes_locked |= 1 << level;
	iter->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
						 unsigned level)
{
	mark_btree_node_locked(iter, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
{
	return level < iter->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

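/*
 * What lock the iterator should be holding at a given level: nothing below
 * the iterator's current level, intent locks up to locks_want, at least a
 * read lock on the node the iterator points at, and nothing above that:
 */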
static inline enum btree_node_locked_type
btree_lock_want(struct btree_iter *iter, int level)
{
	if (level < iter->level)
		return BTREE_NODE_UNLOCKED;
	if (level < iter->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == iter->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	int lock_type = btree_node_locked_type(iter, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&iter->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(iter, level);
}

static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
	EBUG_ON(!level && iter->trans->nounlock);

	__btree_node_unlock(iter, level);
}

static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

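	/* __ffs() gives the lowest set bit: unlock the lowest locked level first */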
	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

/*
 * wrapper around six locks that just traces lock contended time
 */
static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
					  enum six_lock_type type)
{
	u64 start_time = local_clock();

	six_lock_type(&b->c.lock, type, NULL, NULL);
	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}

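/*
 * Trylock first - only contended acquisitions go through the timed slowpath
 * above, so the time stats only measure time spent waiting:
 */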
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
					enum six_lock_type type)
{
	if (!six_trylock_type(&b->c.lock, type))
		__btree_node_lock_type(c, b, type);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
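 *
 * A lock of at least the wanted type is already held, so we only need to
 * bump the lock's count with six_lock_increment() rather than block on it
 * again.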
 */
static inline bool btree_node_lock_increment(struct btree_iter *iter,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b &&
		    btree_node_locked_type(linked, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
			    struct btree_iter *, enum six_lock_type);

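/*
 * Fast paths: an uncontended trylock, or bumping a lock already held via a
 * linked iterator; the contended case is handled out of line by
 * __bch2_btree_node_lock():
 */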
static inline bool btree_node_lock(struct btree *b, struct bpos pos,
				   unsigned level,
				   struct btree_iter *iter,
				   enum six_lock_type type)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);

	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(iter, b, level, type) ||
		__bch2_btree_node_lock(b, pos, level, iter, type);
}

bool __bch2_btree_node_relock(struct btree_iter *, unsigned);

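/*
 * Relock fast path: nothing to do if we still hold the lock; otherwise
 * __bch2_btree_node_relock() attempts to retake it, which can only succeed
 * if the node's lock sequence number still matches what the iterator saw:
 */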
static inline bool bch2_btree_node_relock(struct btree_iter *iter,
					  unsigned level)
{
	EBUG_ON(btree_node_locked(iter, level) &&
		btree_node_locked_type(iter, level) !=
		__btree_lock_want(iter, level));

	return likely(btree_node_locked(iter, level)) ||
		__bch2_btree_node_relock(iter, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

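	/*
	 * The six lock's sequence number is incremented on both write lock
	 * and write unlock (hence the +1 check above), so after this unlock
	 * it will be two greater than what linked iterators saved - advance
	 * their saved copies to match:
	 */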
	trans_for_each_iter_with_node(iter->trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);

void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);

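/*
 * Take the write lock on a node the iterator already has intent locked
 * (hence the unchanged lock_seq check), in preparation for modifying it:
 */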
static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(b, iter);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */