Commit e5af273f authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: trans->restarted

Start tracking when btree transactions have been restarted - and assert
that we're always calling bch2_trans_begin() immediately after
transaction restart.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 3cc5288a
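The pattern these assertions enforce is sketched below for a hypothetical caller (do_work() is illustrative, not part of this patch): once a transaction restart has returned -EINTR and set trans->restarted, bch2_trans_begin() must run before the transaction's iterators are used again, as bch2_gc_btree_gens() now does at the top of its loop in this diff.

	/*
	 * Hypothetical caller, for illustration only: on -EINTR the
	 * transaction has been restarted (trans->restarted is set);
	 * bch2_trans_begin() resets the transaction (clearing
	 * trans->restarted via bch2_trans_reset() in this patch)
	 * before the next attempt.
	 */
	int ret;

	do {
		bch2_trans_begin(&trans);
		ret = do_work(&trans);	/* returns -EINTR on transaction restart */
	} while (ret == -EINTR);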
@@ -655,8 +655,10 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
 	 */
-	if (iter && !bch2_btree_node_relock(iter, level + 1))
+	if (iter && !bch2_btree_node_relock(iter, level + 1)) {
+		btree_trans_restart(iter->trans);
 		return ERR_PTR(-EINTR);
+	}
 
 	b = bch2_btree_node_mem_alloc(c);
 	if (IS_ERR(b))
@@ -695,11 +697,15 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (iter &&
 	    (!bch2_trans_relock(iter->trans) ||
-	     !bch2_btree_iter_relock_intent(iter)))
+	     !bch2_btree_iter_relock_intent(iter))) {
+		BUG_ON(!iter->trans->restarted);
 		return ERR_PTR(-EINTR);
+	}
 
-	if (!six_relock_type(&b->c.lock, lock_type, seq))
+	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
+		btree_trans_restart(iter->trans);
 		return ERR_PTR(-EINTR);
+	}
 
 	return b;
 }
@@ -824,7 +830,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
 		if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
 				     lock_node_check_fn, (void *) k, trace_ip)) {
-			if (b->hash_val != btree_ptr_hash_val(k))
+			if (!trans->restarted)
 				goto retry;
 			return ERR_PTR(-EINTR);
 		}
@@ -840,6 +846,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
 						   trace_ip,
 						   iter->btree_id,
 						   &iter->real_pos);
+			btree_trans_restart(trans);
 			return ERR_PTR(-EINTR);
 		}
 	}
@@ -858,8 +865,10 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_iter *
 		 */
 		if (iter &&
 		    (!bch2_trans_relock(trans) ||
-		     !bch2_btree_iter_relock_intent(iter)))
+		     !bch2_btree_iter_relock_intent(iter))) {
+			BUG_ON(!trans->restarted);
 			return ERR_PTR(-EINTR);
+		}
 
 		if (!six_relock_type(&b->c.lock, lock_type, seq))
 			goto retry;
......
@@ -1735,7 +1735,8 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
 			   BTREE_ITER_NOT_EXTENTS|
 			   BTREE_ITER_ALL_SNAPSHOTS);
 
-	while ((k = bch2_btree_iter_peek(iter)).k &&
+	while ((bch2_trans_begin(&trans),
+		k = bch2_btree_iter_peek(iter)).k &&
 	       !(ret = bkey_err(k))) {
 		c->gc_gens_pos = iter->pos;
......
@@ -316,7 +316,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 	}
 
 	if (unlikely(deadlock_iter)) {
-		trace_trans_restart_would_deadlock(iter->trans->ip, ip,
+		trace_trans_restart_would_deadlock(trans->ip, ip,
 				trans->in_traverse_all, reason,
 				deadlock_iter->btree_id,
 				btree_iter_type(deadlock_iter),
@@ -324,6 +324,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 				iter->btree_id,
 				btree_iter_type(iter),
 				&pos);
+		btree_trans_restart(trans);
 		return false;
 	}
@@ -404,6 +405,7 @@ bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
 					 ? iter->l[l].b->c.lock.state.seq
 					 : 0);
 			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+			btree_trans_restart(iter->trans);
 			return false;
 		}
 	}
@@ -414,7 +416,11 @@ bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
 __flatten
 bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
-	return btree_iter_get_locks(iter, false, trace_ip);
+	bool ret = btree_iter_get_locks(iter, false, trace_ip);
+
+	if (!ret)
+		btree_trans_restart(iter->trans);
+	return ret;
 }
 
 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
@@ -457,6 +463,8 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 			btree_iter_get_locks(linked, true, _THIS_IP_);
 		}
 
+	if (iter->should_be_locked)
+		btree_trans_restart(iter->trans);
 	return false;
 }
@@ -505,11 +513,15 @@ bool bch2_trans_relock(struct btree_trans *trans)
 {
 	struct btree_iter *iter;
 
+	if (unlikely(trans->restarted))
+		return false;
+
 	trans_for_each_iter(trans, iter)
 		if (btree_iter_should_be_locked(iter) &&
 		    !bch2_btree_iter_relock(iter, _RET_IP_)) {
 			trace_trans_restart_relock(trans->ip, _RET_IP_,
 					iter->btree_id, &iter->real_pos);
+			BUG_ON(!trans->restarted);
 			return false;
 		}
 	return true;
@@ -1088,11 +1100,12 @@ static int lock_root_check_fn(struct six_lock *lock, void *p)
 	return b == *rootp ? 0 : -1;
 }
 
-static inline int btree_iter_lock_root(struct btree_iter *iter,
+static inline int btree_iter_lock_root(struct btree_trans *trans,
+				       struct btree_iter *iter,
 				       unsigned depth_want,
 				       unsigned long trace_ip)
 {
-	struct bch_fs *c = iter->trans->c;
+	struct bch_fs *c = trans->c;
 	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
 	enum six_lock_type lock_type;
 	unsigned i;
@@ -1120,8 +1133,11 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 		if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
 					      iter, lock_type,
 					      lock_root_check_fn, rootp,
-					      trace_ip)))
-			return -EINTR;
+					      trace_ip))) {
+			if (trans->restarted)
+				return -EINTR;
+			continue;
+		}
 
 		if (likely(b == READ_ONCE(*rootp) &&
 			   b->c.level == iter->level &&
@@ -1199,10 +1215,10 @@ static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
 	btree_node_unlock(iter, plevel);
 }
 
-static __always_inline int btree_iter_down(struct btree_iter *iter,
+static __always_inline int btree_iter_down(struct btree_trans *trans,
+					   struct btree_iter *iter,
 					   unsigned long trace_ip)
 {
-	struct btree_trans *trans = iter->trans;
 	struct bch_fs *c = trans->c;
 	struct btree_iter_level *l = &iter->l[iter->level];
 	struct btree *b;
@@ -1257,6 +1273,8 @@ static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
 	trans->in_traverse_all = true;
 retry_all:
+	trans->restarted = false;
+
 	nr_sorted = 0;
 
 	trans_for_each_iter(trans, iter) {
@@ -1319,11 +1337,11 @@ static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
 	}
 
 	if (hweight64(trans->iters_live) > 1)
-		ret = -EINTR;
+		ret = btree_trans_restart(trans);
 	else
 		trans_for_each_iter(trans, iter)
 			if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
-				ret = -EINTR;
+				ret = btree_trans_restart(trans);
 				break;
 			}
out:
@@ -1414,8 +1432,8 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 	 */
 	while (iter->level > depth_want) {
 		ret = btree_iter_node(iter, iter->level)
-			? btree_iter_down(iter, trace_ip)
-			: btree_iter_lock_root(iter, depth_want, trace_ip);
+			? btree_iter_down(trans, iter, trace_ip)
+			: btree_iter_lock_root(trans, iter, depth_want, trace_ip);
 		if (unlikely(ret)) {
 			if (ret == 1) {
 				/*
@@ -1443,6 +1461,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 	iter->uptodate = BTREE_ITER_NEED_PEEK;
 out:
+	BUG_ON((ret == -EINTR) != !!trans->restarted);
 	trace_iter_traverse(trans->ip, trace_ip,
 			    btree_iter_type(iter) == BTREE_ITER_CACHED,
 			    iter->btree_id, &iter->real_pos, ret);
@@ -1589,6 +1608,8 @@ static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_p
 	int cmp = bpos_cmp(new_pos, iter->real_pos);
 	unsigned l = iter->level;
 
+	EBUG_ON(iter->trans->restarted);
+
 	if (!cmp)
 		goto out;
@@ -2158,6 +2179,8 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
 	struct btree_iter *iter, *best = NULL;
 	struct bpos real_pos, pos_min = POS_MIN;
 
+	EBUG_ON(trans->restarted);
+
 	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
 	    btree_node_type_is_extents(btree_id) &&
 	    !(flags & BTREE_ITER_NOT_EXTENTS) &&
@@ -2322,6 +2345,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 		if (old_bytes) {
 			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
+			btree_trans_restart(trans);
 			return ERR_PTR(-EINTR);
 		}
 	}
@@ -2396,6 +2420,8 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
 	if (!(flags & TRANS_RESET_NOTRAVERSE) &&
 	    trans->iters_linked)
 		bch2_btree_iter_traverse_all(trans);
+
+	trans->restarted = false;
 }
 
 static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
......
@@ -117,6 +117,14 @@ bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
 
+__always_inline
+static inline int btree_trans_restart(struct btree_trans *trans)
+{
+	trans->restarted = true;
+	bch2_trans_unlock(trans);
+	return -EINTR;
+}
+
 bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
 
 static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
......
@@ -215,7 +215,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	if (!bch2_btree_node_relock(ck_iter, 0)) {
 		trace_transaction_restart_ip(trans->ip, _THIS_IP_);
-		ret = -EINTR;
+		ret = btree_trans_restart(trans);
 		goto err;
 	}
@@ -234,6 +234,10 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		}
 	}
 
+	/*
+	 * XXX: not allowed to be holding read locks when we take a write lock,
+	 * currently
+	 */
 	bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
 	if (new_k) {
 		kfree(ck->k);
@@ -300,10 +304,8 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 
 		if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
 				     bkey_cached_check_fn, iter, _THIS_IP_)) {
-			if (ck->key.btree_id != iter->btree_id ||
-			    bpos_cmp(ck->key.pos, iter->pos)) {
+			if (!trans->restarted)
 				goto retry;
-			}
 
 			trace_transaction_restart_ip(trans->ip, _THIS_IP_);
 			ret = -EINTR;
@@ -323,10 +325,10 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 	iter->l[0].b = (void *) ck;
fill:
 	if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
-		if (!btree_node_intent_locked(iter, 0))
-			bch2_btree_iter_upgrade(iter, 1);
-		if (!btree_node_intent_locked(iter, 0)) {
+		if (!iter->locks_want &&
+		    !!__bch2_btree_iter_upgrade(iter, 1)) {
 			trace_transaction_restart_ip(trans->ip, _THIS_IP_);
+			BUG_ON(!trans->restarted);
 			ret = -EINTR;
 			goto err;
 		}
@@ -342,9 +344,12 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
 	iter->uptodate = BTREE_ITER_NEED_PEEK;
 
 	if ((iter->flags & BTREE_ITER_INTENT) &&
-	    !iter->locks_want &&
-	    __bch2_btree_iter_upgrade(iter, 1))
+	    !bch2_btree_iter_upgrade(iter, 1)) {
+		BUG_ON(!trans->restarted);
 		ret = -EINTR;
+	}
+
+	BUG_ON(!ret && !btree_node_locked(iter, 0));
 
 	return ret;
err:
......
@@ -380,9 +380,10 @@ struct btree_trans {
 	int			srcu_idx;
 
 	u8			nr_updates;
-	unsigned		used_mempool:1;
-	unsigned		error:1;
-	unsigned		in_traverse_all:1;
+	bool			used_mempool:1;
+	bool			error:1;
+	bool			in_traverse_all:1;
+	bool			restarted:1;
 	/*
 	 * For when bch2_trans_update notices we'll be splitting a compressed
 	 * extent:
......
@@ -1006,6 +1006,7 @@ bch2_btree_update_start(struct btree_iter *iter, unsigned level,
 		if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
 			bch2_btree_update_free(as);
+			btree_trans_restart(trans);
 			return ERR_PTR(ret);
 		}
......
@@ -384,6 +384,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 	if (race_fault()) {
 		trace_trans_restart_fault_inject(trans->ip, trace_ip);
+		trans->restarted = true;
 		return -EINTR;
 	}
@@ -520,10 +521,17 @@ static noinline int maybe_do_btree_merge(struct btree_trans *trans, struct btree
 		u64s_delta -= !bkey_deleted(old.k) ? old.k->u64s : 0;
 	}
 
-	return u64s_delta <= 0
-		? (bch2_foreground_maybe_merge(trans, iter, iter->level,
-				trans->flags & ~BTREE_INSERT_NOUNLOCK) ?: -EINTR)
-		: 0;
+	if (u64s_delta > 0)
+		return 0;
+
+	ret = bch2_foreground_maybe_merge(trans, iter, iter->level,
+				trans->flags & ~BTREE_INSERT_NOUNLOCK);
+	if (!ret) {
+		ret = -EINTR;
+		trans->restarted = true;
+	}
+
+	return ret;
 }
 
 /*
@@ -587,6 +595,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 				trace_trans_restart_upgrade(trans->ip, trace_ip,
 							    iter->btree_id,
 							    &iter->real_pos);
+				trans->restarted = true;
 				return -EINTR;
 			}
 		} else {
@@ -696,6 +705,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 			trace_trans_restart_btree_node_split(trans->ip, trace_ip,
 							     i->iter->btree_id,
 							     &i->iter->real_pos);
+			trans->restarted = true;
 			ret = -EINTR;
 		}
 		break;
@@ -704,7 +714,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		ret = bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas);
 		if (ret)
-			return ret;
+			break;
 
 		if (bch2_trans_relock(trans))
 			return 0;
@@ -716,12 +726,15 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		bch2_trans_unlock(trans);
 
 		if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
-		    !(trans->flags & BTREE_INSERT_JOURNAL_RESERVED))
-			return -EAGAIN;
+		    !(trans->flags & BTREE_INSERT_JOURNAL_RESERVED)) {
+			trans->restarted = true;
+			ret = -EAGAIN;
+			break;
+		}
 
 		ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_CHECK);
 		if (ret)
-			return ret;
+			break;
 
 		if (bch2_trans_relock(trans))
 			return 0;
@@ -737,7 +750,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		wait_event_freezable(c->journal.reclaim_wait,
 				     (ret = journal_reclaim_wait_done(c)));
 		if (ret < 0)
-			return ret;
+			break;
 
 		if (bch2_trans_relock(trans))
 			return 0;
@@ -750,6 +763,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		break;
 	}
 
+	BUG_ON((ret == EINTR || ret == -EAGAIN) && !trans->restarted);
 	BUG_ON(ret == -ENOSPC && (flags & BTREE_INSERT_NOFAIL));
 
 	return ret;
@@ -972,6 +986,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 			trace_trans_restart_upgrade(trans->ip, _RET_IP_,
 						    i->iter->btree_id,
 						    &i->iter->pos);
+			trans->restarted = true;
 			ret = -EINTR;
 			goto out;
 		}
@@ -994,6 +1009,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 			goto err;
 	}
retry:
+	BUG_ON(trans->restarted);
 	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
 
 	ret = do_bch2_trans_commit(trans, &i, _RET_IP_);
......