Commit 8b3e9bd6 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Always check for transaction restarts

On transaction restart, iterators won't be locked anymore - make sure
we're always checking for errors.
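
The pattern being enforced, as a minimal self-contained sketch (struct skey, skey_err(), peek_slot() and lookup() are simplified stand-ins invented for illustration; in the tree the key is a struct bkey_s_c returned by bch2_btree_iter_peek_slot() and the error is read with bkey_err()):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-in for struct bkey_s_c: a peeked key that may carry
 * an error instead of a value. */
struct skey {
        int err;        /* 0, or a negative errno such as -EINTR */
        int val;        /* payload, only valid when err == 0 */
};

/* Models bkey_err(): pull the error out of a peeked key. */
static int skey_err(struct skey k)
{
        return k.err;
}

/* Models a peek operation: after a transaction restart the iterator is
 * no longer locked, so the key carries -EINTR instead of a value. */
static struct skey peek_slot(int restarted)
{
        if (restarted)
                return (struct skey) { .err = -EINTR };
        return (struct skey) { .val = 42 };
}

/* The pattern this commit applies after every peek: check the error
 * before touching the key, and propagate it so the caller can retry
 * the transaction. */
static int lookup(int restarted)
{
        struct skey k = peek_slot(restarted);
        int ret = skey_err(k);

        if (ret)
                return ret;

        printf("found %d\n", k.val);
        return 0;
}

int main(void)
{
        printf("clean lookup:     %d\n", lookup(0));
        printf("restarted lookup: %d\n", lookup(1));
        return 0;
}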
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 67b07638
@@ -222,6 +222,8 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
 	struct btree_iter *iter;
 	struct bkey_s_c_xattr xattr;
 	struct posix_acl *acl = NULL;
+	struct bkey_s_c k;
+	int ret;

 	bch2_trans_init(&trans, c, 0, 0);
 retry:
@@ -240,7 +242,14 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
 		goto out;
 	}

-	xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+	k = bch2_btree_iter_peek_slot(iter);
+	ret = bkey_err(k);
+	if (ret) {
+		acl = ERR_PTR(ret);
+		goto out;
+	}
+
+	xattr = bkey_s_c_to_xattr(k);
 	acl = bch2_acl_from_disk(xattr_val(xattr.v),
 				 le16_to_cpu(xattr.v->x_val_len));
@@ -358,6 +367,7 @@ int bch2_acl_chmod(struct btree_trans *trans,
 	struct bkey_s_c_xattr xattr;
 	struct bkey_i_xattr *new;
 	struct posix_acl *acl;
+	struct bkey_s_c k;
 	int ret;

 	iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc,
@@ -368,7 +378,11 @@ int bch2_acl_chmod(struct btree_trans *trans,
 	if (ret)
 		return ret == -ENOENT ? 0 : ret;

-	xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+	k = bch2_btree_iter_peek_slot(iter);
+	xattr = bkey_s_c_to_xattr(k);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
 	acl = bch2_acl_from_disk(xattr_val(xattr.v),
 				 le16_to_cpu(xattr.v->x_val_len));
 	ret = PTR_ERR_OR_ZERO(acl);
...
@@ -374,7 +374,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags)
 				percpu_ref_put(&ca->ref);
 				goto err;
 			}
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(iter);
 		}
 	}
 err:
...
@@ -973,9 +973,9 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
 	return b;
 }

-void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
-			      const struct bkey_i *k,
-			      enum btree_id btree_id, unsigned level)
+int bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
+			     const struct bkey_i *k,
+			     enum btree_id btree_id, unsigned level)
 {
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
@@ -985,9 +985,10 @@ void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
 	b = btree_cache_find(bc, k);
 	if (b)
-		return;
+		return 0;

-	bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+	b = bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
+	return PTR_ERR_OR_ZERO(b);
 }

 void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
...
@@ -27,8 +27,8 @@ struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_iter *,
 struct btree *bch2_btree_node_get_noiter(struct bch_fs *, const struct bkey_i *,
 					 enum btree_id, unsigned, bool);

-void bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
-			      const struct bkey_i *, enum btree_id, unsigned);
+int bch2_btree_node_prefetch(struct bch_fs *, struct btree_iter *,
+			     const struct bkey_i *, enum btree_id, unsigned);

 void bch2_btree_node_evict(struct bch_fs *, const struct bkey_i *);
...
@@ -1142,7 +1142,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 }

 noinline
-static void btree_iter_prefetch(struct btree_iter *iter)
+static int btree_iter_prefetch(struct btree_iter *iter)
 {
 	struct bch_fs *c = iter->trans->c;
 	struct btree_iter_level *l = &iter->l[iter->level];
@@ -1153,10 +1153,11 @@ static void btree_iter_prefetch(struct btree_iter *iter)
 		? (iter->level > 1 ? 0 : 2)
 		: (iter->level > 1 ? 1 : 16);
 	bool was_locked = btree_node_locked(iter, iter->level);
+	int ret = 0;

 	bch2_bkey_buf_init(&tmp);

-	while (nr) {
+	while (nr && !ret) {
 		if (!bch2_btree_node_relock(iter, iter->level))
 			break;
@@ -1166,14 +1167,15 @@ static void btree_iter_prefetch(struct btree_iter *iter)
 			break;

 		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
-		bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
-					 iter->level - 1);
+		ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
+					       iter->level - 1);
 	}

 	if (!was_locked)
 		btree_node_unlock(iter, iter->level);

 	bch2_bkey_buf_exit(&tmp, c);
+	return ret;
 }

 static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
@@ -1228,7 +1230,7 @@ static __always_inline int btree_iter_down(struct btree_iter *iter,
 		btree_node_mem_ptr_set(iter, level + 1, b);

 	if (iter->flags & BTREE_ITER_PREFETCH)
-		btree_iter_prefetch(iter);
+		ret = btree_iter_prefetch(iter);

 	if (btree_node_read_locked(iter, level + 1))
 		btree_node_unlock(iter, level + 1);
...
@@ -210,6 +210,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
 		goto out;

 	old_dst = bch2_btree_iter_peek_slot(dst_iter);
+	ret = bkey_err(old_dst);
+	if (ret)
+		goto out;

 	if (mode != BCH_RENAME)
 		*dst_inum = le64_to_cpu(bkey_s_c_to_dirent(old_dst).v->d_inum);
@@ -225,6 +228,10 @@ int bch2_dirent_rename(struct btree_trans *trans,
 		goto out;

 	old_src = bch2_btree_iter_peek_slot(src_iter);
+	ret = bkey_err(old_src);
+	if (ret)
+		goto out;
+
 	*src_inum = le64_to_cpu(bkey_s_c_to_dirent(old_src).v->d_inum);

 	/* Create new dst key: */
@@ -329,20 +336,25 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
 	struct btree_iter *iter;
 	struct bkey_s_c k;
 	u64 inum = 0;
+	int ret = 0;

 	bch2_trans_init(&trans, c, 0, 0);

 	iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
 					  hash_info, name, 0);
-	if (IS_ERR(iter)) {
-		BUG_ON(PTR_ERR(iter) == -EINTR);
+	ret = PTR_ERR_OR_ZERO(iter);
+	if (ret)
 		goto out;
-	}

 	k = bch2_btree_iter_peek_slot(iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto out;
+
 	inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
 	bch2_trans_iter_put(&trans, iter);
 out:
+	BUG_ON(ret == -EINTR);
 	bch2_trans_exit(&trans);
 	return inum;
 }
...
@@ -168,6 +168,10 @@ int bch2_unlink_trans(struct btree_trans *trans,
 		goto err;

 	k = bch2_btree_iter_peek_slot(dirent_iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+
 	inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);

 	inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
...
@@ -2668,13 +2668,13 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
 		/* already reserved */
 		if (k.k->type == KEY_TYPE_reservation &&
 		    bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(iter);
 			continue;
 		}

 		if (bkey_extent_is_data(k.k) &&
 		    !(mode & FALLOC_FL_ZERO_RANGE)) {
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(iter);
 			continue;
 		}
...
@@ -519,7 +519,7 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
 		if (k.k->p.snapshot == snapshot &&
 		    k.k->type != KEY_TYPE_inode &&
 		    !bch2_btree_key_cache_find(c, BTREE_ID_inodes, SPOS(0, pos, snapshot))) {
-			bch2_btree_iter_next(iter);
+			bch2_btree_iter_advance(iter);
 			continue;
 		}
...
@@ -245,8 +245,12 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
 		 * writing to, because i_size could be up to one block
 		 * less:
 		 */
-		if (!bkey_cmp(old.k->p, new->k.p))
+		if (!bkey_cmp(old.k->p, new->k.p)) {
 			old = bch2_btree_iter_next(iter);
+			ret = bkey_err(old);
+			if (ret)
+				break;
+		}

 		if (old.k && !bkey_err(old) &&
 		    old.k->p.inode == extent_iter->pos.inode &&
...
@@ -190,7 +190,7 @@ int bch2_migrate_index_update(struct bch_write_op *op)
 			}

 			atomic_long_inc(&c->extent_migrate_raced);
 			trace_move_race(&new->k);
-			bch2_btree_iter_next_slot(iter);
+			bch2_btree_iter_advance(iter);
 			goto next;
 		}
 out:
...
@@ -192,8 +192,9 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 			return k;
 	}

-	bch2_btree_iter_set_pos(iter, end);
-	return bkey_s_c_null;
+	if (bkey_cmp(iter->pos, end) >= 0)
+		bch2_btree_iter_set_pos(iter, end);
+	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
 }

 s64 bch2_remap_range(struct bch_fs *c,
...
@@ -209,7 +209,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
 	iter = bch2_trans_copy_iter(trans, start);

-	bch2_btree_iter_next_slot(iter);
+	bch2_btree_iter_advance(iter);

 	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
 		if (k.k->type != desc.key_type &&
...
@@ -124,6 +124,7 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
 	struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
 	struct btree_iter *iter;
 	struct bkey_s_c_xattr xattr;
+	struct bkey_s_c k;
 	int ret;

 	iter = bch2_hash_lookup(trans, bch2_xattr_hash_desc, &hash,
@@ -134,7 +135,12 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
 	if (ret)
 		goto err;

-	xattr = bkey_s_c_to_xattr(bch2_btree_iter_peek_slot(iter));
+	k = bch2_btree_iter_peek_slot(iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+
+	xattr = bkey_s_c_to_xattr(k);
 	ret = le16_to_cpu(xattr.v->x_val_len);
 	if (buffer) {
 		if (ret > size)
...