Commit 326568f1 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Convert bch2_gc_done() to for_each_btree_key2()

This converts bch2_gc_stripes_done() and bch2_gc_reflink_done() to the
new for_each_btree_key_commit() macro.

The new for_each_btree_key2() and for_each_btree_key_commit() macros
handle transaction retries, allowing us to avoid nested transactions -
which we want to avoid since they're tricky to do completely correctly
and upcoming assertions are going to be checking for that.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent eace11a7
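
The shape of the pattern these macros implement is roughly the following. This is a simplified sketch, not the upstream macro definition: the helper calls (bch2_trans_iter_init(), bch2_trans_begin(), bch2_btree_iter_peek(), bch2_trans_commit(), bch2_btree_iter_advance(), bch2_trans_iter_exit()) are real bcachefs iterator/transaction API, but the argument list, the restart check (shown schematically as -EINTR here), and the error plumbing are abbreviated. The point is that the per-key work and the commit run inside a single retry loop owned by the macro, so callers no longer nest commit_do() inside for_each_btree_key().

/*
 * Simplified sketch of a for_each_btree_key_commit()-style macro
 * (NOT the upstream definition): iterate, run the per-key work,
 * commit, and retry the same key on a transaction restart.
 */
#define for_each_btree_key_commit_sketch(_trans, _iter, _btree_id,	\
			_start, _flags, _k, _disk_res, _journal_seq,	\
			_commit_flags, _do)				\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
	while (1) {							\
		bch2_trans_begin(_trans);				\
		(_k) = bch2_btree_iter_peek(&(_iter));			\
		if (!(_k).k)						\
			break;						\
									\
		/* per-key work, then commit, as one step: */		\
		_ret = bkey_err(_k) ?:					\
			(_do) ?:					\
			bch2_trans_commit((_trans), (_disk_res),	\
					  (_journal_seq),		\
					  (_commit_flags));		\
		/* transaction restart: retry this key */		\
		if (_ret == -EINTR) {					\
			_ret = 0;					\
			continue;					\
		}							\
		if (_ret)						\
			break;						\
									\
		bch2_btree_iter_advance(&(_iter));			\
	}								\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})

With that shape in mind, each conversion below reduces to factoring the old loop body into a per-key function (bch2_alloc_write_key(), bch2_gc_write_reflink_key(), bch2_gc_write_stripes_key()) and passing it to the macro; a positive return from the per-key function (as in bch2_alloc_write_key()) stops iteration without being treated as an error.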
@@ -1321,21 +1321,19 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
 
 static int bch2_alloc_write_key(struct btree_trans *trans,
 				struct btree_iter *iter,
+				struct bkey_s_c k,
 				bool metadata_only)
 {
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
 	struct bucket gc, *b;
-	struct bkey_s_c k;
 	struct bkey_i_alloc_v4 *a;
 	struct bch_alloc_v4 old, new;
 	enum bch_data_type type;
 	int ret;
 
-	k = bch2_btree_iter_peek_slot(iter);
-	ret = bkey_err(k);
-	if (ret)
-		return ret;
+	if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+		return 1;
 
 	bch2_alloc_to_v4(k, &old);
 	new = old;
@@ -1428,23 +1426,13 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for_each_member_device(ca, c, i) {
-		for_each_btree_key(&trans, iter, BTREE_ID_alloc,
-				   POS(ca->dev_idx, ca->mi.first_bucket),
-				   BTREE_ITER_SLOTS|
-				   BTREE_ITER_PREFETCH, k, ret) {
-			if (bkey_cmp(iter.pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
-				break;
-
-			ret = commit_do(&trans, NULL, NULL,
-					BTREE_INSERT_LAZY_RW,
-					bch2_alloc_write_key(&trans, &iter,
-							     metadata_only));
-			if (ret)
-				break;
-		}
-		bch2_trans_iter_exit(&trans, &iter);
-
-		if (ret) {
+		ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+				POS(ca->dev_idx, ca->mi.first_bucket),
+				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BTREE_INSERT_LAZY_RW,
+			bch2_alloc_write_key(&trans, &iter, k, metadata_only));
+
+		if (ret < 0) {
 			bch_err(c, "error writing alloc info: %i", ret);
 			percpu_ref_put(&ca->ref);
 			break;
@@ -1452,7 +1440,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 	}
 
 	bch2_trans_exit(&trans);
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
@@ -1536,72 +1524,79 @@ static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
 	};
 }
 
-static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
-{
-	struct btree_trans trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct reflink_gc *r;
-	size_t idx = 0;
-	struct printbuf buf = PRINTBUF;
-	int ret = 0;
-
-	if (metadata_only)
-		return 0;
-
-	bch2_trans_init(&trans, c, 0, 0);
-
-	for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		const __le64 *refcount = bkey_refcount_c(k);
-
-		if (!refcount)
-			continue;
-
-		r = genradix_ptr(&c->reflink_gc_table, idx++);
-		if (!r ||
-		    r->offset != k.k->p.offset ||
-		    r->size != k.k->size) {
-			bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
-			ret = -EINVAL;
-			break;
-		}
-
-		if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
-				"reflink key has wrong refcount:\n"
-				"  %s\n"
-				"  should be %u",
-				(printbuf_reset(&buf),
-				 bch2_bkey_val_to_text(&buf, c, k), buf.buf),
-				r->refcount)) {
-			struct bkey_i *new;
-
-			new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			bkey_reassemble(new, k);
-
-			if (!r->refcount)
-				new->k.type = KEY_TYPE_deleted;
-			else
-				*bkey_refcount(new) = cpu_to_le64(r->refcount);
-
-			ret = commit_do(&trans, NULL, NULL, 0,
-					__bch2_btree_insert(&trans, BTREE_ID_reflink, new));
-			kfree(new);
-
-			if (ret)
-				break;
-		}
-	}
-fsck_err:
-	bch2_trans_iter_exit(&trans, &iter);
+static int bch2_gc_write_reflink_key(struct btree_trans *trans,
+				     struct btree_iter *iter,
+				     struct bkey_s_c k,
+				     size_t *idx)
+{
+	struct bch_fs *c = trans->c;
+	const __le64 *refcount = bkey_refcount_c(k);
+	struct printbuf buf = PRINTBUF;
+	struct reflink_gc *r;
+	int ret = 0;
+
+	if (!refcount)
+		return 0;
+
+	while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
+	       r->offset < k.k->p.offset)
+		++*idx;
+
+	if (!r ||
+	    r->offset != k.k->p.offset ||
+	    r->size != k.k->size) {
+		bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
+		return -EINVAL;
+	}
+
+	if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+			"reflink key has wrong refcount:\n"
+			"  %s\n"
+			"  should be %u",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+			r->refcount)) {
+		struct bkey_i *new;
+
+		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+		ret = PTR_ERR_OR_ZERO(new);
+		if (ret)
+			return ret;
+
+		bkey_reassemble(new, k);
+
+		if (!r->refcount)
+			new->k.type = KEY_TYPE_deleted;
+		else
+			*bkey_refcount(new) = cpu_to_le64(r->refcount);
+
+		ret = bch2_trans_update(trans, iter, new, 0);
+	}
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
+
+static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+{
+	struct btree_trans trans;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	size_t idx = 0;
+	int ret = 0;
+
+	if (metadata_only)
+		return 0;
+
+	bch2_trans_init(&trans, c, 0, 0);
+
+	ret = for_each_btree_key_commit(&trans, iter,
+			BTREE_ID_reflink, POS_MIN,
+			BTREE_ITER_PREFETCH, k,
+			NULL, NULL, BTREE_INSERT_NOFAIL,
+		bch2_gc_write_reflink_key(&trans, &iter, k, &idx));
+
 	c->reflink_gc_nr = 0;
 	bch2_trans_exit(&trans);
-	printbuf_exit(&buf);
 	return ret;
 }
@@ -1653,66 +1648,73 @@ static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
 		r->refcount = 0;
 }
 
-static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
-{
-	struct btree_trans trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct gc_stripe *m;
-	const struct bch_stripe *s;
-	struct printbuf buf = PRINTBUF;
-	unsigned i;
-	int ret = 0;
-
-	if (metadata_only)
-		return 0;
-
-	bch2_trans_init(&trans, c, 0, 0);
-
-	for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
-			   BTREE_ITER_PREFETCH, k, ret) {
-		if (k.k->type != KEY_TYPE_stripe)
-			continue;
-
-		s = bkey_s_c_to_stripe(k).v;
-		m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
-
-		for (i = 0; i < s->nr_blocks; i++)
-			if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
-				goto inconsistent;
-		continue;
-inconsistent:
-		if (fsck_err_on(true, c,
-				"stripe has wrong block sector count %u:\n"
-				"  %s\n"
-				"  should be %u", i,
-				(printbuf_reset(&buf),
-				 bch2_bkey_val_to_text(&buf, c, k), buf.buf),
-				m ? m->block_sectors[i] : 0)) {
-			struct bkey_i_stripe *new;
-
-			new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
-				break;
-			}
-
-			bkey_reassemble(&new->k_i, k);
-
-			for (i = 0; i < new->v.nr_blocks; i++)
-				stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
-
-			ret = commit_do(&trans, NULL, NULL, 0,
-				__bch2_btree_insert(&trans, BTREE_ID_reflink, &new->k_i));
-			kfree(new);
-		}
-	}
-fsck_err:
-	bch2_trans_iter_exit(&trans, &iter);
-
-	bch2_trans_exit(&trans);
-
-	printbuf_exit(&buf);
+static int bch2_gc_write_stripes_key(struct btree_trans *trans,
+				     struct btree_iter *iter,
+				     struct bkey_s_c k)
+{
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+	const struct bch_stripe *s;
+	struct gc_stripe *m;
+	unsigned i;
+	int ret = 0;
+
+	if (k.k->type != KEY_TYPE_stripe)
+		return 0;
+
+	s = bkey_s_c_to_stripe(k).v;
+	m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
+
+	for (i = 0; i < s->nr_blocks; i++)
+		if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
+			goto inconsistent;
+	return 0;
+inconsistent:
+	if (fsck_err_on(true, c,
+			"stripe has wrong block sector count %u:\n"
+			"  %s\n"
+			"  should be %u", i,
+			(printbuf_reset(&buf),
+			 bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+			m ? m->block_sectors[i] : 0)) {
+		struct bkey_i_stripe *new;
+
+		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+		ret = PTR_ERR_OR_ZERO(new);
+		if (ret)
+			return ret;
+
+		bkey_reassemble(&new->k_i, k);
+
+		for (i = 0; i < new->v.nr_blocks; i++)
+			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
+
+		ret = bch2_trans_update(trans, iter, &new->k_i, 0);
+	}
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
+
+static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+{
+	struct btree_trans trans;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret = 0;
+
+	if (metadata_only)
+		return 0;
+
+	bch2_trans_init(&trans, c, 0, 0);
+
+	ret = for_each_btree_key_commit(&trans, iter,
+			BTREE_ID_stripes, POS_MIN,
+			BTREE_ITER_PREFETCH, k,
+			NULL, NULL, BTREE_INSERT_NOFAIL,
+		bch2_gc_write_stripes_key(&trans, &iter, k));
+
+	bch2_trans_exit(&trans);
 
 	return ret;
 }