Commit 3f53d050 authored by Kent Overstreet

bcachefs: bch2_data_update_init() cleanup

Factor out some helpers - this function has gotten much too big.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2102bdac
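
The helpers added below split bch2_data_update_init()'s setup into all-or-nothing acquisition steps. bkey_get_dev_refs()/bkey_put_dev_refs() follow the usual acquire-all-or-unwind idiom: take one reference per extent pointer, and if a tryget fails, walk the list again, dropping exactly the references already taken and stopping at the pointer that failed (extent pointers are not indexable, hence the pointer-compare rewalk rather than an index). A minimal standalone sketch of the same idiom -- struct resource, try_get() and put() here are illustrative stand-ins, not bcachefs API:

	/* Illustrative only; an indexable array stands in for the pointer list. */
	#include <stdbool.h>
	#include <stddef.h>

	struct resource {
		int  refcount;
		bool dying;		/* no new refs once a resource is going away */
	};

	static bool try_get(struct resource *r)
	{
		if (r->dying)
			return false;
		r->refcount++;
		return true;
	}

	static void put(struct resource *r)
	{
		r->refcount--;
	}

	/* Take a ref on every resource, or unwind and take none. */
	static bool get_all(struct resource *rs, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (!try_get(&rs[i])) {
				while (i--)	/* drop only what we already took */
					put(&rs[i]);
				return false;
			}
		return true;
	}
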
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -20,6 +20,76 @@
#include "subvolume.h" #include "subvolume.h"
#include "trace.h" #include "trace.h"
+static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr)
+		bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
+}
+
+static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		if (!bch2_dev_tryget(c, ptr->dev)) {
+			bkey_for_each_ptr(ptrs, ptr2) {
+				if (ptr2 == ptr)
+					break;
+				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
+			}
+			return false;
+		}
+	}
+	return true;
+}
+
+static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
+
+		bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+	}
+}
+
+static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+	bkey_for_each_ptr(ptrs, ptr) {
+		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
+		struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
+
+		if (ctxt) {
+			bool locked;
+
+			move_ctxt_wait_event(ctxt,
+				(locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
+				list_empty(&ctxt->ios));
+
+			if (!locked)
+				bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
+		} else {
+			if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
+				bkey_for_each_ptr(ptrs, ptr2) {
+					if (ptr2 == ptr)
+						break;
+					ca = bch2_dev_have_ref(c, ptr2->dev);
+					bucket = PTR_BUCKET_POS(ca, ptr2);
+					bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+				}
+				return false;
+			}
+		}
+	}
+	return true;
+}
+
 static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
 {
 	if (trace_move_extent_finish_enabled()) {
@@ -355,17 +425,11 @@ void bch2_data_update_read_done(struct data_update *m,
 void bch2_data_update_exit(struct data_update *update)
 {
 	struct bch_fs *c = update->op.c;
-	struct bkey_ptrs_c ptrs =
-		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
-
-	bkey_for_each_ptr(ptrs, ptr) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
-		if (c->opts.nocow_enabled)
-			bch2_bucket_nocow_unlock(&c->nocow_locks,
-						 PTR_BUCKET_POS(ca, ptr), 0);
-		bch2_dev_put(ca);
-	}
+	struct bkey_s_c k = bkey_i_to_s_c(update->k.k);
+
+	if (c->opts.nocow_enabled)
+		bkey_nocow_unlock(c, k);
+	bkey_put_dev_refs(c, k);
 
 	bch2_bkey_buf_exit(&update->k, c);
 	bch2_disk_reservation_put(c, &update->op.res);
 	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
@@ -546,7 +610,6 @@ int bch2_data_update_init(struct btree_trans *trans,
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
 	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
-	unsigned ptrs_locked = 0;
 	int ret = 0;
 
 	/*
@@ -557,6 +620,15 @@ int bch2_data_update_init(struct btree_trans *trans,
 	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
 		return -BCH_ERR_data_update_done;
 
+	if (!bkey_get_dev_refs(c, k))
+		return -BCH_ERR_data_update_done;
+
+	if (c->opts.nocow_enabled &&
+	    !bkey_nocow_lock(c, ctxt, k)) {
+		bkey_put_dev_refs(c, k);
+		return -BCH_ERR_nocow_lock_blocked;
+	}
+
 	bch2_bkey_buf_init(&m->k);
 	bch2_bkey_buf_reassemble(&m->k, c, k);
 	m->btree_id	= btree_id;
@@ -578,40 +650,24 @@ int bch2_data_update_init(struct btree_trans *trans,
 	m->op.compression_opt	= background_compression(io_opts);
 	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
 
-	bkey_for_each_ptr(ptrs, ptr) {
-		if (!bch2_dev_tryget(c, ptr->dev)) {
-			bkey_for_each_ptr(ptrs, ptr2) {
-				if (ptr2 == ptr)
-					break;
-				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
-			}
-			return -BCH_ERR_data_update_done;
-		}
-	}
-
 	unsigned durability_have = 0, durability_removing = 0;
 
 	i = 0;
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
-		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
-		bool locked;
-
-		rcu_read_lock();
-		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
-			BUG_ON(p.ptr.cached);
-
-			if (crc_is_compressed(p.crc))
-				reserve_sectors += k.k->size;
-
-			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
-			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
-		} else if (!p.ptr.cached &&
-			   !((1U << i) & m->data_opts.kill_ptrs)) {
-			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
-			durability_have += bch2_extent_ptr_durability(c, &p);
-		}
-		rcu_read_unlock();
+		if (!p.ptr.cached) {
+			rcu_read_lock();
+			if (BIT(i) & m->data_opts.rewrite_ptrs) {
+				if (crc_is_compressed(p.crc))
+					reserve_sectors += k.k->size;
+
+				m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
+				durability_removing += bch2_extent_ptr_desired_durability(c, &p);
+			} else if (!(BIT(i) & m->data_opts.kill_ptrs)) {
+				bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+				durability_have += bch2_extent_ptr_durability(c, &p);
+			}
+			rcu_read_unlock();
+		}
 
 		/*
 		 * op->csum_type is normally initialized from the fs/file's
@@ -626,24 +682,6 @@ int bch2_data_update_init(struct btree_trans *trans,
 		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
 			m->op.incompressible = true;
 
-		if (c->opts.nocow_enabled) {
-			if (ctxt) {
-				move_ctxt_wait_event(ctxt,
-					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
-									    bucket, 0)) ||
-					list_empty(&ctxt->ios));
-
-				if (!locked)
-					bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
-			} else {
-				if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
-					ret = -BCH_ERR_nocow_lock_blocked;
-					goto err;
-				}
-			}
-			ptrs_locked |= (1U << i);
-		}
-
 		i++;
 	}
@@ -664,7 +702,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 		/* if iter == NULL, it's just a promote */
 		if (iter)
 			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
-		goto done;
+		goto out;
 	}
 
 	m->op.nr_replicas = min(durability_removing, durability_required) +
@@ -684,8 +722,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 		bch2_data_update_to_text(&buf, m);
 		WARN(1, "trying to move an extent, but nr_replicas=0\n%s", buf.buf);
 		printbuf_exit(&buf);
-		ret = -BCH_ERR_data_update_done;
-		goto done;
+		goto out;
 	}
 
 	m->op.nr_replicas_required = m->op.nr_replicas;
@@ -696,30 +733,16 @@ int bch2_data_update_init(struct btree_trans *trans,
 			? 0
 			: BCH_DISK_RESERVATION_NOFAIL);
 		if (ret)
-			goto err;
+			goto out;
 	}
 
 	if (bkey_extent_is_unwritten(k)) {
 		bch2_update_unwritten_extent(trans, m);
-		goto done;
+		goto out;
 	}
 
 	return 0;
-err:
-	i = 0;
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
-		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
-
-		if ((1U << i) & ptrs_locked)
-			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
-		bch2_dev_put(ca);
-		i++;
-	}
-
-	bch2_bkey_buf_exit(&m->k, c);
-	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
-	return ret;
-done:
+out:
 	bch2_data_update_exit(m);
 	return ret ?: -BCH_ERR_data_update_done;
 }
...
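
Taken together, the init path now acquires device refs and nocow locks up front, before any other state exists, and every later failure funnels through the single out: label into bch2_data_update_exit(), the same teardown callers already use on success; that is what lets the bespoke err: unwind loop above be deleted. A compact sketch of this constructor/destructor shape -- update_init()/update_exit() and their fields are hypothetical, not the bcachefs functions:

	#include <stdbool.h>
	#include <stdlib.h>

	struct update { void *buf; };

	/* One teardown path, shared by callers and the failing constructor. */
	static void update_exit(struct update *u)
	{
		free(u->buf);
		u->buf = NULL;
	}

	static int update_init(struct update *u, bool fail_later)
	{
		int ret = 0;

		u->buf = malloc(64);
		if (!u->buf)
			return -1;	/* nothing taken yet: plain return */

		if (fail_later) {
			ret = -2;
			goto out;	/* all later failures land here */
		}

		return 0;		/* success: caller calls update_exit() */
	out:
		update_exit(u);		/* same destructor on every failure */
		return ret;
	}

Because a partially constructed object is torn down exactly like a finished one, the error paths cannot drift out of sync with the destructor as new resources are added.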