Commit 4f9ec59f authored by Kent Overstreet

bcachefs: unify extent trigger

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5a82ec3f
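
Before this change there were two parallel extent triggers: __mark_extent, run at GC/atomic time, and __trans_mark_extent, run transactionally, each walking the extent's pointers and doing its own replicas accounting. The commit folds both into a single __trigger_extent keyed on a gc flag, and exposes one bch2_trigger_extent entry point wired into both the .trans_trigger and .atomic_trigger hooks. The standalone sketch below illustrates only the dispatch shape; every name and flag value in it is invented for the example and is not the kernel's API.

/*
 * Standalone sketch (not kernel code): one trigger entry point
 * dispatching on flags, replacing separate "mark" (atomic/GC) and
 * "trans_mark" (transactional) functions.  All names and flag values
 * here are invented for illustration.
 */
#include <stdio.h>

#define TRIGGER_TRANSACTIONAL	(1U << 0)
#define TRIGGER_GC		(1U << 1)

/* shared per-key work; only the accounting sink differs by mode */
static int trigger_extent_one(unsigned flags)
{
	printf("updating %s accounting\n",
	       (flags & TRIGGER_GC) ? "in-memory (GC)" : "journalled (transactional)");
	return 0;
}

/* unified entry point, run for both trigger types */
static int trigger_extent(unsigned flags)
{
	/* work that only makes sense transactionally, e.g. queueing
	 * rebalance hints, stays behind a flags check */
	if (flags & TRIGGER_TRANSACTIONAL)
		printf("transactional-only bookkeeping\n");

	if (flags & (TRIGGER_TRANSACTIONAL|TRIGGER_GC))
		return trigger_extent_one(flags);

	return 0;
}

int main(void)
{
	trigger_extent(TRIGGER_TRANSACTIONAL);
	trigger_extent(TRIGGER_GC);
	return 0;
}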
@@ -945,11 +945,11 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
 	return 0;
 }
 
-static int __mark_extent(struct btree_trans *trans,
-			 enum btree_id btree_id, unsigned level,
-			 struct bkey_s_c k, unsigned flags)
+static int __trigger_extent(struct btree_trans *trans,
+			    enum btree_id btree_id, unsigned level,
+			    struct bkey_s_c k, unsigned flags)
 {
-	u64 journal_seq = trans->journal_res.seq;
+	bool gc = flags & BTREE_TRIGGER_GC;
 	struct bch_fs *c = trans->c;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
@@ -959,9 +959,7 @@ static int __mark_extent(struct btree_trans *trans,
 		? BCH_DATA_btree
 		: BCH_DATA_user;
 	s64 dirty_sectors = 0;
-	int ret;
-
-	BUG_ON(!(flags & BTREE_TRIGGER_GC));
+	int ret = 0;
 
 	r.e.data_type	= data_type;
 	r.e.nr_devs	= 0;
@@ -977,13 +975,13 @@ static int __mark_extent(struct btree_trans *trans,
 		if (p.ptr.cached) {
 			if (!stale) {
-				ret = update_cached_sectors(c, k, p.ptr.dev,
-							    disk_sectors, journal_seq, true);
-				if (ret) {
-					bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
-							    __func__);
+				ret = !gc
+					? bch2_update_cached_sectors_list(trans, p.ptr.dev, disk_sectors)
+					: update_cached_sectors(c, k, p.ptr.dev, disk_sectors, 0, true);
+				bch2_fs_fatal_err_on(ret && gc, c, "%s(): no replicas entry while updating cached sectors",
+						     __func__);
+				if (ret)
 					return ret;
-				}
 			}
 		} else if (!p.has_ec) {
 			dirty_sectors	       += disk_sectors;
@@ -1003,100 +1001,46 @@ static int __mark_extent(struct btree_trans *trans,
 	}
 
 	if (r.e.nr_devs) {
-		ret = bch2_update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
-		if (ret) {
+		ret = !gc
+			? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
+			: bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
+		if (unlikely(ret && gc)) {
 			struct printbuf buf = PRINTBUF;
 
 			bch2_bkey_val_to_text(&buf, c, k);
 			bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
 			printbuf_exit(&buf);
-			return ret;
 		}
+		if (ret)
+			return ret;
 	}
 
 	return 0;
 }
 
-int bch2_mark_extent(struct btree_trans *trans,
-		     enum btree_id btree_id, unsigned level,
-		     struct bkey_s_c old, struct bkey_s new,
-		     unsigned flags)
-{
-	return trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
-}
-
-static int __trans_mark_extent(struct btree_trans *trans,
-			       enum btree_id btree_id, unsigned level,
-			       struct bkey_s_c k, unsigned flags)
+int bch2_trigger_extent(struct btree_trans *trans,
+			enum btree_id btree_id, unsigned level,
+			struct bkey_s_c old, struct bkey_s new,
+			unsigned flags)
 {
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct extent_ptr_decoded p;
-	struct bch_replicas_padded r;
-	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
-		? BCH_DATA_btree
-		: BCH_DATA_user;
-	s64 dirty_sectors = 0;
-	int ret = 0;
-
-	r.e.data_type	= data_type;
-	r.e.nr_devs	= 0;
-	r.e.nr_required	= 1;
-
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		s64 disk_sectors;
-		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
-		if (ret < 0)
-			return ret;
-
-		bool stale = ret > 0;
+	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+		struct bch_fs *c = trans->c;
+		int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
+			  (int) bch2_bkey_needs_rebalance(c, old);
 
-		if (p.ptr.cached) {
-			if (!stale) {
-				ret = bch2_update_cached_sectors_list(trans, p.ptr.dev,
-								      disk_sectors);
-				if (ret)
-					return ret;
-			}
-		} else if (!p.has_ec) {
-			dirty_sectors += disk_sectors;
-			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
-		} else {
-			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
+		if (mod) {
+			int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new.k->p, mod > 0);
 			if (ret)
 				return ret;
-
-			r.e.nr_required = 0;
 		}
 	}
 
-	if (r.e.nr_devs) {
-		ret = bch2_update_replicas_list(trans, &r.e, dirty_sectors);
-		if (ret)
-			return ret;
-	}
+	if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
+		return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);
 
 	return 0;
 }
 
-int bch2_trans_mark_extent(struct btree_trans *trans,
-			   enum btree_id btree_id, unsigned level,
-			   struct bkey_s_c old, struct bkey_s new,
-			   unsigned flags)
-{
-	struct bch_fs *c = trans->c;
-	int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
-		  (int) bch2_bkey_needs_rebalance(c, old);
-
-	if (mod) {
-		int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new.k->p, mod > 0);
-		if (ret)
-			return ret;
-	}
-
-	return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
-}
-
 /* KEY_TYPE_reservation */
 
 static int __trigger_reservation(struct btree_trans *trans,
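
A recurring idiom in the unified helper above is steering one shared computation to two accounting sinks, as in ret = !gc ? bch2_update_replicas_list(...) : bch2_update_replicas(...). A minimal sketch of that idiom follows; queue_on_trans_list() and apply_in_memory() are hypothetical stand-ins for the bcachefs list/direct update pairs.

/*
 * Sketch of the dispatch idiom used above; both helper names are
 * hypothetical, standing in for the bcachefs _list/direct variants.
 */
#include <stdio.h>
#include <stdbool.h>

/* hypothetical: defer the update to the transaction commit path */
static int queue_on_trans_list(long sectors)
{
	printf("queued %+ld sectors for commit\n", sectors);
	return 0;
}

/* hypothetical: apply the update to in-memory GC counters now */
static int apply_in_memory(long sectors)
{
	printf("applied %+ld sectors to GC counters\n", sectors);
	return 0;
}

static int update_replicas_dispatch(bool gc, long dirty_sectors)
{
	/* same shape as:
	 *   ret = !gc
	 *	? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
	 *	: bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
	 */
	return !gc
		? queue_on_trans_list(dirty_sectors)
		: apply_in_memory(dirty_sectors);
}

int main(void)
{
	update_replicas_dispatch(false, 128);	/* transactional path */
	update_replicas_dispatch(true, 128);	/* GC path */
	return 0;
}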
@@ -340,12 +340,11 @@ int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 			      size_t, enum bch_data_type, unsigned,
 			      struct gc_pos, unsigned);
 
-int bch2_mark_extent(struct btree_trans *, enum btree_id, unsigned,
-		     struct bkey_s_c, struct bkey_s, unsigned);
+int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
+			struct bkey_s_c, struct bkey_s, unsigned);
 int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
 			  struct bkey_s_c, struct bkey_s, unsigned);
 
-int bch2_trans_mark_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, unsigned);
-
 #define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
 ({												\
 	int ret = 0;										\
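
The body of trigger_run_overwrite_then_insert is elided in the hunk above. As a hedged sketch of the pattern its name and call sites suggest: run the per-key helper once for the key being overwritten, then once for the key being inserted. The types and flags below are stand-ins; the real macro body may differ.

/*
 * Hedged sketch of an overwrite-then-insert runner; struct key,
 * the flags, and show() are all invented for this example.
 */
#include <stdio.h>

struct key { int type; };

typedef int (*trigger_fn)(struct key k, unsigned flags);

#define FLAG_INSERT	(1U << 0)
#define FLAG_OVERWRITE	(1U << 1)

static int show(struct key k, unsigned flags)
{
	printf("key type %d, flags %x\n", k.type, flags);
	return 0;
}

static int run_overwrite_then_insert(trigger_fn fn, struct key old,
				     struct key new, unsigned flags)
{
	int ret = 0;

	/* overwrite pass for the old key, if one exists */
	if (old.type)
		ret = fn(old, flags & ~FLAG_INSERT);
	/* insert pass for the new key, if the first pass succeeded */
	if (!ret && new.type)
		ret = fn(new, flags & ~FLAG_OVERWRITE);
	return ret;
}

int main(void)
{
	struct key old = { .type = 1 }, new = { .type = 2 };

	return run_overwrite_then_insert(show, old, new,
					 FLAG_INSERT|FLAG_OVERWRITE);
}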
@@ -415,8 +415,8 @@ void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
 	.key_invalid	= bch2_btree_ptr_invalid,	\
 	.val_to_text	= bch2_btree_ptr_to_text,	\
 	.swab		= bch2_ptr_swab,		\
-	.trans_trigger	= bch2_trans_mark_extent,	\
-	.atomic_trigger	= bch2_mark_extent,		\
+	.trans_trigger	= bch2_trigger_extent,		\
+	.atomic_trigger	= bch2_trigger_extent,		\
 })
 
 #define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {	\
@@ -424,8 +424,8 @@ void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
 	.val_to_text	= bch2_btree_ptr_v2_to_text,	\
 	.swab		= bch2_ptr_swab,		\
 	.compat		= bch2_btree_ptr_v2_compat,	\
-	.trans_trigger	= bch2_trans_mark_extent,	\
-	.atomic_trigger	= bch2_mark_extent,		\
+	.trans_trigger	= bch2_trigger_extent,		\
+	.atomic_trigger	= bch2_trigger_extent,		\
 	.min_val_size	= 40,				\
 })
 
@@ -439,8 +439,8 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 	.swab		= bch2_ptr_swab,		\
 	.key_normalize	= bch2_extent_normalize,	\
 	.key_merge	= bch2_extent_merge,		\
-	.trans_trigger	= bch2_trans_mark_extent,	\
-	.atomic_trigger	= bch2_mark_extent,		\
+	.trans_trigger	= bch2_trigger_extent,		\
+	.atomic_trigger	= bch2_trigger_extent,		\
 })
 
 /* KEY_TYPE_reservation: */
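
The net effect in the ops tables above is that both trigger hooks now point at the same function. A simplified stand-in for struct bkey_ops showing that shape (names here are illustrative, not the kernel's):

/*
 * Simplified stand-in for struct bkey_ops: both hooks share one
 * implementation after the unification.
 */
#include <stdio.h>

struct ops {
	int (*trans_trigger)(unsigned flags);
	int (*atomic_trigger)(unsigned flags);
};

static int trigger_extent(unsigned flags)
{
	printf("trigger, flags %x\n", flags);
	return 0;
}

/* both hooks now point at the unified trigger */
static const struct ops extent_ops = {
	.trans_trigger	= trigger_extent,
	.atomic_trigger	= trigger_extent,
};

int main(void)
{
	extent_ops.trans_trigger(1);
	extent_ops.atomic_trigger(2);
	return 0;
}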
@@ -308,7 +308,7 @@ int bch2_trans_mark_reflink_v(struct btree_trans *trans,
 		      bkey_val_bytes(new.k) - 8))
 			return 0;
 
-	return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
+	return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
 }
 
 /* indirect inline data */
@@ -33,7 +33,7 @@ int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
 	.val_to_text	= bch2_reflink_v_to_text,	\
 	.swab		= bch2_ptr_swab,		\
 	.trans_trigger	= bch2_trans_mark_reflink_v,	\
-	.atomic_trigger	= bch2_mark_extent,		\
+	.atomic_trigger	= bch2_trigger_extent,		\
 	.min_val_size	= 8,				\
 })