Commit be11ae16 authored by Kent Overstreet

bcachefs: __mark_pointer now takes bch_alloc_v4

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c02eb9e8
......@@ -263,7 +263,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
case BCH_DATA_need_discard:
bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
c, err, alloc_key_empty_but_have_data,
"empty data type free but have data");
break;
......@@ -743,7 +743,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
alloc_data_type_set(new_a, new_a->data_type);
if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
......@@ -1703,7 +1703,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
if (ret)
goto out;
if (a->v.dirty_sectors) {
if (bch2_bucket_sectors_total(a->v)) {
if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
trans, "attempting to discard bucket with dirty data\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
......
......@@ -39,13 +39,22 @@ static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
return a.gen - a.oldest_gen;
}
static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket b)
/*
 * Copy the allocation info tracked by both representations from an
 * unpacked bch_alloc_v4 into an in-memory GC bucket.  Only the five
 * shared fields are written; other fields of *dst are left untouched.
 *
 * NOTE(review): the original span interleaved the removed pre-commit
 * body (which read from an undeclared parameter `b`) with the new one;
 * only the post-commit body is kept here.
 */
static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
{
	dst->gen		= src.gen;
	dst->data_type		= src.data_type;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->cached_sectors	= src.cached_sectors;
	dst->stripe		= src.stripe;
}
/*
 * Mirror an in-memory GC bucket into the unpacked bch_alloc_v4 form,
 * field by field.  Fields of *dst with no bucket counterpart are not
 * modified here.
 */
static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
{
	dst->stripe		= src.stripe;
	dst->cached_sectors	= src.cached_sectors;
	dst->dirty_sectors	= src.dirty_sectors;
	dst->data_type		= src.data_type;
	dst->gen		= src.gen;
}
static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
......@@ -73,7 +82,7 @@ static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
bucket_data_type(bucket) != bucket_data_type(ptr);
}
/*
 * Total sectors referenced in a bucket: dirty data plus cached
 * (reclaimable) data.
 *
 * NOTE(review): the original span carried both the pre-rename
 * signature (bch2_bucket_sectors) and the renamed one back-to-back —
 * two signatures for one body is invalid C; only the renamed
 * post-commit signature is kept.
 */
static inline unsigned bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
	return a.dirty_sectors + a.cached_sectors;
}
......
......@@ -943,23 +943,18 @@ static int __mark_pointer(struct btree_trans *trans,
			  struct bkey_s_c k,
			  const struct bch_extent_ptr *ptr,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a)
{
	/* Cached pointers account against cached_sectors; all others dirty. */
	u32 *dst_sectors = !ptr->cached
		? &a->dirty_sectors
		: &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, k, ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;

	/*
	 * Counters changed above; recompute the bucket data type instead of
	 * open-coding the old empty/stripe special cases (that logic now
	 * lives in alloc_data_type_set(), per this commit).
	 */
	alloc_data_type_set(a, ptr_data_type);
	return 0;
}
......@@ -984,9 +979,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (ret)
return ret;
ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type,
a->v.gen, &a->v.data_type,
&a->v.dirty_sectors, &a->v.cached_sectors) ?:
ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type, &a->v) ?:
bch2_trans_update(trans, &iter, &a->k_i, 0);
bch2_trans_iter_exit(trans, &iter);
......@@ -1003,30 +996,20 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
percpu_down_read(&c->mark_lock);
struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
struct bucket *g = gc_bucket(ca, bucket.offset);
bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
u8 bucket_data_type = g->data_type;
int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
data_type, g->gen,
&bucket_data_type,
&g->dirty_sectors,
&g->cached_sectors);
if (ret) {
bucket_unlock(g);
percpu_up_read(&c->mark_lock);
return ret;
}
struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
g->data_type = bucket_data_type;
struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
int ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type, &new);
if (!ret) {
alloc_to_bucket(g, new);
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
bucket_unlock(g);
bch2_dev_usage_update(c, ca, &old, &new, 0, true);
percpu_up_read(&c->mark_lock);
return ret;
}
return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment