Commit 55f7962d authored by Kent Overstreet

bcachefs: bch_alloc->stripe_sectors

Add a separate counter to bch_alloc_v4 for the amount of striped data; this
lets us separately track striped and unstriped data in a bucket, which
lets us see when erasure coding has failed to update extents with stripe
pointers, and also find buckets to continue updating if we crash midway
through creating a new stripe.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c13d526d
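Since the new field is appended after fragmentation_lru, bch_alloc_v4 keys written before this commit are too short to contain it, and readers have to size-check the key before touching stripe_sectors; the first hunk below open-codes exactly that guard. A minimal standalone sketch of the pattern (the struct is simplified, and stripe_sectors_or_zero() and bp_start_u64s are hypothetical names standing in for the open-coded check and BCH_ALLOC_V4_BACKPOINTERS_START(a)):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the tail of struct bch_alloc_v4: stripe_sectors
 * is appended after fragmentation_lru, so keys written before this commit
 * end at fragmentation_lru and do not contain the field. */
struct alloc_v4_sketch {
	uint64_t	fragmentation_lru;
	uint32_t	stripe_sectors;	/* new in this commit */
	uint32_t	pad;
};

/* Hypothetical helper mirroring the open-coded guard in the validate hunk:
 * read stripe_sectors only when the region before the backpointers is large
 * enough to contain it, otherwise treat it as 0. bp_start_u64s plays the
 * role of BCH_ALLOC_V4_BACKPOINTERS_START(a), the size of that region in
 * u64s. */
static uint32_t stripe_sectors_or_zero(const struct alloc_v4_sketch *a,
				       unsigned bp_start_u64s)
{
	return bp_start_u64s * sizeof(uint64_t) >
			offsetof(struct alloc_v4_sketch, stripe_sectors)
		? a->stripe_sectors
		: 0;
}

int main(void)
{
	struct alloc_v4_sketch a = { .stripe_sectors = 128 };

	/* Old key: the region covers only fragmentation_lru (1 u64),
	 * so the field is absent and reads as 0. */
	printf("%u\n", stripe_sectors_or_zero(&a, 1));
	/* New key: the region also covers stripe_sectors + pad (2 u64s). */
	printf("%u\n", stripe_sectors_or_zero(&a, 2));
	return 0;
}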
@@ -268,27 +268,41 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
 			 i == READ ? "read" : "write",
 			 a.v->io_time[i], LRU_TIME_MAX);
 
+	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(a.v) * sizeof(u64) >
+		offsetof(struct bch_alloc_v4, stripe_sectors)
+		? a.v->stripe_sectors
+		: 0;
+
 	switch (a.v->data_type) {
 	case BCH_DATA_free:
 	case BCH_DATA_need_gc_gens:
 	case BCH_DATA_need_discard:
-		bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
+		bkey_fsck_err_on(stripe_sectors ||
+				 a.v->dirty_sectors ||
+				 a.v->cached_sectors ||
+				 a.v->stripe,
 				 c, err, alloc_key_empty_but_have_data,
-				 "empty data type free but have data");
+				 "empty data type free but have data %u.%u.%u %u",
+				 stripe_sectors,
+				 a.v->dirty_sectors,
+				 a.v->cached_sectors,
+				 a.v->stripe);
 		break;
 	case BCH_DATA_sb:
 	case BCH_DATA_journal:
 	case BCH_DATA_btree:
 	case BCH_DATA_user:
 	case BCH_DATA_parity:
-		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+		bkey_fsck_err_on(!a.v->dirty_sectors &&
+				 !stripe_sectors,
				 c, err, alloc_key_dirty_sectors_0,
 				 "data_type %s but dirty_sectors==0",
 				 bch2_data_type_str(a.v->data_type));
 		break;
 	case BCH_DATA_cached:
 		bkey_fsck_err_on(!a.v->cached_sectors ||
-				 bch2_bucket_sectors_dirty(*a.v) ||
+				 a.v->dirty_sectors ||
+				 stripe_sectors ||
 				 a.v->stripe,
 				 c, err, alloc_key_cached_inconsistency,
 				 "data type inconsistency");
@@ -319,6 +333,7 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 	a->stripe	= swab32(a->stripe);
 	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
 	a->fragmentation_lru = swab64(a->fragmentation_lru);
+	a->stripe_sectors = swab32(a->stripe_sectors);
 
 	bps = alloc_v4_backpointers(a);
 	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
@@ -343,6 +358,7 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
 	prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
 	prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
+	prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
 	prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
 	prt_printf(out, "stripe %u\n", a->stripe);
 	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
@@ -1981,6 +1997,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 	a->v.gen++;
 	a->v.data_type		= 0;
 	a->v.dirty_sectors	= 0;
+	a->v.stripe_sectors	= 0;
 	a->v.cached_sectors	= 0;
 	a->v.io_time[READ]	= bch2_current_io_time(c, READ);
 	a->v.io_time[WRITE]	= bch2_current_io_time(c, WRITE);
@@ -41,6 +41,7 @@ static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
 {
 	dst->gen		= src.gen;
 	dst->data_type		= src.data_type;
+	dst->stripe_sectors	= src.stripe_sectors;
 	dst->dirty_sectors	= src.dirty_sectors;
 	dst->cached_sectors	= src.cached_sectors;
 	dst->stripe		= src.stripe;
@@ -50,6 +51,7 @@ static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket s
 {
 	dst->gen		= src.gen;
 	dst->data_type		= src.data_type;
+	dst->stripe_sectors	= src.stripe_sectors;
 	dst->dirty_sectors	= src.dirty_sectors;
 	dst->cached_sectors	= src.cached_sectors;
 	dst->stripe		= src.stripe;
@@ -82,12 +84,12 @@ static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
 static inline unsigned bch2_bucket_sectors_total(struct bch_alloc_v4 a)
 {
-	return a.dirty_sectors + a.cached_sectors;
+	return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
 }
 
 static inline unsigned bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
 {
-	return a.dirty_sectors;
+	return a.stripe_sectors + a.dirty_sectors;
 }
 
 static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
@@ -103,7 +105,7 @@ static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
 {
 	if (a.stripe)
 		return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
-	if (a.dirty_sectors)
+	if (bch2_bucket_sectors_dirty(a))
 		return data_type;
 	if (a.cached_sectors)
 		return BCH_DATA_cached;
@@ -70,6 +70,8 @@ struct bch_alloc_v4 {
 	__u32			stripe;
 	__u32			nr_external_backpointers;
 	__u64			fragmentation_lru;
+	__u32			stripe_sectors;
+	__u32			pad;
 } __packed __aligned(8);
 
 #define BCH_ALLOC_V4_U64s_V0	6
@@ -872,6 +872,7 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
 		l.oldest_gen != r.oldest_gen ||
 		l.data_type != r.data_type ||
 		l.dirty_sectors != r.dirty_sectors ||
+		l.stripe_sectors != r.stripe_sectors ||
 		l.cached_sectors != r.cached_sectors ||
 		l.stripe_redundancy != r.stripe_redundancy ||
 		l.stripe != r.stripe;
@@ -941,6 +942,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	copy_bucket_field(alloc_key_gen_wrong,			gen);
 	copy_bucket_field(alloc_key_dirty_sectors_wrong,	dirty_sectors);
+	copy_bucket_field(alloc_key_stripe_sectors_wrong,	stripe_sectors);
 	copy_bucket_field(alloc_key_cached_sectors_wrong,	cached_sectors);
 	copy_bucket_field(alloc_key_stripe_wrong,		stripe);
 	copy_bucket_field(alloc_key_stripe_redundancy_wrong,	stripe_redundancy);
@@ -533,6 +533,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
 			g->gen_valid		= true;
 			g->gen			= p.ptr.gen;
 			g->data_type		= 0;
+			g->stripe_sectors	= 0;
 			g->dirty_sectors	= 0;
 			g->cached_sectors	= 0;
 		} else {
@@ -578,6 +579,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
 			g->gen_valid		= true;
 			g->gen			= p.ptr.gen;
 			g->data_type		= data_type;
+			g->stripe_sectors	= 0;
 			g->dirty_sectors	= 0;
 			g->cached_sectors	= 0;
 		} else {
@@ -990,14 +992,14 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
 static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
 			  struct bkey_s_c k,
-			  const struct bch_extent_ptr *ptr,
+			  const struct extent_ptr_decoded *p,
 			  s64 sectors, enum bch_data_type ptr_data_type,
 			  struct bch_alloc_v4 *a)
 {
-	u32 *dst_sectors = !ptr->cached
-		? &a->dirty_sectors
-		: &a->cached_sectors;
-	int ret = bch2_bucket_ref_update(trans, ca, k, ptr, sectors, ptr_data_type,
+	u32 *dst_sectors = p->has_ec ? &a->stripe_sectors :
+		!p->ptr.cached ? &a->dirty_sectors :
+		&a->cached_sectors;
+	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
 					 a->gen, a->data_type, dst_sectors);
 
 	if (ret)
@@ -1034,7 +1036,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 	if (flags & BTREE_TRIGGER_transactional) {
 		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket);
 		ret = PTR_ERR_OR_ZERO(a) ?:
-			__mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &a->v);
+			__mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v);
 		if (ret)
 			goto err;
@@ -1057,7 +1059,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
 		bucket_lock(g);
 		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
-		ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
+		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new);
 		if (!ret) {
 			alloc_to_bucket(g, new);
 			bch2_dev_usage_update(c, ca, &old, &new, 0, true);
@@ -16,7 +16,8 @@ struct bucket {
 	u32			stripe;
 	u32			dirty_sectors;
 	u32			cached_sectors;
-};
+	u32			stripe_sectors;
+} __aligned(sizeof(long));
 
 struct bucket_array {
 	struct rcu_head		rcu;