Commit dafff7e5 authored by Kent Overstreet

bcachefs: New bucket sector count helpers

This introduces bch2_bucket_sectors() and bch2_bucket_sectors_dirty(),
prep work for separately accounting stripe sectors.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e6674dec
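
For readers skimming the diff, here is a minimal standalone sketch of what the two new helpers compute. This is not the kernel code: the struct below is a simplified stand-in for struct bch_alloc_v4 with only the fields the helpers touch, and all names and values in it are illustrative.

    /*
     * Sketch only: simplified stand-in for struct bch_alloc_v4.
     * dirty_sectors holds live, pinned data; cached_sectors holds
     * clean, evictable cache data.
     */
    #include <stdio.h>

    struct alloc_v4_sketch {
            unsigned dirty_sectors;
            unsigned cached_sectors;
    };

    /* total sectors in use, dirty or cached */
    static unsigned bucket_sectors(struct alloc_v4_sketch a)
    {
            return a.dirty_sectors + a.cached_sectors;
    }

    /*
     * dirty only -- the commit routes former a->dirty_sectors users
     * through this, so stripe sectors can later be accounted here
     * separately without touching every caller again
     */
    static unsigned bucket_sectors_dirty(struct alloc_v4_sketch a)
    {
            return a.dirty_sectors;
    }

    int main(void)
    {
            struct alloc_v4_sketch a = { .dirty_sectors = 384, .cached_sectors = 128 };

            /* prints: total 512, dirty 384 */
            printf("total %u, dirty %u\n", bucket_sectors(a), bucket_sectors_dirty(a));
            return 0;
    }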
@@ -261,10 +261,8 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
 	case BCH_DATA_free:
 	case BCH_DATA_need_gc_gens:
 	case BCH_DATA_need_discard:
-		bkey_fsck_err_on(a.v->dirty_sectors ||
-				 a.v->cached_sectors ||
-				 a.v->stripe, c, err,
-				 alloc_key_empty_but_have_data,
+		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+				 c, err, alloc_key_empty_but_have_data,
 				 "empty data type free but have data");
 		break;
 	case BCH_DATA_sb:
@@ -272,22 +270,21 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
 	case BCH_DATA_btree:
 	case BCH_DATA_user:
 	case BCH_DATA_parity:
-		bkey_fsck_err_on(!a.v->dirty_sectors, c, err,
-				 alloc_key_dirty_sectors_0,
+		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+				 c, err, alloc_key_dirty_sectors_0,
 				 "data_type %s but dirty_sectors==0",
 				 bch2_data_types[a.v->data_type]);
 		break;
 	case BCH_DATA_cached:
 		bkey_fsck_err_on(!a.v->cached_sectors ||
-				 a.v->dirty_sectors ||
-				 a.v->stripe, c, err,
-				 alloc_key_cached_inconsistency,
+				 bch2_bucket_sectors_dirty(*a.v) ||
+				 a.v->stripe,
+				 c, err, alloc_key_cached_inconsistency,
 				 "data type inconsistency");

 		bkey_fsck_err_on(!a.v->io_time[READ] &&
 				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
-				 c, err,
-				 alloc_key_cached_but_read_time_zero,
+				 c, err, alloc_key_cached_but_read_time_zero,
 				 "cached bucket with read_time == 0");
 		break;
 	case BCH_DATA_stripe:
@@ -790,8 +787,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,

 	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

-	if (new_a->dirty_sectors > old_a->dirty_sectors ||
-	    new_a->cached_sectors > old_a->cached_sectors) {
+	if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
 		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
 		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
...
@@ -71,6 +71,24 @@ static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
 	return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
 }

+static inline unsigned bch2_bucket_sectors(struct bch_alloc_v4 a)
+{
+	return a.dirty_sectors + a.cached_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_dirty(struct bch_alloc_v4 a)
+{
+	return a.dirty_sectors;
+}
+
+static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
+						      struct bch_alloc_v4 a)
+{
+	int d = bch2_bucket_sectors_dirty(a);
+
+	return d ? max(0, ca->mi.bucket_size - d) : 0;
+}
+
 static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
 {
 	return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
@@ -90,10 +108,11 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
 					      struct bch_dev *ca)
 {
 	if (!data_type_movable(a.data_type) ||
-	    a.dirty_sectors >= ca->mi.bucket_size)
+	    !bch2_bucket_sectors_fragmented(ca, a))
 		return 0;

-	return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
+	u64 d = bch2_bucket_sectors_dirty(a);
+	return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
 }

 static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
...
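The fragmentation LRU index in the hunk above scales a bucket's dirty fill to a 31-bit fixed-point fraction of the bucket, and the new bch2_bucket_sectors_fragmented() guard means a bucket only counts as fragmented while it has dirty sectors but is not full. A worked example of that arithmetic follows; this is a sketch, not kernel code, and frag_idx and BUCKET_SIZE are made-up names with an example bucket size.

    #include <stdint.h>
    #include <stdio.h>

    #define BUCKET_SIZE 1024u	/* example value, not a real device parameter */

    static uint64_t frag_idx(unsigned dirty_sectors)
    {
            /* empty or completely full buckets are not fragmented */
            if (!dirty_sectors || dirty_sectors >= BUCKET_SIZE)
                    return 0;

            /* dirty fill as a 31-bit fixed-point fraction of the bucket */
            return (uint64_t) dirty_sectors * (1ULL << 31) / BUCKET_SIZE;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long) frag_idx(256));  /* 536870912  (2^29) */
            printf("%llu\n", (unsigned long long) frag_idx(512));  /* 1073741824 (2^30) */
            printf("%llu\n", (unsigned long long) frag_idx(1024)); /* 0: full, not fragmented */
            return 0;
    }

Buckets with less live data thus get smaller keys, sorting toward the cheap-to-evacuate end of the fragmentation LRU.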
@@ -277,14 +277,6 @@ void bch2_dev_usage_init(struct bch_dev *ca)
 	ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
 }

-static inline int bucket_sectors_fragmented(struct bch_dev *ca,
-					    struct bch_alloc_v4 a)
-{
-	return a.dirty_sectors
-		? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
-		: 0;
-}
-
 static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 				  struct bch_alloc_v4 old,
 				  struct bch_alloc_v4 new,
@@ -306,41 +298,40 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	u->d[old.data_type].buckets--;
 	u->d[new.data_type].buckets++;

-	u->buckets_ec -= (int) !!old.stripe;
-	u->buckets_ec += (int) !!new.stripe;
+	u->buckets_ec -= !!old.stripe;
+	u->buckets_ec += !!new.stripe;

-	u->d[old.data_type].sectors -= old.dirty_sectors;
-	u->d[new.data_type].sectors += new.dirty_sectors;
+	u->d[old.data_type].sectors -= bch2_bucket_sectors_dirty(old);
+	u->d[new.data_type].sectors += bch2_bucket_sectors_dirty(new);
 	u->d[BCH_DATA_cached].sectors += new.cached_sectors;
 	u->d[BCH_DATA_cached].sectors -= old.cached_sectors;

-	u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
-	u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
+	u->d[old.data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, old);
+	u->d[new.data_type].fragmented += bch2_bucket_sectors_fragmented(ca, new);

 	preempt_enable();
 }

+static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
+{
+	return (struct bch_alloc_v4) {
+		.gen		= b.gen,
+		.data_type	= b.data_type,
+		.dirty_sectors	= b.dirty_sectors,
+		.cached_sectors	= b.cached_sectors,
+		.stripe		= b.stripe,
+	};
+}
+
 static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
 				    struct bucket old, struct bucket new,
 				    u64 journal_seq, bool gc)
 {
-	struct bch_alloc_v4 old_a = {
-		.gen		= old.gen,
-		.data_type	= old.data_type,
-		.dirty_sectors	= old.dirty_sectors,
-		.cached_sectors	= old.cached_sectors,
-		.stripe		= old.stripe,
-	};
-	struct bch_alloc_v4 new_a = {
-		.gen		= new.gen,
-		.data_type	= new.data_type,
-		.dirty_sectors	= new.dirty_sectors,
-		.cached_sectors	= new.cached_sectors,
-		.stripe		= new.stripe,
-	};
-
-	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+	bch2_dev_usage_update(c, ca,
+			      bucket_m_to_alloc(old),
+			      bucket_m_to_alloc(new),
+			      journal_seq, gc);
 }

 static inline int __update_replicas(struct bch_fs *c,
@@ -640,7 +631,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 		goto err;
 	}

-
 	g->data_type = data_type;
 	g->dirty_sectors += sectors;
 	new = *g;
...
@@ -677,7 +677,7 @@ int __bch2_evacuate_bucket(struct moving_context *ctxt,
 		}

 		a = bch2_alloc_to_v4(k, &a_convert);
-		dirty_sectors = a->dirty_sectors;
+		dirty_sectors = bch2_bucket_sectors_dirty(*a);
 		bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
 		fragmentation = a->fragmentation_lru;
...
@@ -91,7 +91,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,

 	a = bch2_alloc_to_v4(k, &_a);
 	b->k.gen = a->gen;
-	b->sectors = a->dirty_sectors;
+	b->sectors = bch2_bucket_sectors_dirty(*a);

 	ret = data_type_movable(a->data_type) &&
 	      a->fragmentation_lru &&
...