Commit 19a614d2 authored by Kent Overstreet

bcachefs: Better inlining for bch2_alloc_to_v4_mut

This separates out the slowpath into a separate function, and inlines
bch2_alloc_to_v4_mut into bch2_trans_start_alloc_update(), the main place
it's called.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent adf6360b
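The same caller-side conversion repeats across every hunk below, so here is a minimal before/after sketch of the pattern (illustrative only, not part of the commit: the wrapper functions and the local `k` are made up, while bch2_alloc_to_v4(), struct bch_alloc_v4 and BCH_DATA_free are the real identifiers the diff touches). The new bch2_alloc_to_v4() returns a const pointer straight into the key on the fast path, and only fills the caller's stack buffer when an older alloc format has to be converted.

/* Illustrative sketch -- not part of the commit. */

/* Old calling convention: every caller copied the whole alloc struct
 * onto its stack, whatever the key's on-disk version. */
static bool old_bucket_is_free(struct bkey_s_c k)
{
	struct bch_alloc_v4 a;

	bch2_alloc_to_v4(k, &a);
	return a.data_type == BCH_DATA_free;
}

/* New calling convention: bch2_alloc_to_v4() hands back a const
 * pointer; the a_convert buffer is only written on the slowpath,
 * when the key is an older alloc version that needs converting. */
static bool new_bucket_is_free(struct bkey_s_c k)
{
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

	return a->data_type == BCH_DATA_free;
}

In the hunks below this shows up as `a.` becoming `a->` and `&a` arguments becoming plain `a` throughout.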
@@ -70,16 +70,46 @@ static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_
return pos;
}
+static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
+{
+unsigned ret = (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
+BCH_ALLOC_V4_U64s_V0);
+BUG_ON(ret > U8_MAX - BKEY_U64s);
+return ret;
+}
+static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
+{
+set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
+}
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
-void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
+{
+const struct bch_alloc_v4 *ret;
+if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
+goto slowpath;
+ret = bkey_s_c_to_alloc_v4(k).v;
+if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
+goto slowpath;
+return ret;
+slowpath:
+__bch2_alloc_to_v4(k, convert);
+return convert;
+}
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
#define ALLOC_SCAN_BATCH(ca) max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
@@ -204,7 +204,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 bucket,
enum alloc_reserve reserve,
-struct bch_alloc_v4 *a,
+const struct bch_alloc_v4 *a,
struct bucket_alloc_state *s,
struct closure *cl)
{
@@ -289,7 +289,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
struct btree_iter iter = { NULL };
struct bkey_s_c k;
struct open_bucket *ob;
-struct bch_alloc_v4 a;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a;
u64 b = free_entry & ~(~0ULL << 56);
unsigned genbits = free_entry >> 56;
struct printbuf buf = PRINTBUF;
@@ -313,12 +314,12 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
goto err;
}
-bch2_alloc_to_v4(k, &a);
+a = bch2_alloc_to_v4(k, &a_convert);
-if (genbits != (alloc_freespace_genbits(a) >> 56)) {
+if (genbits != (alloc_freespace_genbits(*a) >> 56)) {
prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
" freespace key ",
-genbits, alloc_freespace_genbits(a) >> 56);
+genbits, alloc_freespace_genbits(*a) >> 56);
bch2_bkey_val_to_text(&buf, c, freespace_k);
prt_printf(&buf, "\n ");
bch2_bkey_val_to_text(&buf, c, k);
@@ -328,7 +329,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
}
-if (a.data_type != BCH_DATA_free) {
+if (a->data_type != BCH_DATA_free) {
prt_printf(&buf, "non free bucket in freespace btree\n"
" freespace key ");
bch2_bkey_val_to_text(&buf, c, freespace_k);
@@ -339,7 +340,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
goto err;
}
-ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
+ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
if (!ob)
iter.path->preserve = false;
err:
@@ -397,7 +398,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
again:
for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
BTREE_ITER_SLOTS, k, ret) {
-struct bch_alloc_v4 a;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a;
if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
break;
@@ -406,14 +408,14 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
is_superblock_bucket(ca, k.k->p.offset))
continue;
-bch2_alloc_to_v4(k, &a);
+a = bch2_alloc_to_v4(k, &a_convert);
-if (a.data_type != BCH_DATA_free)
+if (a->data_type != BCH_DATA_free)
continue;
s->buckets_seen++;
-ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
+ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
if (ob)
break;
}
@@ -934,6 +934,9 @@ struct bch_alloc_v4 {
struct bpos backpointers[0];
} __packed __aligned(8);
+#define BCH_ALLOC_V4_U64s_V0 6
+#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(u64))
LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
@@ -1351,15 +1351,16 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc, *b;
struct bkey_i_alloc_v4 *a;
-struct bch_alloc_v4 old, new;
+struct bch_alloc_v4 old_convert, new;
+const struct bch_alloc_v4 *old;
enum bch_data_type type;
int ret;
if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
return 1;
-bch2_alloc_to_v4(k, &old);
-new = old;
+old = bch2_alloc_to_v4(k, &old_convert);
+new = *old;
percpu_down_read(&c->mark_lock);
b = gc_bucket(ca, iter->pos.offset);
@@ -1371,7 +1372,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
type = __alloc_data_type(b->dirty_sectors,
b->cached_sectors,
b->stripe,
-old,
+*old,
b->data_type);
if (b->data_type != type) {
struct bch_dev_usage *u;
@@ -1393,7 +1394,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
gc.data_type != BCH_DATA_btree)
return 0;
-if (gen_after(old.gen, gc.gen))
+if (gen_after(old->gen, gc.gen))
return 0;
#define copy_bucket_field(_f) \
@@ -1415,7 +1416,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
copy_bucket_field(stripe);
#undef copy_bucket_field
-if (!bch2_alloc_v4_cmp(old, new))
+if (!bch2_alloc_v4_cmp(*old, new))
return 0;
a = bch2_alloc_to_v4_mut(trans, k);
@@ -1473,7 +1474,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
struct btree_iter iter;
struct bkey_s_c k;
struct bucket *g;
-struct bch_alloc_v4 a;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a;
unsigned i;
int ret;
@@ -1499,20 +1501,20 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
ca = bch_dev_bkey_exists(c, k.k->p.inode);
g = gc_bucket(ca, k.k->p.offset);
-bch2_alloc_to_v4(k, &a);
+a = bch2_alloc_to_v4(k, &a_convert);
g->gen_valid = 1;
-g->gen = a.gen;
+g->gen = a->gen;
if (metadata_only &&
-(a.data_type == BCH_DATA_user ||
-a.data_type == BCH_DATA_cached ||
-a.data_type == BCH_DATA_parity)) {
-g->data_type = a.data_type;
-g->dirty_sectors = a.dirty_sectors;
-g->cached_sectors = a.cached_sectors;
-g->stripe = a.stripe;
-g->stripe_redundancy = a.stripe_redundancy;
+(a->data_type == BCH_DATA_user ||
+a->data_type == BCH_DATA_cached ||
+a->data_type == BCH_DATA_parity)) {
+g->data_type = a->data_type;
+g->dirty_sectors = a->dirty_sectors;
+g->cached_sectors = a->cached_sectors;
+g->stripe = a->stripe;
+g->stripe_redundancy = a->stripe_redundancy;
}
}
bch2_trans_iter_exit(&trans, &iter);
@@ -1913,13 +1915,12 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
struct bkey_s_c k)
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
-struct bch_alloc_v4 a;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
struct bkey_i_alloc_v4 *a_mut;
int ret;
-bch2_alloc_to_v4(k, &a);
-if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
+if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
return 0;
a_mut = bch2_alloc_to_v4_mut(trans, k);
@@ -490,8 +490,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
{
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
+u64 bucket_journal_seq;
struct bch_fs *c = trans->c;
-struct bch_alloc_v4 old_a, new_a;
+struct bch_alloc_v4 old_a_convert, new_a_convert;
+const struct bch_alloc_v4 *old_a, *new_a;
struct bch_dev *ca;
int ret = 0;
@@ -508,36 +510,38 @@ int bch2_mark_alloc(struct btree_trans *trans,
ca = bch_dev_bkey_exists(c, new.k->p.inode);
-bch2_alloc_to_v4(old, &old_a);
-bch2_alloc_to_v4(new, &new_a);
+old_a = bch2_alloc_to_v4(old, &old_a_convert);
+new_a = bch2_alloc_to_v4(new, &new_a_convert);
+bucket_journal_seq = new_a->journal_seq;
if ((flags & BTREE_TRIGGER_INSERT) &&
-data_type_is_empty(old_a.data_type) !=
-data_type_is_empty(new_a.data_type) &&
+data_type_is_empty(old_a->data_type) !=
+data_type_is_empty(new_a->data_type) &&
new.k->type == KEY_TYPE_alloc_v4) {
struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
-BUG_ON(!journal_seq);
+EBUG_ON(!journal_seq);
/*
* If the btree updates referring to a bucket weren't flushed
* before the bucket became empty again, then we don't have
* to wait on a journal flush before we can reuse the bucket:
*/
-new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
+v->journal_seq = bucket_journal_seq =
+data_type_is_empty(new_a->data_type) &&
(journal_seq == v->journal_seq ||
bch2_journal_noflush_seq(&c->journal, v->journal_seq))
? 0 : journal_seq;
-v->journal_seq = new_a.journal_seq;
}
-if (!data_type_is_empty(old_a.data_type) &&
-data_type_is_empty(new_a.data_type) &&
-new_a.journal_seq) {
+if (!data_type_is_empty(old_a->data_type) &&
+data_type_is_empty(new_a->data_type) &&
+bucket_journal_seq) {
ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
new.k->p.inode, new.k->p.offset,
-new_a.journal_seq);
+bucket_journal_seq);
if (ret) {
bch2_fs_fatal_error(c,
"error setting bucket_needs_journal_commit: %i", ret);
@@ -546,10 +550,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
percpu_down_read(&c->mark_lock);
-if (!gc && new_a.gen != old_a.gen)
-*bucket_gen(ca, new.k->p.offset) = new_a.gen;
+if (!gc && new_a->gen != old_a->gen)
+*bucket_gen(ca, new.k->p.offset) = new_a->gen;
-bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
if (gc) {
struct bucket *g = gc_bucket(ca, new.k->p.offset);
@@ -557,12 +561,12 @@ int bch2_mark_alloc(struct btree_trans *trans,
bucket_lock(g);
g->gen_valid = 1;
-g->gen = new_a.gen;
-g->data_type = new_a.data_type;
-g->stripe = new_a.stripe;
-g->stripe_redundancy = new_a.stripe_redundancy;
-g->dirty_sectors = new_a.dirty_sectors;
-g->cached_sectors = new_a.cached_sectors;
+g->gen = new_a->gen;
+g->data_type = new_a->data_type;
+g->stripe = new_a->stripe;
+g->stripe_redundancy = new_a->stripe_redundancy;
+g->dirty_sectors = new_a->dirty_sectors;
+g->cached_sectors = new_a->cached_sectors;
bucket_unlock(g);
}
@@ -574,9 +578,9 @@ int bch2_mark_alloc(struct btree_trans *trans,
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
-old_a.cached_sectors) {
+old_a->cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
--((s64) old_a.cached_sectors),
+-((s64) old_a->cached_sectors),
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
@@ -585,20 +589,20 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
}
-if (new_a.data_type == BCH_DATA_free &&
-(!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+if (new_a->data_type == BCH_DATA_free &&
+(!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
-if (new_a.data_type == BCH_DATA_need_discard &&
-(!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+if (new_a->data_type == BCH_DATA_need_discard &&
+(!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
bch2_do_discards(c);
-if (old_a.data_type != BCH_DATA_cached &&
-new_a.data_type == BCH_DATA_cached &&
+if (old_a->data_type != BCH_DATA_cached &&
+new_a->data_type == BCH_DATA_cached &&
should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
bch2_do_invalidates(c);
-if (new_a.data_type == BCH_DATA_need_gc_gens)
+if (new_a->data_type == BCH_DATA_need_gc_gens)
bch2_do_gc_gens(c);
return 0;
@@ -140,7 +140,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
-struct bch_alloc_v4 a;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a;
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
struct bpos alloc_pos;
@@ -160,10 +161,10 @@ static int bch2_check_lru_key(struct btree_trans *trans,
if (ret)
goto err;
-bch2_alloc_to_v4(k, &a);
+a = bch2_alloc_to_v4(k, &a_convert);
-if (fsck_err_on(a.data_type != BCH_DATA_cached ||
-a.io_time[READ] != lru_k.k->p.offset, c,
+if (fsck_err_on(a->data_type != BCH_DATA_cached ||
+a->io_time[READ] != lru_k.k->p.offset, c,
"incorrect lru entry %s\n"
" for %s",
(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
@@ -117,7 +117,6 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
-struct bch_alloc_v4 a;
int ret;
bch2_trans_init(&trans, c, 0, 0);
@@ -126,21 +125,23 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
BTREE_ITER_PREFETCH, k, ret) {
struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
struct copygc_heap_entry e;
+struct bch_alloc_v4 a_convert;
+const struct bch_alloc_v4 *a;
-bch2_alloc_to_v4(k, &a);
+a = bch2_alloc_to_v4(k, &a_convert);
-if (a.data_type != BCH_DATA_user ||
-a.dirty_sectors >= ca->mi.bucket_size ||
+if (a->data_type != BCH_DATA_user ||
+a->dirty_sectors >= ca->mi.bucket_size ||
bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
continue;
e = (struct copygc_heap_entry) {
.dev = iter.pos.inode,
-.gen = a.gen,
-.replicas = 1 + a.stripe_redundancy,
-.fragmentation = div_u64((u64) a.dirty_sectors * (1ULL << 31),
+.gen = a->gen,
+.replicas = 1 + a->stripe_redundancy,
+.fragmentation = div_u64((u64) a->dirty_sectors * (1ULL << 31),
ca->mi.bucket_size),
-.sectors = a.dirty_sectors,
+.sectors = a->dirty_sectors,
.offset = bucket_to_sector(ca, iter.pos.offset),
};
heap_add_or_replace(h, e, -fragmentation_cmp, NULL);