Commit 153d1c63 authored by Kent Overstreet

bcachefs: unify alloc trigger

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6820ac2c
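
This change folds the two alloc triggers into one: the transactional trigger bch2_trans_mark_alloc and the atomic/mark trigger bch2_mark_alloc become a single bch2_trigger_alloc that branches on the trigger flags, and the bkey_ops tables point both .trans_trigger and .atomic_trigger at it. A condensed outline of that dispatch shape, drawn from the diff below (an illustrative sketch, not the full function body):

/* Condensed outline only; the real body is in the diff below. */
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       unsigned flags)
{
	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		/* what bch2_trans_mark_alloc used to do: update data_type,
		 * io_time, and the read/fragmentation LRU entries */
	}

	if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
		/* the commit-time work from bch2_mark_alloc: bucket gen,
		 * device usage accounting, journal flushing for freed buckets */
	}

	if ((flags & BTREE_TRIGGER_GC) && (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
		/* the gc-bucket update that used to live in bch2_mark_alloc */
	}

	return 0;
}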
@@ -749,25 +749,25 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
 	return ret;
 }
 
-int bch2_trans_mark_alloc(struct btree_trans *trans,
-			  enum btree_id btree_id, unsigned level,
-			  struct bkey_s_c old, struct bkey_s new,
-			  unsigned flags)
+int bch2_trigger_alloc(struct btree_trans *trans,
+		       enum btree_id btree, unsigned level,
+		       struct bkey_s_c old, struct bkey_s new,
+		       unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a_convert, *new_a;
-	const struct bch_alloc_v4 *old_a;
-	u64 old_lru, new_lru;
 	int ret = 0;
 
-	/*
-	 * Deletion only happens in the device removal path, with
-	 * BTREE_TRIGGER_NORUN:
-	 */
-	BUG_ON(new.k->type != KEY_TYPE_alloc_v4);
-
-	old_a = bch2_alloc_to_v4(old, &old_a_convert);
-	new_a = bkey_s_to_alloc_v4(new).v;
+	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+				       "alloc key for invalid device or bucket"))
+		return -EIO;
+
+	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
+
+	struct bch_alloc_v4 old_a_convert;
+	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
+
+	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
 
 		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
@@ -798,9 +798,8 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 		    !new_a->io_time[READ])
 			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 
-	old_lru = alloc_lru_idx_read(*old_a);
-	new_lru = alloc_lru_idx_read(*new_a);
+		u64 old_lru = alloc_lru_idx_read(*old_a);
+		u64 new_lru = alloc_lru_idx_read(*new_a);
 		if (old_lru != new_lru) {
 			ret = bch2_lru_change(trans, new.k->p.inode,
 					      bucket_to_u64(new.k->p),
@@ -811,7 +810,6 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
 					bch_dev_bkey_exists(c, new.k->p.inode));
 		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
 			ret = bch2_lru_change(trans,
 					      BCH_LRU_FRAGMENTATION_START,
@@ -839,49 +837,18 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 			if (ret)
 				return ret;
 		}
+	}
 
-	return 0;
-}
-
-int bch2_mark_alloc(struct btree_trans *trans,
-		    enum btree_id btree, unsigned level,
-		    struct bkey_s_c old, struct bkey_s new,
-		    unsigned flags)
-{
-	bool gc = flags & BTREE_TRIGGER_GC;
-	u64 journal_seq = trans->journal_res.seq;
-	u64 bucket_journal_seq;
-	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a_convert, new_a_convert;
-	const struct bch_alloc_v4 *old_a, *new_a;
-	struct bch_dev *ca;
-	int ret = 0;
-
-	/*
-	 * alloc btree is read in by bch2_alloc_read, not gc:
-	 */
-	if ((flags & BTREE_TRIGGER_GC) &&
-	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
-		return 0;
-
-	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
-				       "alloc key for invalid device or bucket"))
-		return -EIO;
-
-	ca = bch_dev_bkey_exists(c, new.k->p.inode);
-
-	old_a = bch2_alloc_to_v4(old, &old_a_convert);
-	new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
-
-	bucket_journal_seq = new_a->journal_seq;
+	if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+		u64 journal_seq = trans->journal_res.seq;
+		u64 bucket_journal_seq = new_a->journal_seq;
 
 		if ((flags & BTREE_TRIGGER_INSERT) &&
 		    data_type_is_empty(old_a->data_type) !=
 		    data_type_is_empty(new_a->data_type) &&
 		    new.k->type == KEY_TYPE_alloc_v4) {
-			struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
-
-			EBUG_ON(!journal_seq);
+			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
 
 			/*
 			 * If the btree updates referring to a bucket weren't flushed
@@ -910,27 +877,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
 		}
 
 		percpu_down_read(&c->mark_lock);
-	if (!gc && new_a->gen != old_a->gen)
-		*bucket_gen(ca, new.k->p.offset) = new_a->gen;
-
-	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
-
-	if (gc) {
-		struct bucket *g = gc_bucket(ca, new.k->p.offset);
-
-		bucket_lock(g);
-		g->gen_valid = 1;
-		g->gen = new_a->gen;
-		g->data_type = new_a->data_type;
-		g->stripe = new_a->stripe;
-		g->stripe_redundancy = new_a->stripe_redundancy;
-		g->dirty_sectors = new_a->dirty_sectors;
-		g->cached_sectors = new_a->cached_sectors;
-		bucket_unlock(g);
-	}
-	percpu_up_read(&c->mark_lock);
+		if (new_a->gen != old_a->gen)
+			*bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
 
 		if (new_a->data_type == BCH_DATA_free &&
 		    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
@@ -947,6 +897,30 @@ int bch2_mark_alloc(struct btree_trans *trans,
 		if (new_a->data_type == BCH_DATA_need_gc_gens)
 			bch2_do_gc_gens(c);
+
+		percpu_up_read(&c->mark_lock);
+	}
+
+	if ((flags & BTREE_TRIGGER_GC) &&
+	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+		struct bch_alloc_v4 new_a_convert;
+		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
+
+		percpu_down_read(&c->mark_lock);
+		struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+		bucket_lock(g);
+		g->gen_valid = 1;
+		g->gen = new_a->gen;
+		g->data_type = new_a->data_type;
+		g->stripe = new_a->stripe;
+		g->stripe_redundancy = new_a->stripe_redundancy;
+		g->dirty_sectors = new_a->dirty_sectors;
+		g->cached_sectors = new_a->cached_sectors;
+		bucket_unlock(g);
+
+		percpu_up_read(&c->mark_lock);
+	}
 
 	return 0;
 }
...
@@ -182,24 +182,24 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 #define bch2_bkey_ops_alloc ((struct bkey_ops) { \
 	.key_invalid = bch2_alloc_v1_invalid, \
 	.val_to_text = bch2_alloc_to_text, \
-	.trans_trigger = bch2_trans_mark_alloc, \
-	.atomic_trigger = bch2_mark_alloc, \
+	.trans_trigger = bch2_trigger_alloc, \
+	.atomic_trigger = bch2_trigger_alloc, \
 	.min_val_size = 8, \
 })
 
 #define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
 	.key_invalid = bch2_alloc_v2_invalid, \
 	.val_to_text = bch2_alloc_to_text, \
-	.trans_trigger = bch2_trans_mark_alloc, \
-	.atomic_trigger = bch2_mark_alloc, \
+	.trans_trigger = bch2_trigger_alloc, \
+	.atomic_trigger = bch2_trigger_alloc, \
 	.min_val_size = 8, \
 })
 
 #define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
 	.key_invalid = bch2_alloc_v3_invalid, \
 	.val_to_text = bch2_alloc_to_text, \
-	.trans_trigger = bch2_trans_mark_alloc, \
-	.atomic_trigger = bch2_mark_alloc, \
+	.trans_trigger = bch2_trigger_alloc, \
+	.atomic_trigger = bch2_trigger_alloc, \
 	.min_val_size = 16, \
 })
@@ -207,8 +207,8 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 	.key_invalid = bch2_alloc_v4_invalid, \
 	.val_to_text = bch2_alloc_to_text, \
 	.swab = bch2_alloc_v4_swab, \
-	.trans_trigger = bch2_trans_mark_alloc, \
-	.atomic_trigger = bch2_mark_alloc, \
+	.trans_trigger = bch2_trigger_alloc, \
+	.atomic_trigger = bch2_trigger_alloc, \
 	.min_val_size = 48, \
 })
@@ -232,9 +232,7 @@ static inline bool bkey_is_alloc(const struct bkey *k)
 int bch2_alloc_read(struct bch_fs *);
 
-int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
-			  struct bkey_s_c, struct bkey_s, unsigned);
-int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
-		    struct bkey_s_c, struct bkey_s, unsigned);
+int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
+		       struct bkey_s_c, struct bkey_s, unsigned);
 
 int bch2_check_alloc_info(struct bch_fs *);
 int bch2_check_alloc_to_lru_refs(struct bch_fs *);
...