Commit 153d1c63 authored by Kent Overstreet

bcachefs: unify alloc trigger

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6820ac2c
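This commit folds the transactional trigger (bch2_trans_mark_alloc) and the atomic mark trigger (bch2_mark_alloc) into a single bch2_trigger_alloc that branches on its flags argument: BTREE_TRIGGER_TRANSACTIONAL selects the in-transaction work, while the atomic and GC paths run otherwise. A rough sketch of that pattern follows; the flag and function names below are invented for illustration and are not bcachefs API:

/*
 * Sketch of the unified-trigger pattern; everything here is invented
 * for illustration and is not bcachefs API.
 */
#include <stdio.h>

enum trigger_flags {
	TRIGGER_TRANSACTIONAL	= 1 << 0,	/* runs inside a btree transaction */
	TRIGGER_ATOMIC		= 1 << 1,	/* runs when the transaction commits */
	TRIGGER_GC		= 1 << 2,	/* runs from the gc pass */
};

static int unified_trigger(unsigned flags)
{
	/* shared prelude: key validation and decoding happen once, for every phase */
	printf("validate key, decode old/new values\n");

	if (flags & TRIGGER_TRANSACTIONAL)
		printf("queue dependent btree updates (indexes, LRUs)\n");
	if (flags & TRIGGER_ATOMIC)
		printf("update in-memory accounting under locks\n");
	if (flags & TRIGGER_GC)
		printf("reconcile the gc copy of the bucket\n");
	return 0;
}

int main(void)
{
	/* one function serves every hook; callers pick the phase via flags */
	return unified_trigger(TRIGGER_TRANSACTIONAL) ?:
	       unified_trigger(TRIGGER_ATOMIC);
}

Keeping one function means the validation and decode of the old/new alloc keys is written once instead of being duplicated across two trigger entry points.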
fs/bcachefs/alloc_background.c

@@ -749,173 +749,163 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
 	return ret;
 }
 
-int bch2_trans_mark_alloc(struct btree_trans *trans,
-			  enum btree_id btree_id, unsigned level,
-			  struct bkey_s_c old, struct bkey_s new,
-			  unsigned flags)
+int bch2_trigger_alloc(struct btree_trans *trans,
+		       enum btree_id btree, unsigned level,
+		       struct bkey_s_c old, struct bkey_s new,
+		       unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a_convert, *new_a;
-	const struct bch_alloc_v4 *old_a;
-	u64 old_lru, new_lru;
 	int ret = 0;
 
-	/*
-	 * Deletion only happens in the device removal path, with
-	 * BTREE_TRIGGER_NORUN:
-	 */
-	BUG_ON(new.k->type != KEY_TYPE_alloc_v4);
-
-	old_a = bch2_alloc_to_v4(old, &old_a_convert);
-	new_a = bkey_s_to_alloc_v4(new).v;
-
-	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
-
-	if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
-		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
-		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
-		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
-	}
-
-	if (data_type_is_empty(new_a->data_type) &&
-	    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
-	    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
-		new_a->gen++;
-		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
-	}
-
-	if (old_a->data_type != new_a->data_type ||
-	    (new_a->data_type == BCH_DATA_free &&
-	     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
-		ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
-			bch2_bucket_do_index(trans, new.s_c, new_a, true);
-		if (ret)
-			return ret;
-	}
-
-	if (new_a->data_type == BCH_DATA_cached &&
-	    !new_a->io_time[READ])
-		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-
-	old_lru = alloc_lru_idx_read(*old_a);
-	new_lru = alloc_lru_idx_read(*new_a);
-
-	if (old_lru != new_lru) {
-		ret = bch2_lru_change(trans, new.k->p.inode,
-				      bucket_to_u64(new.k->p),
-				      old_lru, new_lru);
-		if (ret)
-			return ret;
-	}
-
-	new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
-					bch_dev_bkey_exists(c, new.k->p.inode));
-
-	if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
-		ret = bch2_lru_change(trans,
-				      BCH_LRU_FRAGMENTATION_START,
-				      bucket_to_u64(new.k->p),
-				      old_a->fragmentation_lru, new_a->fragmentation_lru);
-		if (ret)
-			return ret;
-	}
-
-	if (old_a->gen != new_a->gen) {
-		ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
-		if (ret)
-			return ret;
-	}
-
-	/*
-	 * need to know if we're getting called from the invalidate path or
-	 * not:
-	 */
-
-	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
-	    old_a->cached_sectors) {
-		ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
-						      -((s64) old_a->cached_sectors));
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-int bch2_mark_alloc(struct btree_trans *trans,
-		    enum btree_id btree, unsigned level,
-		    struct bkey_s_c old, struct bkey_s new,
-		    unsigned flags)
-{
-	bool gc = flags & BTREE_TRIGGER_GC;
-	u64 journal_seq = trans->journal_res.seq;
-	u64 bucket_journal_seq;
-	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a_convert, new_a_convert;
-	const struct bch_alloc_v4 *old_a, *new_a;
-	struct bch_dev *ca;
-	int ret = 0;
-
-	/*
-	 * alloc btree is read in by bch2_alloc_read, not gc:
-	 */
-	if ((flags & BTREE_TRIGGER_GC) &&
-	    !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
-		return 0;
-
-	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
-				       "alloc key for invalid device or bucket"))
-		return -EIO;
-
-	ca = bch_dev_bkey_exists(c, new.k->p.inode);
-
-	old_a = bch2_alloc_to_v4(old, &old_a_convert);
-	new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
-
-	bucket_journal_seq = new_a->journal_seq;
-
-	if ((flags & BTREE_TRIGGER_INSERT) &&
-	    data_type_is_empty(old_a->data_type) !=
-	    data_type_is_empty(new_a->data_type) &&
-	    new.k->type == KEY_TYPE_alloc_v4) {
-		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
-
-		EBUG_ON(!journal_seq);
-
-		/*
-		 * If the btree updates referring to a bucket weren't flushed
-		 * before the bucket became empty again, then the we don't have
-		 * to wait on a journal flush before we can reuse the bucket:
-		 */
-		v->journal_seq = bucket_journal_seq =
-			data_type_is_empty(new_a->data_type) &&
-			(journal_seq == v->journal_seq ||
-			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
-			? 0 : journal_seq;
-	}
-
-	if (!data_type_is_empty(old_a->data_type) &&
-	    data_type_is_empty(new_a->data_type) &&
-	    bucket_journal_seq) {
-		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
-				c->journal.flushed_seq_ondisk,
-				new.k->p.inode, new.k->p.offset,
-				bucket_journal_seq);
-		if (ret) {
-			bch2_fs_fatal_error(c,
-				"error setting bucket_needs_journal_commit: %i", ret);
-			return ret;
-		}
-	}
-
-	percpu_down_read(&c->mark_lock);
-	if (!gc && new_a->gen != old_a->gen)
-		*bucket_gen(ca, new.k->p.offset) = new_a->gen;
-
-	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
-
-	if (gc) {
+	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+				       "alloc key for invalid device or bucket"))
+		return -EIO;
+
+	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
+
+	struct bch_alloc_v4 old_a_convert;
+	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
+
+	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+
+		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+
+		if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
+			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+			new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+		}
+
+		if (data_type_is_empty(new_a->data_type) &&
+		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
+			new_a->gen++;
+			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+		}
+
+		if (old_a->data_type != new_a->data_type ||
+		    (new_a->data_type == BCH_DATA_free &&
+		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+			ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
+				bch2_bucket_do_index(trans, new.s_c, new_a, true);
+			if (ret)
+				return ret;
+		}
+
+		if (new_a->data_type == BCH_DATA_cached &&
+		    !new_a->io_time[READ])
+			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
+		u64 old_lru = alloc_lru_idx_read(*old_a);
+		u64 new_lru = alloc_lru_idx_read(*new_a);
+		if (old_lru != new_lru) {
+			ret = bch2_lru_change(trans, new.k->p.inode,
+					      bucket_to_u64(new.k->p),
+					      old_lru, new_lru);
+			if (ret)
+				return ret;
+		}
+
+		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+						bch_dev_bkey_exists(c, new.k->p.inode));
+		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+			ret = bch2_lru_change(trans,
+					      BCH_LRU_FRAGMENTATION_START,
+					      bucket_to_u64(new.k->p),
+					      old_a->fragmentation_lru, new_a->fragmentation_lru);
+			if (ret)
+				return ret;
+		}
+
+		if (old_a->gen != new_a->gen) {
+			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
+			if (ret)
+				return ret;
+		}
+
+		/*
+		 * need to know if we're getting called from the invalidate path or
+		 * not:
+		 */
+
+		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+		    old_a->cached_sectors) {
+			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
+							      -((s64) old_a->cached_sectors));
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+		u64 journal_seq = trans->journal_res.seq;
+		u64 bucket_journal_seq = new_a->journal_seq;
+
+		if ((flags & BTREE_TRIGGER_INSERT) &&
+		    data_type_is_empty(old_a->data_type) !=
+		    data_type_is_empty(new_a->data_type) &&
+		    new.k->type == KEY_TYPE_alloc_v4) {
+			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
+
+			/*
+			 * If the btree updates referring to a bucket weren't flushed
+			 * before the bucket became empty again, then the we don't have
+			 * to wait on a journal flush before we can reuse the bucket:
+			 */
+			v->journal_seq = bucket_journal_seq =
+				data_type_is_empty(new_a->data_type) &&
+				(journal_seq == v->journal_seq ||
+				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+				? 0 : journal_seq;
+		}
+
+		if (!data_type_is_empty(old_a->data_type) &&
+		    data_type_is_empty(new_a->data_type) &&
+		    bucket_journal_seq) {
+			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+					c->journal.flushed_seq_ondisk,
+					new.k->p.inode, new.k->p.offset,
+					bucket_journal_seq);
+			if (ret) {
+				bch2_fs_fatal_error(c,
+					"error setting bucket_needs_journal_commit: %i", ret);
+				return ret;
+			}
+		}
+
+		percpu_down_read(&c->mark_lock);
+		if (new_a->gen != old_a->gen)
+			*bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
+
+		if (new_a->data_type == BCH_DATA_free &&
+		    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
+			closure_wake_up(&c->freelist_wait);
+
+		if (new_a->data_type == BCH_DATA_need_discard &&
+		    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
+			bch2_do_discards(c);
+
+		if (old_a->data_type != BCH_DATA_cached &&
+		    new_a->data_type == BCH_DATA_cached &&
+		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+			bch2_do_invalidates(c);
+
+		if (new_a->data_type == BCH_DATA_need_gc_gens)
+			bch2_do_gc_gens(c);
+		percpu_up_read(&c->mark_lock);
+	}
+
+	if ((flags & BTREE_TRIGGER_GC) &&
+	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+		struct bch_alloc_v4 new_a_convert;
+		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
+
+		percpu_down_read(&c->mark_lock);
 		struct bucket *g = gc_bucket(ca, new.k->p.offset);
 
 		bucket_lock(g);
...
@@ -929,24 +919,8 @@ int bch2_mark_alloc(struct btree_trans *trans,
 		g->cached_sectors = new_a->cached_sectors;
 
 		bucket_unlock(g);
+		percpu_up_read(&c->mark_lock);
 	}
-	percpu_up_read(&c->mark_lock);
-
-	if (new_a->data_type == BCH_DATA_free &&
-	    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
-		closure_wake_up(&c->freelist_wait);
-
-	if (new_a->data_type == BCH_DATA_need_discard &&
-	    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
-		bch2_do_discards(c);
-
-	if (old_a->data_type != BCH_DATA_cached &&
-	    new_a->data_type == BCH_DATA_cached &&
-	    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
-		bch2_do_invalidates(c);
-
-	if (new_a->data_type == BCH_DATA_need_gc_gens)
-		bch2_do_gc_gens(c);
 
 	return 0;
 }
...
fs/bcachefs/alloc_background.h

@@ -182,24 +182,24 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 #define bch2_bkey_ops_alloc ((struct bkey_ops) {	\
 	.key_invalid	= bch2_alloc_v1_invalid,	\
 	.val_to_text	= bch2_alloc_to_text,		\
-	.trans_trigger	= bch2_trans_mark_alloc,	\
-	.atomic_trigger	= bch2_mark_alloc,		\
+	.trans_trigger	= bch2_trigger_alloc,		\
+	.atomic_trigger	= bch2_trigger_alloc,		\
 	.min_val_size	= 8,				\
 })
 
 #define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) {	\
 	.key_invalid	= bch2_alloc_v2_invalid,	\
 	.val_to_text	= bch2_alloc_to_text,		\
-	.trans_trigger	= bch2_trans_mark_alloc,	\
-	.atomic_trigger	= bch2_mark_alloc,		\
+	.trans_trigger	= bch2_trigger_alloc,		\
+	.atomic_trigger	= bch2_trigger_alloc,		\
 	.min_val_size	= 8,				\
 })
 
 #define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) {	\
 	.key_invalid	= bch2_alloc_v3_invalid,	\
 	.val_to_text	= bch2_alloc_to_text,		\
-	.trans_trigger	= bch2_trans_mark_alloc,	\
-	.atomic_trigger	= bch2_mark_alloc,		\
+	.trans_trigger	= bch2_trigger_alloc,		\
+	.atomic_trigger	= bch2_trigger_alloc,		\
 	.min_val_size	= 16,				\
 })
 
@@ -207,8 +207,8 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 	.key_invalid	= bch2_alloc_v4_invalid,	\
 	.val_to_text	= bch2_alloc_to_text,		\
 	.swab		= bch2_alloc_v4_swab,		\
-	.trans_trigger	= bch2_trans_mark_alloc,	\
-	.atomic_trigger	= bch2_mark_alloc,		\
+	.trans_trigger	= bch2_trigger_alloc,		\
+	.atomic_trigger	= bch2_trigger_alloc,		\
 	.min_val_size	= 48,				\
 })
 
@@ -232,10 +232,8 @@ static inline bool bkey_is_alloc(const struct bkey *k)
 
 int bch2_alloc_read(struct bch_fs *);
 
-int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
-			  struct bkey_s_c, struct bkey_s, unsigned);
-int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
-		    struct bkey_s_c, struct bkey_s, unsigned);
+int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
+		       struct bkey_s_c, struct bkey_s, unsigned);
 
 int bch2_check_alloc_info(struct bch_fs *);
 int bch2_check_alloc_to_lru_refs(struct bch_fs *);
 
 void bch2_do_discards(struct bch_fs *);
...
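With both the .trans_trigger and .atomic_trigger slots now pointing at bch2_trigger_alloc, the phase is selected entirely by the flags argument at the call site. A minimal sketch of that dispatch shape, with hypothetical, simplified types (example_ops and its helpers are invented; the real struct bkey_ops carries many more hooks):

/* Hypothetical, simplified dispatch; names invented for illustration. */
typedef int (*trigger_fn)(unsigned flags);

struct example_ops {
	trigger_fn trans_trigger;
	trigger_fn atomic_trigger;
};

static int example_trigger(unsigned flags)
{
	(void) flags;	/* would branch on flags, as sketched above */
	return 0;
}

static const struct example_ops example_alloc_ops = {
	.trans_trigger	= example_trigger,	/* same handler ... */
	.atomic_trigger	= example_trigger,	/* ... in both slots */
};

static int run_both_phases(void)
{
	/* transactional phase first, then the atomic phase at commit */
	return example_alloc_ops.trans_trigger(1U << 0) ?:
	       example_alloc_ops.atomic_trigger(1U << 1);
}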