Commit f25d8215 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Kill allocator threads & freelists

Now that we have new persistent data structures for the allocator, this
patch converts the allocator to use them.

Foreground bucket allocation now uses the freespace btree to find buckets to
allocate, instead of popping buckets off the freelist.

The background allocator threads are no longer needed and are deleted, along
with the allocator freelists. The only background tasks we still need are for
invalidating buckets containing cached data (when we are low on empty buckets)
and for issuing discards.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c6b2826c
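Before the diff, a minimal sketch of the strategy change described above (an
editor's illustration in userspace C: every name here is an illustrative
stand-in, not code from this patch, which walks the freespace btree inside a
btree transaction):

#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 16

/* Old scheme: a FIFO of bucket indices, refilled by an allocator thread. */
static int freelist[NBUCKETS];
static int fl_head, fl_tail;

static bool freelist_pop(int *bucket)
{
	if (fl_head == fl_tail)
		return false;		/* empty: callers block on the thread */
	*bucket = freelist[fl_head++ % NBUCKETS];
	return true;
}

/* New scheme: consult a persistent free-space index directly. */
static bool bucket_is_free[NBUCKETS];	/* stand-in for the freespace btree */
static int alloc_cursor;		/* stand-in for ca->alloc_cursor */

static bool freespace_alloc(int *bucket)
{
	for (int i = 0; i < NBUCKETS; i++) {
		int b = (alloc_cursor + i) % NBUCKETS;

		if (bucket_is_free[b]) {
			bucket_is_free[b] = false;
			alloc_cursor = (b + 1) % NBUCKETS; /* resume here next time */
			*bucket = b;
			return true;
		}
	}
	return false;	/* nothing free: wait on invalidate/discard workers */
}

int main(void)
{
	int b;

	if (!freelist_pop(&b))
		printf("freelist empty: the old code would wake the allocator thread\n");

	bucket_is_free[3] = bucket_is_free[7] = true;
	while (freespace_alloc(&b))
		printf("allocated bucket %d straight from the free-space index\n", b);
	return 0;
}

The point of the change is visible even in the model: the free-space index is
always current, so there is no intermediate freelist to keep full and no
thread to wake when it drains.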
This diff is collapsed.
@@ -8,8 +8,6 @@
#include "debug.h"
#include "super.h"
extern const char * const bch2_allocator_states[];
/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX 96U
@@ -117,42 +115,11 @@ int bch2_trans_mark_alloc(struct btree_trans *, struct bkey_s_c,
struct bkey_i *, unsigned);
int bch2_fs_freespace_init(struct bch_fs *);
static inline void bch2_wake_allocator(struct bch_dev *ca)
{
struct task_struct *p;
rcu_read_lock();
p = rcu_dereference(ca->alloc_thread);
if (p)
wake_up_process(p);
rcu_read_unlock();
}
static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
size_t bucket)
{
if (bch2_expensive_debug_checks) {
size_t iter;
long i;
unsigned j;
for (j = 0; j < RESERVE_NR; j++)
fifo_for_each_entry(i, &ca->free[j], iter)
BUG_ON(i == bucket);
fifo_for_each_entry(i, &ca->free_inc, iter)
BUG_ON(i == bucket);
}
}
void bch2_recalc_capacity(struct bch_fs *);
void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_quiesce(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_stop(struct bch_dev *);
int bch2_dev_allocator_start(struct bch_dev *);
void bch2_fs_allocator_background_init(struct bch_fs *);
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
This diff is collapsed.
@@ -14,6 +14,8 @@ struct bch_devs_List;
extern const char * const bch2_alloc_reserves[];
void bch2_reset_alloc_cursors(struct bch_fs *);
struct dev_alloc_list {
unsigned nr;
u8 devs[BCH_SB_MEMBERS_MAX];
@@ -136,6 +138,15 @@ int bch2_bucket_alloc_set(struct bch_fs *, struct open_buckets *,
unsigned, unsigned *, bool *, enum alloc_reserve,
unsigned, struct closure *);
int bch2_alloc_sectors_start_trans(struct btree_trans *,
unsigned, unsigned,
struct write_point_specifier,
struct bch_devs_list *,
unsigned, unsigned,
enum alloc_reserve,
unsigned,
struct closure *,
struct write_point **);
int bch2_alloc_sectors_start(struct bch_fs *,
unsigned, unsigned,
struct write_point_specifier,
......
@@ -10,18 +10,6 @@
struct ec_bucket_buf;
#define ALLOC_THREAD_STATES() \
x(stopped) \
x(running) \
x(blocked) \
x(blocked_full)
enum allocator_states {
#define x(n) ALLOCATOR_##n,
ALLOC_THREAD_STATES()
#undef x
};
#define BCH_ALLOC_RESERVES() \
x(btree_movinggc) \
x(btree) \
@@ -32,11 +20,8 @@ enum alloc_reserve {
#define x(name) RESERVE_##name,
BCH_ALLOC_RESERVES()
#undef x
RESERVE_NR
};
typedef FIFO(long) alloc_fifo;
#define OPEN_BUCKETS_COUNT 1024
#define WRITE_POINT_HASH_NR 32
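An editor's aside on the x-macro above: RESERVE_NR disappears because nothing
indexes per-reserve freelist FIFOs any more, but the enum still expands the
same way. A sketch of the expansion, assuming the entries elided by this view
are movinggc and none (both names appear elsewhere in the patch):

#define BCH_ALLOC_RESERVES()	\
	x(btree_movinggc)	\
	x(btree)		\
	x(movinggc)		\
	x(none)

enum alloc_reserve {
#define x(name)	RESERVE_##name,
	BCH_ALLOC_RESERVES()
#undef x
};

/* expands to: RESERVE_btree_movinggc, RESERVE_btree, RESERVE_movinggc, RESERVE_none */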
@@ -127,12 +112,4 @@ struct write_point_specifier {
unsigned long v;
};
struct alloc_heap_entry {
size_t bucket;
size_t nr;
unsigned long key;
};
typedef HEAP(struct alloc_heap_entry) alloc_heap;
#endif /* _BCACHEFS_ALLOC_TYPES_H */
@@ -462,34 +462,18 @@ struct bch_dev {
/* Allocator: */
u64 new_fs_bucket_idx;
struct task_struct __rcu *alloc_thread;
u64 alloc_cursor;
/*
* free: Buckets that are ready to be used
*
* free_inc: Incoming buckets - these are buckets that currently have
* cached data in them, and we can't reuse them until after we write
* their new gen to disk. After prio_write() finishes writing the new
* gens/prios, they'll be moved to the free list (and possibly discarded
* in the process)
*/
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
unsigned nr_open_buckets;
unsigned nr_btree_reserve;
open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
open_bucket_idx_t open_buckets_partial_nr;
size_t fifo_last_bucket;
size_t inc_gen_needs_gc;
size_t inc_gen_really_needs_gc;
size_t buckets_waiting_on_journal;
enum allocator_states allocator_state;
alloc_heap alloc_heap;
atomic64_t rebalance_work;
struct journal_device journal;
@@ -511,8 +495,6 @@ struct bch_dev {
enum {
/* startup: */
BCH_FS_ALLOC_CLEAN,
BCH_FS_ALLOCATOR_RUNNING,
BCH_FS_ALLOCATOR_STOPPING,
BCH_FS_INITIAL_GC_DONE,
BCH_FS_INITIAL_GC_UNFIXED,
BCH_FS_TOPOLOGY_REPAIR_DONE,
@@ -914,6 +896,7 @@ mempool_t bio_bounce_pages;
atomic_long_t read_realloc_races;
atomic_long_t extent_migrate_done;
atomic_long_t extent_migrate_raced;
atomic_long_t bucket_alloc_fail;
unsigned btree_gc_periodic:1;
unsigned copy_gc_enabled:1;
......
@@ -1684,9 +1684,8 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
*/
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
struct bch_dev *ca;
u64 start_time = local_clock();
unsigned i, iter = 0;
unsigned iter = 0;
int ret;
lockdep_assert_held(&c->state_lock);
@@ -1787,13 +1786,6 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
trace_gc_end(c);
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
/*
* Wake up allocator in case it was waiting for buckets
* because of not being able to inc gens
*/
for_each_member_device(ca, c, i)
bch2_wake_allocator(ca);
/*
* At startup, allocations can happen directly instead of via the
* allocator thread - issue wakeup in case they blocked on gc_lock:
......
@@ -178,12 +178,13 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
six_unlock_intent(&b->c.lock);
}
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct disk_reservation *res,
struct closure *cl,
bool interior_node,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct write_point *wp;
struct btree *b;
__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
@@ -214,7 +215,7 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
mutex_unlock(&c->btree_reserve_cache_lock);
retry:
ret = bch2_alloc_sectors_start(c,
ret = bch2_alloc_sectors_start_trans(trans,
c->opts.metadata_target ?:
c->opts.foreground_target,
0,
@@ -414,7 +415,8 @@ static void bch2_btree_reserve_put(struct btree_update *as)
mutex_unlock(&c->btree_reserve_cache_lock);
}
static int bch2_btree_reserve_get(struct btree_update *as,
static int bch2_btree_reserve_get(struct btree_trans *trans,
struct btree_update *as,
unsigned nr_nodes[2],
unsigned flags,
struct closure *cl)
@@ -441,7 +443,7 @@ static int bch2_btree_reserve_get(struct btree_update *as,
struct prealloc_nodes *p = as->prealloc_nodes + interior;
while (p->nr < nr_nodes[interior]) {
b = __bch2_btree_node_alloc(c, &as->disk_res,
b = __bch2_btree_node_alloc(trans, &as->disk_res,
flags & BTREE_INSERT_NOWAIT ? NULL : cl,
interior, flags);
if (IS_ERR(b)) {
@@ -1066,8 +1068,9 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
if (ret)
goto err;
ret = bch2_btree_reserve_get(as, nr_nodes, flags, NULL);
if (ret) {
ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL);
if (ret == -EAGAIN ||
ret == -ENOMEM) {
struct closure cl;
closure_init_stack(&cl);
@@ -1075,7 +1078,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
bch2_trans_unlock(trans);
do {
ret = bch2_btree_reserve_get(as, nr_nodes, flags, &cl);
ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);
closure_sync(&cl);
} while (ret == -EAGAIN);
}
......
@@ -296,11 +296,6 @@ static inline int bucket_sectors_fragmented(struct bch_dev *ca,
: 0;
}
static inline int is_stripe_data_bucket(struct bucket_mark m)
{
return m.stripe && m.data_type != BCH_DATA_parity;
}
static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
return m.cached_sectors && !m.dirty_sectors
@@ -350,9 +345,6 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
preempt_enable();
if (!is_available_bucket(old) && is_available_bucket(new))
bch2_wake_allocator(ca);
}
static inline int __update_replicas(struct bch_fs *c,
@@ -488,19 +480,6 @@ static inline void update_cached_sectors_list(struct btree_trans *trans,
update_replicas_list(trans, &r.e, sectors);
}
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator)
{
struct bucket *g = bucket(ca, b);
struct bucket_mark old, new;
old = bucket_cmpxchg(g, new, ({
new.owned_by_allocator = owned_by_allocator;
}));
BUG_ON(owned_by_allocator == old.owned_by_allocator);
}
int bch2_mark_alloc(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
@@ -560,6 +539,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
}
if (!new_a.data_type &&
(!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
if (bucket_state(new_a) == BUCKET_need_gc_gens) {
atomic_inc(&c->kick_gc);
wake_up_process(c->gc_thread);
@@ -583,7 +566,6 @@ int bch2_mark_alloc(struct btree_trans *trans,
g->io_time[READ] = new_a.io_time[READ];
g->io_time[WRITE] = new_a.io_time[WRITE];
g->oldest_gen = new_a.oldest_gen;
g->gen_valid = 1;
g->stripe = new_a.stripe;
g->stripe_redundancy = new_a.stripe_redundancy;
@@ -1861,8 +1843,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
a->v.data_type = type;
a->v.dirty_sectors = sectors;
ret = bch2_trans_update(trans, &iter, &a->k_i,
BTREE_UPDATE_NO_KEY_CACHE_COHERENCY);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto out;
out:
@@ -2048,24 +2029,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
struct bucket_array *buckets = NULL, *old_buckets = NULL;
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
unsigned long *buckets_nouse = NULL;
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
alloc_heap alloc_heap;
size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / btree_sectors(c));
/* XXX: these should be tunable */
size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);
size_t free_inc_nr = max(max_t(size_t, 1, nbuckets >> 12),
btree_reserve * 2);
bool resize = ca->buckets[0] != NULL;
int ret = -ENOMEM;
unsigned i;
memset(&free, 0, sizeof(free));
memset(&free_inc, 0, sizeof(free_inc));
memset(&alloc_heap, 0, sizeof(alloc_heap));
if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
nbuckets * sizeof(struct bucket),
@@ -2075,12 +2040,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
(c->opts.buckets_nouse &&
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO))) ||
!init_fifo(&free[RESERVE_movinggc],
copygc_reserve, GFP_KERNEL) ||
!init_fifo(&free[RESERVE_none], reserve_none, GFP_KERNEL) ||
!init_fifo(&free_inc, free_inc_nr, GFP_KERNEL) ||
!init_heap(&alloc_heap, ALLOC_SCAN_BATCH(ca) << 1, GFP_KERNEL))
GFP_KERNEL|__GFP_ZERO))))
goto err;
buckets->first_bucket = ca->mi.first_bucket;
@@ -2126,18 +2086,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
up_write(&c->gc_lock);
}
spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++) {
fifo_move(&free[i], &ca->free[i]);
swap(ca->free[i], free[i]);
}
fifo_move(&free_inc, &ca->free_inc);
swap(ca->free_inc, free_inc);
spin_unlock(&c->freelist_lock);
/* with gc lock held, alloc_heap can't be in use: */
swap(ca->alloc_heap, alloc_heap);
nbuckets = ca->mi.nbuckets;
if (resize)
@@ -2145,10 +2093,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
ret = 0;
err:
free_heap(&alloc_heap);
free_fifo(&free_inc);
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&free[i]);
kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
if (bucket_gens)
@@ -2163,10 +2107,6 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
{
unsigned i;
free_heap(&ca->alloc_heap);
free_fifo(&ca->free_inc);
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&ca->free[i]);
kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
......
@@ -58,11 +58,6 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
return __bucket(ca, b, true);
}
static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
{
return __bucket(ca, b, false);
}
static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
return rcu_dereference_check(ca->bucket_gens,
@@ -151,50 +146,50 @@ static inline bool is_available_bucket(struct bucket_mark mark)
struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
static inline u64 __dev_buckets_available(struct bch_dev *ca,
struct bch_dev_usage stats)
struct bch_dev_usage stats,
enum alloc_reserve reserve)
{
u64 total = ca->mi.nbuckets - ca->mi.first_bucket;
s64 total = ca->mi.nbuckets - ca->mi.first_bucket;
s64 reserved = 0;
switch (reserve) {
case RESERVE_none:
reserved += ca->mi.nbuckets >> 6;
fallthrough;
case RESERVE_movinggc:
reserved += ca->nr_btree_reserve;
fallthrough;
case RESERVE_btree:
reserved += ca->nr_btree_reserve;
fallthrough;
case RESERVE_btree_movinggc:
break;
default:
BUG();
}
if (WARN_ONCE(stats.buckets_unavailable > total,
"buckets_unavailable overflow (%llu > %llu)\n",
stats.buckets_unavailable, total))
return 0;
return total - stats.buckets_unavailable;
return max_t(s64, 0,
total -
stats.buckets_unavailable -
ca->nr_open_buckets -
reserved);
}
static inline u64 dev_buckets_available(struct bch_dev *ca)
static inline u64 dev_buckets_available(struct bch_dev *ca,
enum alloc_reserve reserve)
{
return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
}
static inline u64 __dev_buckets_reclaimable(struct bch_dev *ca,
struct bch_dev_usage stats)
{
struct bch_fs *c = ca->fs;
s64 available = __dev_buckets_available(ca, stats);
unsigned i;
spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++)
available -= fifo_used(&ca->free[i]);
available -= fifo_used(&ca->free_inc);
available -= ca->nr_open_buckets;
spin_unlock(&c->freelist_lock);
return max(available, 0LL);
}
static inline u64 dev_buckets_reclaimable(struct bch_dev *ca)
{
return __dev_buckets_reclaimable(ca, bch2_dev_usage_read(ca));
return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
}
/* Filesystem usage: */
static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
return sizeof(struct bch_fs_usage) / sizeof(u64) +
READ_ONCE(c->replicas.nr);
}
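An editor's worked example of the reserve cascade above: the cases fall
through, so RESERVE_none holds back (nbuckets >> 6) + 2 * nr_btree_reserve
buckets while RESERVE_btree_movinggc holds back none. With assumed sizes (the
constants below are illustrative, not from the patch):

#include <stdio.h>

#define BTREE_NODE_RESERVE	32			/* assumed value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	long long nbuckets		= 1 << 20;	/* 1M buckets */
	long long bucket_sectors	= 1024;		/* 512 KiB buckets */
	long long btree_node_sectors	= 512;		/* 256 KiB btree nodes */

	/* mirrors the nr_btree_reserve computation in __bch2_dev_alloc(): */
	long long nr_btree_reserve =
		DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     bucket_sectors / btree_node_sectors);

	long long reserved = (nbuckets >> 6) + 2 * nr_btree_reserve;

	/* 16384 + 2 * 16 = 16416 of 1048576 buckets, about 1.6% */
	printf("RESERVE_none holds back %lld buckets\n", reserved);
	return 0;
}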
@@ -222,7 +217,6 @@
void bch2_fs_usage_initialize(struct bch_fs *);
void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *, size_t, bool);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
......
@@ -14,7 +14,6 @@ struct bucket_mark {
struct {
u8 gen;
u8 data_type:3,
owned_by_allocator:1,
stripe:1;
u16 dirty_sectors;
u16 cached_sectors;
@@ -29,7 +28,6 @@ struct bucket {
};
u64 io_time[2];
u8 oldest_gen;
unsigned gen_valid:1;
u8 stripe_redundancy;
u32 stripe;
......
@@ -1295,9 +1295,6 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
BUG_ON(nr_have_data > h->s->nr_data);
BUG_ON(nr_have_parity > h->s->nr_parity);
percpu_down_read(&c->mark_lock);
rcu_read_lock();
buckets.nr = 0;
if (nr_have_parity < h->s->nr_parity) {
ret = bch2_bucket_alloc_set(c, &buckets,
@@ -1324,7 +1321,7 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
}
if (ret)
goto err;
return ret;
}
buckets.nr = 0;
@@ -1352,12 +1349,10 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
}
if (ret)
goto err;
return ret;
}
err:
rcu_read_unlock();
percpu_up_read(&c->mark_lock);
return ret;
return 0;
}
/* XXX: doesn't obey target: */
......
@@ -812,10 +812,8 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
break;
}
} else {
rcu_read_lock();
ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
false, cl);
rcu_read_unlock();
if (IS_ERR(ob[nr_got])) {
ret = cl ? -EAGAIN : -ENOSPC;
break;
......
@@ -1398,6 +1398,10 @@ static void journal_write_done(struct closure *cl)
if (!JSET_NO_FLUSH(w->data)) {
j->flushed_seq_ondisk = seq;
j->last_seq_ondisk = w->last_seq;
closure_wake_up(&c->freelist_wait);
bch2_reset_alloc_cursors(c);
}
} else if (!j->err_seq || seq < j->err_seq)
j->err_seq = seq;
......
@@ -104,18 +104,6 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
return DATA_SKIP;
}
static bool have_copygc_reserve(struct bch_dev *ca)
{
bool ret;
spin_lock(&ca->fs->freelist_lock);
ret = fifo_full(&ca->free[RESERVE_movinggc]) ||
ca->allocator_state != ALLOCATOR_running;
spin_unlock(&ca->fs->freelist_lock);
return ret;
}
static inline int fragmentation_cmp(copygc_heap *heap,
struct copygc_heap_entry l,
struct copygc_heap_entry r)
@@ -247,11 +235,10 @@ static int bch2_copygc(struct bch_fs *c)
}
for_each_rw_member(ca, c, dev_idx) {
closure_wait_event(&c->freelist_wait, have_copygc_reserve(ca));
s64 avail = min(dev_buckets_available(ca, RESERVE_movinggc),
ca->mi.nbuckets >> 6);
spin_lock(&ca->fs->freelist_lock);
sectors_reserved += fifo_used(&ca->free[RESERVE_movinggc]) * ca->mi.bucket_size;
spin_unlock(&ca->fs->freelist_lock);
sectors_reserved += avail * ca->mi.bucket_size;
}
ret = walk_buckets_to_copygc(c);
@@ -352,8 +339,8 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
for_each_rw_member(ca, c, dev_idx) {
struct bch_dev_usage usage = bch2_dev_usage_read(ca);
fragmented_allowed = ((__dev_buckets_reclaimable(ca, usage) *
ca->mi.bucket_size) >> 1);
fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
ca->mi.bucket_size) >> 1);
fragmented = usage.d[BCH_DATA_user].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
......
@@ -1374,6 +1374,7 @@ int bch2_fs_initialize(struct bch_fs *c)
* Write out the superblock and journal buckets, now that we can do
* btree updates
*/
bch_verbose(c, "marking superblocks");
err = "error marking superblock and journal";
for_each_member_device(ca, c, i) {
ret = bch2_trans_mark_dev_sb(c, ca);
@@ -1385,6 +1386,7 @@ int bch2_fs_initialize(struct bch_fs *c)
ca->new_fs_bucket_idx = 0;
}
bch_verbose(c, "initializing freespace");
err = "error initializing freespace";
ret = bch2_fs_freespace_init(c);
if (ret)
......
@@ -206,17 +206,9 @@ static void __bch2_fs_read_only(struct bch_fs *c)
*/
bch2_journal_flush_all_pins(&c->journal);
/*
* If the allocator threads didn't all start up, the btree updates to
* write out alloc info aren't going to work:
*/
if (!test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags))
goto nowrote_alloc;
bch_verbose(c, "flushing journal and stopping allocators");
bch2_journal_flush_all_pins(&c->journal);
set_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
do {
clean_passes++;
@@ -241,17 +233,11 @@ static void __bch2_fs_read_only(struct bch_fs *c)
bch_verbose(c, "flushing journal and stopping allocators complete");
set_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
nowrote_alloc:
closure_wait_event(&c->btree_interior_update_wait,
!bch2_btree_interior_updates_nr_pending(c));
flush_work(&c->btree_interior_update_work);
for_each_member_device(ca, c, i)
bch2_dev_allocator_stop(ca);
clear_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
clear_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
bch2_fs_journal_stop(&c->journal);
/*
@@ -287,10 +273,6 @@ void bch2_fs_read_only(struct bch_fs *c)
/*
* Block new foreground-end write operations from starting - any new
* writes will return -EROFS:
*
* (This is really blocking new _allocations_, writes to previously
* allocated space can still happen until stopping the allocator in
* bch2_dev_allocator_stop()).
*/
percpu_ref_kill(&c->writes);
@@ -419,20 +401,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {
bch_err(c, "error starting allocator threads");
percpu_ref_put(&ca->io_ref);
goto err;
}
}
set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
for_each_rw_member(ca, c, i)
bch2_wake_allocator(ca);
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
@@ -946,20 +914,6 @@ int bch2_fs_start(struct bch_fs *c)
set_bit(BCH_FS_STARTED, &c->flags);
/*
* Allocator threads don't start filling copygc reserve until after we
* set BCH_FS_STARTED - wake them now:
*
* XXX ugly hack:
* Need to set ca->allocator_state here instead of relying on the
* allocator threads to do it to avoid racing with the copygc threads
* checking it and thinking they have no alloc reserve:
*/
for_each_online_member(ca, c, i) {
ca->allocator_state = ALLOCATOR_running;
bch2_wake_allocator(ca);
}
if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c);
} else {
@@ -1051,8 +1005,6 @@ static void bch2_dev_release(struct kobject *kobj)
static void bch2_dev_free(struct bch_dev *ca)
{
bch2_dev_allocator_stop(ca);
cancel_work_sync(&ca->io_error_work);
if (ca->kobj.state_in_sysfs &&
@@ -1167,6 +1119,9 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
ca->mi = bch2_mi_to_cpu(member);
ca->uuid = member->uuid;
ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / btree_sectors(c));
if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
0, GFP_KERNEL) ||
percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
@@ -1216,12 +1171,6 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
ca->fs = c;
if (ca->mi.state == BCH_MEMBER_STATE_rw &&
bch2_dev_allocator_start(ca)) {
bch2_dev_free(ca);
goto err;
}
bch2_dev_attach(c, ca, dev_idx);
out:
pr_verbose_init(c->opts, "ret %i", ret);
@@ -1405,14 +1354,13 @@ static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
/*
* The allocator thread itself allocates btree nodes, so stop it first:
*/
bch2_dev_allocator_stop(ca);
bch2_dev_allocator_remove(c, ca);
bch2_dev_journal_stop(&c->journal, ca);
bch2_copygc_start(c);
}
static int __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
lockdep_assert_held(&c->state_lock);
@@ -1420,8 +1368,6 @@ static int __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
return bch2_dev_allocator_start(ca);
}
int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
@@ -1448,7 +1394,7 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
mutex_unlock(&c->sb_lock);
if (new_state == BCH_MEMBER_STATE_rw)
ret = __bch2_dev_read_write(c, ca);
__bch2_dev_read_write(c, ca);
rebalance_wakeup(c);
@@ -1710,13 +1656,8 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
ca->new_fs_bucket_idx = 0;
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
ret = __bch2_dev_read_write(c, ca);
if (ret) {
bch_err(c, "device add error: error going RW on new device: %i", ret);
goto err_late;
}
}
if (ca->mi.state == BCH_MEMBER_STATE_rw)
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
return 0;
@@ -1776,11 +1717,8 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
goto err;
}
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
ret = __bch2_dev_read_write(c, ca);
if (ret)
goto err;
}
if (ca->mi.state == BCH_MEMBER_STATE_rw)
__bch2_dev_read_write(c, ca);
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb.sb);
......
@@ -170,7 +170,6 @@ read_attribute(congested);
read_attribute(btree_avg_write_size);
read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
@@ -186,11 +185,11 @@ read_attribute(internal_uuid);
read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);
read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);
read_attribute(bucket_alloc_fail);
rw_attribute(discard);
rw_attribute(label);
@@ -377,6 +376,8 @@ SHOW(bch2_fs)
atomic_long_read(&c->extent_migrate_done));
sysfs_print(extent_migrate_raced,
atomic_long_read(&c->extent_migrate_raced));
sysfs_print(bucket_alloc_fail,
atomic_long_read(&c->bucket_alloc_fail));
sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
@@ -577,6 +578,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,
&sysfs_extent_migrate_raced,
&sysfs_bucket_alloc_fail,
&sysfs_gc_gens_pos,
@@ -705,24 +707,6 @@ struct attribute *bch2_fs_time_stats_files[] = {
NULL
};
static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
enum alloc_reserve i;
spin_lock(&ca->fs->freelist_lock);
pr_buf(out, "free_inc:\t%zu\t%zu\n",
fifo_used(&ca->free_inc),
ca->free_inc.size);
for (i = 0; i < RESERVE_NR; i++)
pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
fifo_used(&ca->free[i]),
ca->free[i].size);
spin_unlock(&ca->fs->freelist_lock);
}
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
@@ -748,9 +732,6 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
"ec\t%16llu\n"
"available%15llu\n"
"\n"
"free_inc\t\t%zu/%zu\n"
"free[RESERVE_MOVINGGC]\t%zu/%zu\n"
"free[RESERVE_NONE]\t%zu/%zu\n"
"freelist_wait\t\t%s\n"
"open buckets allocated\t%u\n"
"open buckets this dev\t%u\n"
@@ -758,13 +739,9 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
"open_buckets_wait\t%s\n"
"open_buckets_btree\t%u\n"
"open_buckets_user\t%u\n"
"btree reserve cache\t%u\n"
"thread state:\t\t%s\n",
"btree reserve cache\t%u\n",
stats.buckets_ec,
__dev_buckets_available(ca, stats),
fifo_used(&ca->free_inc), ca->free_inc.size,
fifo_used(&ca->free[RESERVE_movinggc]), ca->free[RESERVE_movinggc].size,
fifo_used(&ca->free[RESERVE_none]), ca->free[RESERVE_none].size,
__dev_buckets_available(ca, stats, RESERVE_none),
c->freelist_wait.list.first ? "waiting" : "empty",
OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
ca->nr_open_buckets,
@@ -772,8 +749,7 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
c->open_buckets_wait.list.first ? "waiting" : "empty",
nr[BCH_DATA_btree],
nr[BCH_DATA_user],
c->btree_reserve_cache_nr,
bch2_allocator_states[ca->allocator_state]);
c->btree_reserve_cache_nr);
}
static const char * const bch2_rw[] = {
@@ -848,9 +824,6 @@ SHOW(bch2_dev)
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
* 100 / CONGESTED_MAX);
if (attr == &sysfs_reserve_stats)
reserve_stats_to_text(out, ca);
if (attr == &sysfs_alloc_debug)
dev_alloc_debug_to_text(out, ca);
@@ -890,9 +863,6 @@ STORE(bch2_dev)
return ret;
}
if (attr == &sysfs_wake_allocator)
bch2_wake_allocator(ca);
return size;
}
SYSFS_OPS(bch2_dev);
@@ -918,11 +888,8 @@ struct attribute *bch2_dev_files[] = {
&sysfs_io_latency_stats_write,
&sysfs_congested,
&sysfs_reserve_stats,
/* debug: */
&sysfs_alloc_debug,
&sysfs_wake_allocator,
NULL
};
......
@@ -471,37 +471,74 @@ TRACE_EVENT(invalidate,
);
DECLARE_EVENT_CLASS(bucket_alloc,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
TP_ARGS(ca, alloc_reserve),
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
u64 avail,
u64 seen,
u64 open,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
TP_ARGS(ca, alloc_reserve, avail, seen, open, need_journal_commit, nouse, nonblocking, ret),
TP_STRUCT__entry(
__field(dev_t, dev )
__array(char, reserve, 16 )
__field(dev_t, dev )
__array(char, reserve, 16 )
__field(u64, avail )
__field(u64, seen )
__field(u64, open )
__field(u64, need_journal_commit )
__field(u64, nouse )
__field(bool, nonblocking )
__field(int, ret )
),
TP_fast_assign(
__entry->dev = ca->dev;
strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
__entry->avail = avail;
__entry->seen = seen;
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->nouse = nouse;
__entry->nonblocking = nonblocking;
__entry->ret = ret;
),
TP_printk("%d,%d reserve %s",
TP_printk("%d,%d reserve %s avail %llu seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u ret %i",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->reserve)
__entry->reserve,
__entry->avail,
__entry->seen,
__entry->open,
__entry->need_journal_commit,
__entry->nouse,
__entry->nonblocking,
__entry->ret)
);
DEFINE_EVENT(bucket_alloc, bucket_alloc,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
TP_ARGS(ca, alloc_reserve)
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
u64 avail,
u64 seen,
u64 open,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
TP_ARGS(ca, alloc_reserve, avail, seen, open, need_journal_commit, nouse, nonblocking, ret)
);
DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
TP_ARGS(ca, alloc_reserve)
);
DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
TP_ARGS(ca, alloc_reserve)
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
u64 avail,
u64 seen,
u64 open,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
TP_ARGS(ca, alloc_reserve, avail, seen, open, need_journal_commit, nouse, nonblocking, ret)
);
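Given the TP_printk format above, a rendered bucket_alloc_fail event would
look roughly like this (an editor's illustration; every field value below is
invented):

254,0 reserve none avail 0 seen 512 open 32 need_journal_commit 9 nouse 0 nonblocking 1 ret -28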
/* Moving IO */
......