Commit 51c66fed authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Rip out copygc pd controller

We have a separate mechanism for ratelimiting copygc now - the pd
controller has only been causing problems.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 5bbe4bf9
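Note: the "separate mechanism" the commit message refers to is not part of this diff; what is visible below is only its sysfs side, where copy_gc_wait is computed from c->copygc_wait minus the write I/O clock. The following is a minimal userspace sketch of that general idea, assuming a wait-based throttle: copygc derives a wait target on a "sectors written" clock and does not run again until foreground writes have advanced past it. All names and numbers here (io_clock_now, copygc_wait_amount, the factor of 2) are hypothetical illustrations, not the bcachefs implementation.

```c
/*
 * Userspace sketch of wait-based copygc throttling. Illustration only;
 * names and policy are made up, not the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend "I/O clock": total sectors written by foreground I/O so far. */
static uint64_t io_clock_now;

/* Simulate foreground writes advancing the clock. */
static void simulate_writes(uint64_t sectors)
{
	io_clock_now += sectors;
}

/*
 * Hypothetical policy: don't run copygc again until foreground writes
 * have advanced by some multiple of the space copygc could reclaim.
 */
static uint64_t copygc_wait_amount(uint64_t fragmented_sectors)
{
	return fragmented_sectors * 2;
}

int main(void)
{
	uint64_t fragmented_sectors = 1024;	/* reclaimable space, in sectors */
	uint64_t wait_until = io_clock_now + copygc_wait_amount(fragmented_sectors);

	while (io_clock_now < wait_until) {
		/* In a kernel this would be a sleep on the I/O clock, not a poll. */
		simulate_writes(256);
		printf("io clock %llu / wait target %llu\n",
		       (unsigned long long)io_clock_now,
		       (unsigned long long)wait_until);
	}

	printf("wait satisfied, copygc would run now\n");
	return 0;
}
```

The point of such a scheme is that copygc paces itself against actual foreground write progress rather than against the output of a PID-style controller, which is why the controller below can be removed outright.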
@@ -31,38 +31,6 @@ static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
 #undef x
 };
-/* Ratelimiting/PD controllers */
-static void pd_controllers_update(struct work_struct *work)
-{
-	struct bch_fs *c = container_of(to_delayed_work(work),
-					struct bch_fs,
-					pd_controllers_update);
-	struct bch_dev *ca;
-	s64 free = 0, fragmented = 0;
-	unsigned i;
-	for_each_member_device(ca, c, i) {
-		struct bch_dev_usage stats = bch2_dev_usage_read(ca);
-		free += bucket_to_sector(ca,
-				__dev_buckets_available(ca, stats)) << 9;
-		/*
-		 * Bytes of internal fragmentation, which can be
-		 * reclaimed by copy GC
-		 */
-		fragmented += max_t(s64, 0, (bucket_to_sector(ca,
-					stats.d[BCH_DATA_user].buckets +
-					stats.d[BCH_DATA_cached].buckets) -
-				  (stats.d[BCH_DATA_user].sectors +
-				   stats.d[BCH_DATA_cached].sectors)) << 9);
-	}
-	bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
-	schedule_delayed_work(&c->pd_controllers_update,
-			      c->pd_controllers_update_seconds * HZ);
-}
 /* Persistent alloc info: */
 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
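The "fragmented" figure the removed controller fed to bch2_pd_controller_update() is simply: sectors covered by buckets holding user/cached data, minus the live user/cached sectors actually stored in them, clamped at zero and converted to bytes. A tiny standalone illustration of that arithmetic follows; the bucket size, counts, and the usage struct are made-up values for the example, not bcachefs data.

```c
/*
 * Standalone illustration of the "internal fragmentation" computation
 * from the removed pd_controllers_update(); all values are made up.
 */
#include <stdint.h>
#include <stdio.h>

struct usage {
	uint64_t buckets;	/* buckets allocated to this data type */
	uint64_t sectors;	/* live sectors actually stored in them */
};

int main(void)
{
	const uint64_t bucket_sectors = 1024;	/* hypothetical bucket size */
	struct usage user   = { .buckets = 100, .sectors = 70 * 1024 };
	struct usage cached = { .buckets =  20, .sectors = 10 * 1024 };

	int64_t allocated = (int64_t)((user.buckets + cached.buckets) * bucket_sectors);
	int64_t live      = (int64_t)(user.sectors + cached.sectors);

	/* Space copygc could reclaim (sectors), clamped at 0, then -> bytes. */
	int64_t fragmented = allocated - live;
	if (fragmented < 0)
		fragmented = 0;
	fragmented <<= 9;

	printf("fragmented bytes reclaimable by copygc: %lld\n",
	       (long long)fragmented);
	return 0;
}
```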
@@ -1405,7 +1373,4 @@ int bch2_dev_allocator_start(struct bch_dev *ca)
 void bch2_fs_allocator_background_init(struct bch_fs *c)
 {
 	spin_lock_init(&c->freelist_lock);
-	c->pd_controllers_update_seconds = 5;
-	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
 }
@@ -664,9 +664,6 @@ struct bch_fs {
 	struct workqueue_struct *copygc_wq;
 	/* ALLOCATION */
-	struct delayed_work pd_controllers_update;
-	unsigned pd_controllers_update_seconds;
 	struct bch_devs_mask rw_devs[BCH_DATA_NR];
 	u64 capacity; /* sectors */
@@ -771,7 +768,6 @@ mempool_t bio_bounce_pages;
 	/* COPYGC */
 	struct task_struct *copygc_thread;
 	copygc_heap copygc_heap;
-	struct bch_pd_controller copygc_pd;
 	struct write_point copygc_write_point;
 	s64 copygc_wait;
@@ -222,7 +222,7 @@ static int bch2_copygc(struct bch_fs *c)
 	ret = bch2_move_data(c,
			     0, POS_MIN,
			     BTREE_ID_NR, POS_MAX,
-			     &c->copygc_pd.rate,
+			     NULL,
			     writepoint_ptr(&c->copygc_write_point),
			     copygc_pred, NULL,
			     &move_stats);
@@ -328,9 +328,6 @@ static int bch2_copygc_thread(void *arg)
 void bch2_copygc_stop(struct bch_fs *c)
 {
-	c->copygc_pd.rate.rate = UINT_MAX;
-	bch2_ratelimit_reset(&c->copygc_pd.rate);
 	if (c->copygc_thread) {
 		kthread_stop(c->copygc_thread);
 		put_task_struct(c->copygc_thread);
@@ -367,6 +364,4 @@ int bch2_copygc_start(struct bch_fs *c)
 void bch2_fs_copygc_init(struct bch_fs *c)
 {
-	bch2_pd_controller_init(&c->copygc_pd);
-	c->copygc_pd.d_term = 0;
 }
@@ -293,7 +293,6 @@ void bch2_fs_read_only(struct bch_fs *c)
 	percpu_ref_kill(&c->writes);
 	cancel_work_sync(&c->ec_stripe_delete_work);
-	cancel_delayed_work(&c->pd_controllers_update);
 	/*
 	 * If we're not doing an emergency shutdown, we want to wait on
@@ -378,8 +377,6 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
 		return ret;
 	}
-	schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
 	schedule_work(&c->ec_stripe_delete_work);
 	return 0;
@@ -571,7 +568,6 @@ void __bch2_fs_stop(struct bch_fs *c)
 		cancel_work_sync(&ca->io_error_work);
 	cancel_work_sync(&c->btree_write_error_work);
-	cancel_delayed_work_sync(&c->pd_controllers_update);
 	cancel_work_sync(&c->read_only_work);
 }
@@ -189,7 +189,6 @@ rw_attribute(label);
 rw_attribute(copy_gc_enabled);
 read_attribute(copy_gc_wait);
-sysfs_pd_controller_attribute(copy_gc);
 rw_attribute(rebalance_enabled);
 sysfs_pd_controller_attribute(rebalance);
@@ -198,8 +197,6 @@ rw_attribute(promote_whole_extents);
 read_attribute(new_stripes);
-rw_attribute(pd_controllers_update_seconds);
 read_attribute(io_timers_read);
 read_attribute(io_timers_write);
@@ -331,12 +328,8 @@ SHOW(bch2_fs)
 	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
-	sysfs_print(pd_controllers_update_seconds,
-		    c->pd_controllers_update_seconds);
 	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
 	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
-	sysfs_pd_controller_show(copy_gc, &c->copygc_pd);
 	sysfs_hprint(copy_gc_wait,
		     max(0LL, c->copygc_wait -
			 atomic64_read(&c->io_clock[WRITE].now)) << 9);
@@ -447,10 +440,7 @@ STORE(bch2_fs)
 		return ret;
 	}
-	sysfs_strtoul(pd_controllers_update_seconds,
-		      c->pd_controllers_update_seconds);
 	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
-	sysfs_pd_controller_store(copy_gc, &c->copygc_pd);
 	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
@@ -572,7 +562,6 @@ struct attribute *bch2_fs_internal_files[] = {
 	&sysfs_rebalance_enabled,
 	&sysfs_rebalance_work,
 	sysfs_pd_controller_files(rebalance),
-	sysfs_pd_controller_files(copy_gc),
 	&sysfs_new_stripes,