Commit e035c280 authored by SeongJae Park, committed by Andrew Morton

mm/damon/reclaim: support online inputs update

DAMON_RECLAIM reads the user input parameters only when it starts.  To
allow more efficient online tuning, this commit implements a new input
parameter called 'commit_inputs'.  Writing true to the parameter makes
DAMON_RECLAIM read the input parameters again.

Link: https://lkml.kernel.org/r/20220429160606.127307-14-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent adc286e6
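
For illustration only (not part of the commit): with this change in place, the online-tuning flow from userspace is to update one or more tuning parameters and then write Y to 'commit_inputs'.  The sketch below assumes the usual module-parameter files under /sys/module/damon_reclaim/parameters/ and uses the existing 'min_nr_regions' parameter as an arbitrary example; the write_param() helper is hypothetical.

    /*
     * Illustrative sketch, not from this commit: tune a DAMON_RECLAIM
     * parameter while it is running, then request a re-read of the
     * inputs by writing "Y" to the new 'commit_inputs' parameter.
     */
    #include <stdio.h>

    /* Write a value string to a damon_reclaim module parameter file. */
    static int write_param(const char *name, const char *val)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/module/damon_reclaim/parameters/%s", name);
            f = fopen(path, "w");
            if (!f) {
                    perror(path);
                    return -1;
            }
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            /* Update an existing tuning parameter (value is arbitrary). */
            if (write_param("min_nr_regions", "100"))
                    return 1;
            /* Ask DAMON_RECLAIM to re-read its inputs. */
            return write_param("commit_inputs", "Y") ? 1 : 0;
    }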
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -29,6 +29,18 @@
  */
 static bool enabled __read_mostly;
 
+/*
+ * Make DAMON_RECLAIM read the input parameters again, except ``enabled``.
+ *
+ * Input parameters that are updated while DAMON_RECLAIM is running are not
+ * applied by default.  Once this parameter is set as ``Y``, DAMON_RECLAIM
+ * reads the values of the parameters except ``enabled`` again.  Once the
+ * re-reading is done, this parameter is set as ``N``.  If invalid parameters
+ * are found during the re-reading, DAMON_RECLAIM will be disabled.
+ */
+static bool commit_inputs __read_mostly;
+module_param(commit_inputs, bool, 0600);
+
 /*
  * Time threshold for cold memory regions identification in microseconds.
  *
@@ -289,57 +301,56 @@ static struct damos *damon_reclaim_new_scheme(void)
         return scheme;
 }
 
-static int damon_reclaim_turn(bool on)
+static int damon_reclaim_apply_parameters(void)
 {
-        struct damon_region *region;
         struct damos *scheme;
-        int err;
-
-        if (!on) {
-                err = damon_stop(&ctx, 1);
-                if (!err)
-                        kdamond_pid = -1;
-                return err;
-        }
+        struct damon_addr_range addr_range;
+        int err = 0;
 
         err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
                         min_nr_regions, max_nr_regions);
         if (err)
                 return err;
 
+        /* Will be freed by next 'damon_set_schemes()' below */
+        scheme = damon_reclaim_new_scheme();
+        if (!scheme)
+                return -ENOMEM;
+        err = damon_set_schemes(ctx, &scheme, 1);
+        if (err)
+                return err;
+
         if (monitor_region_start > monitor_region_end)
                 return -EINVAL;
         if (!monitor_region_start && !monitor_region_end &&
                         !get_monitoring_region(&monitor_region_start,
                                 &monitor_region_end))
                 return -EINVAL;
-        /* DAMON will free this on its own when finish monitoring */
-        region = damon_new_region(monitor_region_start, monitor_region_end);
-        if (!region)
-                return -ENOMEM;
-        damon_add_region(region, target);
+        addr_range.start = monitor_region_start;
+        addr_range.end = monitor_region_end;
+        return damon_set_regions(target, &addr_range, 1);
+}
 
-        /* Will be freed by 'damon_set_schemes()' below */
-        scheme = damon_reclaim_new_scheme();
-        if (!scheme) {
-                err = -ENOMEM;
-                goto free_region_out;
+static int damon_reclaim_turn(bool on)
+{
+        int err;
+
+        if (!on) {
+                err = damon_stop(&ctx, 1);
+                if (!err)
+                        kdamond_pid = -1;
+                return err;
         }
-        err = damon_set_schemes(ctx, &scheme, 1);
+
+        err = damon_reclaim_apply_parameters();
         if (err)
-                goto free_scheme_out;
+                return err;
 
         err = damon_start(&ctx, 1, true);
-        if (!err) {
-                kdamond_pid = ctx->kdamond->pid;
-                return 0;
-        }
-
-free_scheme_out:
-        damon_destroy_scheme(scheme);
-free_region_out:
-        damon_destroy_region(region, target);
-        return err;
+        if (err)
+                return err;
+        kdamond_pid = ctx->kdamond->pid;
+        return 0;
 }
 
 #define ENABLE_CHECK_INTERVAL_MS 1000
@@ -389,6 +400,7 @@ MODULE_PARM_DESC(enabled,
 static int damon_reclaim_after_aggregation(struct damon_ctx *c)
 {
         struct damos *s;
+        int err = 0;
 
         /* update the stats parameter */
         damon_for_each_scheme(s, c) {
@@ -398,7 +410,23 @@ static int damon_reclaim_after_aggregation(struct damon_ctx *c)
                 bytes_reclaimed_regions = s->stat.sz_applied;
                 nr_quota_exceeds = s->stat.qt_exceeds;
         }
-        return 0;
+
+        if (commit_inputs) {
+                err = damon_reclaim_apply_parameters();
+                commit_inputs = false;
+        }
+        return err;
+}
+
+static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
+{
+        int err = 0;
+
+        if (commit_inputs) {
+                err = damon_reclaim_apply_parameters();
+                commit_inputs = false;
+        }
+        return err;
 }
 
 static int __init damon_reclaim_init(void)
@@ -410,6 +438,7 @@ static int __init damon_reclaim_init(void)
         if (damon_select_ops(ctx, DAMON_OPS_PADDR))
                 return -EINVAL;
 
+        ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
         ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
 
         target = damon_new_target();
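
Per the comment added at the top of the diff, the kdamond clears 'commit_inputs' back to N once the re-read is done (from the after_wmarks_check and after_aggregation callbacks above) and disables DAMON_RECLAIM if the new inputs are invalid.  A hypothetical companion to the earlier write sketch, assuming only the same parameter files and that DAMON_RECLAIM is currently enabled, could poll for that completion:

    /* Hypothetical: wait until the requested re-read has been consumed. */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path =
                    "/sys/module/damon_reclaim/parameters/commit_inputs";
            char val = 'Y';

            while (val == 'Y') {
                    FILE *f = fopen(path, "r");

                    if (!f || fscanf(f, " %c", &val) != 1) {
                            if (f)
                                    fclose(f);
                            return 1;
                    }
                    fclose(f);
                    usleep(100 * 1000);     /* poll every 100ms */
            }
            puts("commit_inputs is back to N: parameters were re-read");
            return 0;
    }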