Commit 42f994b7 authored by SeongJae Park, committed by Andrew Morton

mm/damon/core: implement scheme-specific apply interval

DAMON-based operation schemes are applied for every aggregation interval.
That was mainly because schemes were using nr_accesses, which is complete
only once per aggregation interval and hence reasonable to use only at
that point.  However, the schemes are now using nr_accesses_bp, which is
updated for each sampling interval in a way that makes it reasonable to
use at any time.  Therefore, there is no reason to apply schemes only
once per aggregation interval.

The unnecessary alignment with the aggregation interval was also making
some use cases of DAMOS tricky.  Quota setting under a long aggregation
interval is one such example.  Suppose the aggregation interval is ten
seconds, and there is a scheme having a CPU quota of 100ms per 1s.  The
scheme will actually use 100ms per ten seconds, since it cannot be
applied again before the next aggregation interval.  The feature is
working as intended, but the result might not be that intuitive for some
users.  This could be fixed by updating the quota to 1s per 10s.  But in
that case, the CPU usage of DAMOS could look like spikes, and could
actually have a bad effect on other CPU-sensitive workloads.
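
As a rough back-of-the-envelope check (not part of this patch), the
pre-patch effective CPU duty cycle in the example above can be computed
as below.  This is a standalone userspace sketch; the variable names are
made up for illustration.

    /*
     * Illustration of the example above: intended vs. effective CPU duty
     * cycle of a scheme with a 100ms-per-1s quota when it can run only
     * once per 10s aggregation interval.
     */
    #include <stdio.h>

    int main(void)
    {
            double quota_ms = 100.0;           /* CPU time allowed per reset interval */
            double reset_interval_ms = 1000.0; /* quota reset interval: 1s */
            double aggr_interval_ms = 10000.0; /* aggregation interval: 10s */

            double intended = quota_ms / reset_interval_ms;
            /* pre-patch: at most one quota window is consumed per aggregation interval */
            double effective = quota_ms / (aggr_interval_ms > reset_interval_ms ?
                            aggr_interval_ms : reset_interval_ms);

            printf("intended:  %.1f%%\n", intended * 100.0);  /* 10.0% */
            printf("effective: %.1f%%\n", effective * 100.0); /*  1.0% */
            return 0;
    }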

Implement a dedicated timing interval for each DAMON-based operation
scheme, namely apply_interval.  The interval will be aligned to the
sampling interval, and each scheme will be applied once per its
apply_interval.  The interval is set to 0 by default, which means the
scheme should use the aggregation interval instead.  This avoids any
behavioral difference for existing users.
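
With this change, an in-kernel DAMON user can pick a per-scheme apply
interval by passing it as the new third argument of damon_new_scheme();
passing 0 keeps the old, aggregation-interval-based behavior.  Below is a
minimal sketch (not part of this patch) of such a caller; the function
name is hypothetical, and the pattern, quota, and watermark values are
placeholders.

    #include <linux/damon.h>

    /* Sketch only: ask for the scheme to be applied every 2 seconds. */
    static struct damos *example_new_stat_scheme(void)
    {
            struct damos_access_pattern pattern = {
                    .min_sz_region = PAGE_SIZE,
                    .max_sz_region = ULONG_MAX,
                    .min_nr_accesses = 0,
                    .max_nr_accesses = UINT_MAX,
                    .min_age_region = 0,
                    .max_age_region = UINT_MAX,
            };
            struct damos_quota quota = {};
            struct damos_watermarks wmarks = {
                    .metric = DAMOS_WMARK_NONE,
            };

            /*
             * apply_interval_us == 2000000: apply every 2s regardless of
             * the aggregation interval.  Passing 0 instead would fall
             * back to the aggregation interval as before.
             */
            return damon_new_scheme(&pattern, DAMOS_STAT, 2000000, &quota,
                            &wmarks);
    }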

Link: https://lkml.kernel.org/r/20230916020945.47296-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a72217ad
@@ -314,16 +314,19 @@ struct damos_access_pattern {
  * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
  * @pattern: Access pattern of target regions.
  * @action: &damo_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
  * @quota: Control the aggressiveness of this scheme.
  * @wmarks: Watermarks for automated (in)activation of this scheme.
  * @filters: Additional set of &struct damos_filter for &action.
  * @stat: Statistics of this scheme.
  * @list: List head for siblings.
  *
- * For each aggregation interval, DAMON finds regions which fit in the
+ * For each @apply_interval_us, DAMON finds regions which fit in the
  * &pattern and applies &action to those.  To avoid consuming too much
  * CPU time or IO resources for the &action, &quota is used.
  *
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
  * To do the work only when needed, schemes can be activated for specific
  * system situations using &wmarks.  If all schemes that registered to the
  * monitoring context are inactive, DAMON stops monitoring either, and just
@@ -340,6 +343,14 @@ struct damos_access_pattern {
 struct damos {
         struct damos_access_pattern pattern;
         enum damos_action action;
+        unsigned long apply_interval_us;
+/* private: internal use only */
+        /*
+         * number of sample intervals that should be passed before applying
+         * @action
+         */
+        unsigned long next_apply_sis;
+/* public: */
         struct damos_quota quota;
         struct damos_watermarks wmarks;
         struct list_head filters;
@@ -641,7 +652,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f);
 void damos_destroy_filter(struct damos_filter *f);

 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
-                        enum damos_action action, struct damos_quota *quota,
+                        enum damos_action action,
+                        unsigned long apply_interval_us,
+                        struct damos_quota *quota,
                         struct damos_watermarks *wmarks);
 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
 void damon_destroy_scheme(struct damos *s);

...
@@ -313,7 +313,9 @@ static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
 }

 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
-                        enum damos_action action, struct damos_quota *quota,
+                        enum damos_action action,
+                        unsigned long apply_interval_us,
+                        struct damos_quota *quota,
                         struct damos_watermarks *wmarks)
 {
         struct damos *scheme;
@@ -323,6 +325,13 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
                 return NULL;
         scheme->pattern = *pattern;
         scheme->action = action;
+        scheme->apply_interval_us = apply_interval_us;
+        /*
+         * next_apply_sis will be set when kdamond starts. While kdamond is
+         * running, it will also updated when it is added to the DAMON context,
+         * or damon_attrs are updated.
+         */
+        scheme->next_apply_sis = 0;
         INIT_LIST_HEAD(&scheme->filters);
         scheme->stat = (struct damos_stat){};
         INIT_LIST_HEAD(&scheme->list);
@@ -335,9 +344,21 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
         return scheme;
 }

+static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
+{
+        unsigned long sample_interval = ctx->attrs.sample_interval ?
+                ctx->attrs.sample_interval : 1;
+        unsigned long apply_interval = s->apply_interval_us ?
+                s->apply_interval_us : ctx->attrs.aggr_interval;
+
+        s->next_apply_sis = ctx->passed_sample_intervals +
+                apply_interval / sample_interval;
+}
+
 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
 {
         list_add_tail(&s->list, &ctx->schemes);
+        damos_set_next_apply_sis(s, ctx);
 }

 static void damon_del_scheme(struct damos *s)
@@ -558,6 +579,7 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
 {
         unsigned long sample_interval = attrs->sample_interval ?
                 attrs->sample_interval : 1;
+        struct damos *s;

         if (attrs->min_nr_regions < 3)
                 return -EINVAL;
@@ -573,6 +595,10 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)

         damon_update_monitoring_results(ctx, attrs);
         ctx->attrs = *attrs;
+
+        damon_for_each_scheme(s, ctx)
+                damos_set_next_apply_sis(s, ctx);
+
         return 0;
 }
@@ -1094,14 +1120,29 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
         struct damon_target *t;
         struct damon_region *r, *next_r;
         struct damos *s;
+        unsigned long sample_interval = c->attrs.sample_interval ?
+                c->attrs.sample_interval : 1;
+        bool has_schemes_to_apply = false;

         damon_for_each_scheme(s, c) {
+                if (c->passed_sample_intervals != s->next_apply_sis)
+                        continue;
+
+                s->next_apply_sis +=
+                        (s->apply_interval_us ? s->apply_interval_us :
+                         c->attrs.aggr_interval) / sample_interval;
+
                 if (!s->wmarks.activated)
                         continue;

+                has_schemes_to_apply = true;
+
                 damos_adjust_quota(c, s);
         }

+        if (!has_schemes_to_apply)
+                return;
+
         damon_for_each_target(t, c) {
                 damon_for_each_region_safe(r, next_r, t)
                         damon_do_apply_schemes(c, t, r);
@@ -1372,11 +1413,19 @@ static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
 {
         unsigned long sample_interval = ctx->attrs.sample_interval ?
                 ctx->attrs.sample_interval : 1;
+        unsigned long apply_interval;
+        struct damos *scheme;

         ctx->passed_sample_intervals = 0;
         ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
         ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
                 sample_interval;
+
+        damon_for_each_scheme(scheme, ctx) {
+                apply_interval = scheme->apply_interval_us ?
+                        scheme->apply_interval_us : ctx->attrs.aggr_interval;
+                scheme->next_apply_sis = apply_interval / sample_interval;
+        }
 }

 /*
@@ -1428,19 +1477,28 @@ static int kdamond_fn(void *data)
                 if (ctx->ops.check_accesses)
                         max_nr_accesses = ctx->ops.check_accesses(ctx);

-                sample_interval = ctx->attrs.sample_interval ?
-                        ctx->attrs.sample_interval : 1;
                 if (ctx->passed_sample_intervals == next_aggregation_sis) {
-                        ctx->next_aggregation_sis = next_aggregation_sis +
-                                ctx->attrs.aggr_interval / sample_interval;
                         kdamond_merge_regions(ctx,
                                         max_nr_accesses / 10,
                                         sz_limit);
                         if (ctx->callback.after_aggregation &&
                                         ctx->callback.after_aggregation(ctx))
                                 break;
-                        if (!list_empty(&ctx->schemes))
-                                kdamond_apply_schemes(ctx);
+                }
+
+                /*
+                 * do kdamond_apply_schemes() after kdamond_merge_regions() if
+                 * possible, to reduce overhead
+                 */
+                if (!list_empty(&ctx->schemes))
+                        kdamond_apply_schemes(ctx);
+
+                sample_interval = ctx->attrs.sample_interval ?
+                        ctx->attrs.sample_interval : 1;
+                if (ctx->passed_sample_intervals == next_aggregation_sis) {
+                        ctx->next_aggregation_sis = next_aggregation_sis +
+                                ctx->attrs.aggr_interval / sample_interval;
+
                         kdamond_reset_aggregated(ctx);
                         kdamond_split_regions(ctx);
                         if (ctx->ops.reset_aggregated)
...
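
As an aside on the core changes above (an illustration, not part of the
patch): damos_set_next_apply_sis() converts the microsecond interval into
a count of passed sample intervals.  The userspace sketch below, with a
hypothetical helper of the same shape, mimics that computation using
DAMON's default 5ms sampling and 100ms aggregation intervals.

    /* illustration of the next_apply_sis arithmetic shown above */
    #include <stdio.h>

    static unsigned long next_apply_sis(unsigned long passed_sample_intervals,
                    unsigned long apply_interval_us,
                    unsigned long sample_interval_us,
                    unsigned long aggr_interval_us)
    {
            unsigned long sample_interval = sample_interval_us ?
                    sample_interval_us : 1;
            unsigned long apply_interval = apply_interval_us ?
                    apply_interval_us : aggr_interval_us;

            return passed_sample_intervals + apply_interval / sample_interval;
    }

    int main(void)
    {
            /* 2s apply interval: advance by 2000000 / 5000 = 400 samples */
            printf("%lu\n", next_apply_sis(1000, 2000000, 5000, 100000));
            /* 0 falls back to the 100ms aggregation interval: 20 samples */
            printf("%lu\n", next_apply_sis(1000, 0, 5000, 100000));
            return 0;
    }
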
@@ -278,7 +278,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                         goto fail;
                 pos += parsed;

-                scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
+                scheme = damon_new_scheme(&pattern, action, 0, &quota,
+                                &wmarks);
                 if (!scheme)
                         goto fail;

...
@@ -158,6 +158,8 @@ static struct damos *damon_lru_sort_new_scheme(
                         pattern,
                         /* (de)prioritize on LRU-lists */
                         action,
+                        /* for each aggregation interval */
+                        0,
                         /* under the quota. */
                         &quota,
                         /* (De)activate this according to the watermarks. */

...
@@ -142,6 +142,8 @@ static struct damos *damon_reclaim_new_scheme(void)
                         &pattern,
                         /* page out those, as soon as found */
                         DAMOS_PAGEOUT,
+                        /* for each aggregation interval */
+                        0,
                         /* under the quota. */
                         &damon_reclaim_quota,
                         /* (De)activate this according to the watermarks. */

...
@@ -1610,7 +1610,7 @@ static struct damos *damon_sysfs_mk_scheme(
                 .low = sysfs_wmarks->low,
         };

-        scheme = damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
+        scheme = damon_new_scheme(&pattern, sysfs_scheme->action, 0, &quota,
                         &wmarks);
         if (!scheme)
                 return NULL;

...