Commit 18250e78 authored by SeongJae Park, committed by Andrew Morton

mm/damon/paddr: support DAMOS filters

Implement support of the DAMOS filters in the physical address space
monitoring operations set, for all DAMOS actions that it supports
including 'pageout', 'lru_prio', and 'lru_deprio'.

Link: https://lkml.kernel.org/r/20221205230830.144349-3-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 98def236
......@@ -202,7 +202,47 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
static unsigned long damon_pa_pageout(struct damon_region *r)
/*
 * Check a single DAMOS filter against one page.
 *
 * Returns true when the page should be filtered out (i.e., skipped by the
 * scheme action), honoring the filter's 'matching' polarity: a page that
 * matches the filter's criterion is filtered out iff 'matching' is true.
 */
static bool __damos_pa_filter_out(struct damos_filter *filter,
		struct page *page)
{
	struct mem_cgroup *memcg;
	bool matched;

	if (filter->type == DAMOS_FILTER_TYPE_ANON) {
		/* Criterion: the page is anonymous memory. */
		matched = PageAnon(page);
	} else if (filter->type == DAMOS_FILTER_TYPE_MEMCG) {
		/*
		 * Criterion: the page belongs to the memcg of the filter.
		 * RCU protects the memcg lookup; an unaccounted page
		 * (no memcg) never matches.
		 */
		rcu_read_lock();
		memcg = page_memcg_check(page);
		matched = memcg && filter->memcg_id == mem_cgroup_id(memcg);
		rcu_read_unlock();
	} else {
		/* Unknown filter types match nothing. */
		matched = false;
	}

	return matched == filter->matching;
}
/*
* damos_pa_filter_out - Return true if the page should be filtered out.
*/
static bool damos_pa_filter_out(struct damos *scheme, struct page *page)
{
struct damos_filter *filter;
damos_for_each_filter(filter, scheme) {
if (__damos_pa_filter_out(filter, page))
return true;
}
return false;
}
static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
unsigned long addr, applied;
LIST_HEAD(page_list);
......@@ -213,6 +253,11 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
if (!page)
continue;
if (damos_pa_filter_out(s, page)) {
put_page(page);
continue;
}
ClearPageReferenced(page);
test_and_clear_page_young(page);
if (isolate_lru_page(page)) {
......@@ -232,7 +277,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
}
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
struct damon_region *r, bool mark_accessed)
struct damon_region *r, struct damos *s, bool mark_accessed)
{
unsigned long addr, applied = 0;
......@@ -241,6 +286,12 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (!page)
continue;
if (damos_pa_filter_out(s, page)) {
put_page(page);
continue;
}
if (mark_accessed)
mark_page_accessed(page);
else
......@@ -251,14 +302,16 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
return applied * PAGE_SIZE;
}
static unsigned long damon_pa_mark_accessed(struct damon_region *r)
static unsigned long damon_pa_mark_accessed(struct damon_region *r,
struct damos *s)
{
return damon_pa_mark_accessed_or_deactivate(r, true);
return damon_pa_mark_accessed_or_deactivate(r, s, true);
}
static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
struct damos *s)
{
return damon_pa_mark_accessed_or_deactivate(r, false);
return damon_pa_mark_accessed_or_deactivate(r, s, false);
}
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
......@@ -267,11 +320,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
return damon_pa_pageout(r);
return damon_pa_pageout(r, scheme);
case DAMOS_LRU_PRIO:
return damon_pa_mark_accessed(r);
return damon_pa_mark_accessed(r, scheme);
case DAMOS_LRU_DEPRIO:
return damon_pa_deactivate_pages(r);
return damon_pa_deactivate_pages(r, scheme);
case DAMOS_STAT:
break;
default:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment