Commit d1663a90 authored by Jakub Kicinski, committed by Linus Torvalds

mm/memcg: move cgroup high memory limit setting into struct page_counter

The high memory limit is currently recorded directly in struct mem_cgroup.
We are about to add a high limit for swap, so move the field to struct
page_counter and add some helpers.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Chris Down <chris@chrisdown.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200527195846.102707-4-kuba@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ff144e69
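Before the diff, a minimal userspace sketch of the pattern this commit establishes (illustrative only, not part of the commit): the high watermark lives inside the counter itself, a setter publishes it, and chargers compare usage against it. C11 relaxed atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE(), and ULONG_MAX stands in for PAGE_COUNTER_MAX; names mirror the patch but this is a model, not kernel code.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Model of struct page_counter after this commit: "high" sits next to
 * the other watermarks instead of living in struct mem_cgroup. */
struct page_counter {
	atomic_ulong usage;
	atomic_ulong high;	/* field added by this commit */
};

/* Models the new page_counter_set_high() helper, which does
 * WRITE_ONCE(counter->high, nr_pages) in the patch. */
static inline void page_counter_set_high(struct page_counter *c,
					 unsigned long nr_pages)
{
	atomic_store_explicit(&c->high, nr_pages, memory_order_relaxed);
}

static unsigned long page_counter_read(struct page_counter *c)
{
	return atomic_load_explicit(&c->usage, memory_order_relaxed);
}

int main(void)
{
	struct page_counter memory;

	atomic_init(&memory.usage, 0);
	atomic_init(&memory.high, 0);

	/* css_alloc/css_reset equivalent: start with no effective limit. */
	page_counter_set_high(&memory, ULONG_MAX);

	/* memory_high_write() equivalent: set a limit of 1024 pages. */
	page_counter_set_high(&memory, 1024);

	atomic_store_explicit(&memory.usage, 2048, memory_order_relaxed);

	/* The check reclaim_high()/try_charge() perform after the move:
	 * usage is compared against memory.high, not memcg->high. */
	if (page_counter_read(&memory) >
	    atomic_load_explicit(&memory.high, memory_order_relaxed))
		puts("above high watermark: memcg would start reclaim");

	return 0;
}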
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -215,9 +215,6 @@ struct mem_cgroup {
 	struct page_counter kmem;
 	struct page_counter tcpmem;
 
-	/* Upper bound of normal memory consumption range */
-	unsigned long high;
-
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
 
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -10,6 +10,7 @@ struct page_counter {
 	atomic_long_t usage;
 	unsigned long min;
 	unsigned long low;
+	unsigned long high;
 	unsigned long max;
 	struct page_counter *parent;
 
@@ -55,6 +56,13 @@ bool page_counter_try_charge(struct page_counter *counter,
 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
+
+static inline void page_counter_set_high(struct page_counter *counter,
+					 unsigned long nr_pages)
+{
+	WRITE_ONCE(counter->high, nr_pages);
+}
+
 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
 int page_counter_memparse(const char *buf, const char *max,
 			  unsigned long *nr_pages);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2252,7 +2252,8 @@ static void reclaim_high(struct mem_cgroup *memcg,
 			 gfp_t gfp_mask)
 {
 	do {
-		if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high))
+		if (page_counter_read(&memcg->memory) <=
+		    READ_ONCE(memcg->memory.high))
 			continue;
 		memcg_memory_event(memcg, MEMCG_HIGH);
 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
@@ -2345,7 +2346,7 @@ static u64 mem_find_max_overage(struct mem_cgroup *memcg)
 
 	do {
 		overage = calculate_overage(page_counter_read(&memcg->memory),
-					    READ_ONCE(memcg->high));
+					    READ_ONCE(memcg->memory.high));
 		max_overage = max(overage, max_overage);
 	} while ((memcg = parent_mem_cgroup(memcg)) &&
 		 !mem_cgroup_is_root(memcg));
@@ -2604,7 +2605,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	 * reclaim, the cost of mismatch is negligible.
 	 */
 	do {
-		if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) {
+		if (page_counter_read(&memcg->memory) >
+		    READ_ONCE(memcg->memory.high)) {
 			/* Don't bother a random interrupted task */
 			if (in_interrupt()) {
 				schedule_work(&memcg->high_work);
@@ -4347,7 +4349,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 	while ((parent = parent_mem_cgroup(memcg))) {
 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
-					    READ_ONCE(memcg->high));
+					    READ_ONCE(memcg->memory.high));
 		unsigned long used = page_counter_read(&memcg->memory);
 
 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
@@ -5072,7 +5074,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (IS_ERR(memcg))
 		return ERR_CAST(memcg);
 
-	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
+	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 	memcg->soft_limit = PAGE_COUNTER_MAX;
 	if (parent) {
 		memcg->swappiness = mem_cgroup_swappiness(parent);
@@ -5225,7 +5227,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
 	page_counter_set_min(&memcg->memory, 0);
 	page_counter_set_low(&memcg->memory, 0);
-	WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
+	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
 	memcg->soft_limit = PAGE_COUNTER_MAX;
 	memcg_wb_domain_size_changed(memcg);
 }
@@ -6024,7 +6026,8 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 
 static int memory_high_show(struct seq_file *m, void *v)
 {
-	return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
+	return seq_puts_memcg_tunable(m,
+		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
 }
 
 static ssize_t memory_high_write(struct kernfs_open_file *of,
@@ -6041,7 +6044,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 	if (err)
 		return err;
 
-	WRITE_ONCE(memcg->high, high);
+	page_counter_set_high(&memcg->memory, high);
 
 	for (;;) {
 		unsigned long nr_pages = page_counter_read(&memcg->memory);
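For context, a hedged userspace sketch of how this limit is driven (the cgroup path is an example, not from the patch): writes to the cgroup2 "memory.high" file land in memory_high_write() above, which parses the value via page_counter_memparse() and stores it with the new page_counter_set_high().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Example path; the cgroup must exist and use cgroup v2. */
	const char *path = "/sys/fs/cgroup/example/memory.high";
	const char *val = "64M";	/* parsed by page_counter_memparse() */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}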