Commit 0db15298 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: flatten struct cg_proto

There are no more external users of struct cg_proto, so flatten the
structure into struct mem_cgroup.

Since uses of those struct members no longer stand out as much, add
cgroup2 static branches to make it clearer which code is legacy.
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d886f4e4
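
The diff below removes struct cg_proto and moves its three members directly
into struct mem_cgroup as tcpmem, tcpmem_active and tcpmem_pressure, guarding
the legacy-only code with cgroup_subsys_on_dfl() checks. As a rough
orientation, the resulting shape looks like the sketch below; the field
comments and the helper name memcg_tcpmem_pressured() are illustrative only
and not part of the commit.

/* Illustrative sketch only: simplified, not verbatim kernel code. */
struct mem_cgroup {
	/* ... */
	struct page_counter tcpmem;	/* was cg_proto.memory_allocated */
	bool tcpmem_active;		/* was cg_proto.active */
	int tcpmem_pressure;		/* was cg_proto.memory_pressure */
	/* ... */
};

/*
 * Hypothetical helper showing the cgroup2 static-branch idiom:
 * cgroup_subsys_on_dfl() is backed by a static key, so the legacy
 * check stays cheap on systems that only use cgroup2.
 */
static inline bool memcg_tcpmem_pressured(struct mem_cgroup *memcg)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	       memcg->tcpmem_pressure;
}

The same !cgroup_subsys_on_dfl() test recurs throughout the charge and
uncharge paths in the diff, which is what makes the legacy-only code stand
out.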
@@ -85,12 +85,6 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-struct cg_proto {
-	struct page_counter memory_allocated;	/* Current allocated memory. */
-	int memory_pressure;
-	bool active;
-};
-
 #ifdef CONFIG_MEMCG
 struct mem_cgroup_stat_cpu {
 	long count[MEM_CGROUP_STAT_NSTATS];
@@ -169,8 +163,11 @@ struct mem_cgroup {
 
 	/* Accounted resources */
 	struct page_counter memory;
+
+	/* Legacy consumer-oriented counters */
 	struct page_counter memsw;
 	struct page_counter kmem;
+	struct page_counter tcpmem;
 
 	/* Normal memory consumption range */
 	unsigned long low;
@@ -236,7 +233,8 @@ struct mem_cgroup {
 	unsigned long socket_pressure;
 
 	/* Legacy tcp memory accounting */
-	struct cg_proto tcp_mem;
+	bool tcpmem_active;
+	int tcpmem_pressure;
 
 #ifndef CONFIG_SLOB
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
@@ -715,7 +713,7 @@ extern struct static_key_false memcg_sockets_enabled_key;
 #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
 static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 {
-	if (memcg->tcp_mem.memory_pressure)
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
 		return true;
 	do {
 		if (time_before(jiffies, memcg->socket_pressure))
@@ -2843,7 +2843,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 		counter = &memcg->kmem;
 		break;
 	case _TCP:
-		counter = &memcg->tcp_mem.memory_allocated;
+		counter = &memcg->tcpmem;
 		break;
 	default:
 		BUG();
@@ -3028,11 +3028,11 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 
 	mutex_lock(&memcg_limit_mutex);
 
-	ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, limit);
+	ret = page_counter_limit(&memcg->tcpmem, limit);
 	if (ret)
 		goto out;
 
-	if (!memcg->tcp_mem.active) {
+	if (!memcg->tcpmem_active) {
 		/*
 		 * The active flag needs to be written after the static_key
 		 * update. This is what guarantees that the socket activation
@@ -3050,7 +3050,7 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 		 * patched in yet.
 		 */
 		static_branch_inc(&memcg_sockets_enabled_key);
-		memcg->tcp_mem.active = true;
+		memcg->tcpmem_active = true;
 	}
 out:
 	mutex_unlock(&memcg_limit_mutex);
@@ -3119,7 +3119,7 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
 		counter = &memcg->kmem;
 		break;
 	case _TCP:
-		counter = &memcg->tcp_mem.memory_allocated;
+		counter = &memcg->tcpmem;
 		break;
 	default:
 		BUG();
@@ -4295,8 +4295,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		memcg->soft_limit = PAGE_COUNTER_MAX;
 		page_counter_init(&memcg->memsw, &parent->memsw);
 		page_counter_init(&memcg->kmem, &parent->kmem);
-		page_counter_init(&memcg->tcp_mem.memory_allocated,
-				  &parent->tcp_mem.memory_allocated);
+		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
 
 		/*
 		 * No need to take a reference to the parent because cgroup
@@ -4308,7 +4307,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		memcg->soft_limit = PAGE_COUNTER_MAX;
 		page_counter_init(&memcg->memsw, NULL);
 		page_counter_init(&memcg->kmem, NULL);
-		page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
+		page_counter_init(&memcg->tcpmem, NULL);
 		/*
 		 * Deeper hierachy with use_hierarchy == false doesn't make
 		 * much sense so let cgroup subsystem know about this
@@ -4374,7 +4373,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
 		static_branch_dec(&memcg_sockets_enabled_key);
 
-	if (memcg->tcp_mem.active)
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
 		static_branch_dec(&memcg_sockets_enabled_key);
 
 	memcg_free_kmem(memcg);
@@ -5601,7 +5600,7 @@ void sock_update_memcg(struct sock *sk)
 	memcg = mem_cgroup_from_task(current);
 	if (memcg == root_mem_cgroup)
 		goto out;
-	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
 		goto out;
 	if (css_tryget_online(&memcg->css))
 		sk->sk_memcg = memcg;
@@ -5629,15 +5628,14 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 	gfp_t gfp_mask = GFP_KERNEL;
 
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-		struct page_counter *counter;
+		struct page_counter *fail;
 
-		if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
-					    nr_pages, &counter)) {
-			memcg->tcp_mem.memory_pressure = 0;
+		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+			memcg->tcpmem_pressure = 0;
 			return true;
 		}
-		page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
-		memcg->tcp_mem.memory_pressure = 1;
+		page_counter_charge(&memcg->tcpmem, nr_pages);
+		memcg->tcpmem_pressure = 1;
 		return false;
 	}
 
@@ -5660,8 +5658,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-		page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
-				      nr_pages);
+		page_counter_uncharge(&memcg->tcpmem, nr_pages);
 		return;
 	}
 
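
Both the legacy and the cgroup2 accounting now share the
mem_cgroup_charge_skmem()/mem_cgroup_uncharge_skmem() entry points above,
which the networking core calls as socket buffers grow and shrink. The
following is a hedged sketch of such a caller; the function names
my_sk_charge_pages() and my_sk_uncharge_pages() and their structure are
invented for illustration and are not the actual net core code.

/* Sketch of a caller; not the actual networking code. */
static bool my_sk_charge_pages(struct sock *sk, unsigned int nr_pages)
{
	/* mem_cgroup_sockets_enabled is the static branch defined in the
	 * header hunk above; sk->sk_memcg is set by sock_update_memcg(). */
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		return mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages);
	return true;
}

static void my_sk_uncharge_pages(struct sock *sk, unsigned int nr_pages)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
}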