Commit 3a61c7c5 authored by Roman Gushchin, committed by Alexei Starovoitov

bpf: Memcg-based memory accounting for cgroup storage maps

Account memory used by cgroup storage maps, including their metadata
structures.

Account the percpu memory for the percpu flavor of cgroup storage.
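
For reference, the bpf_map_kmalloc_node()/bpf_map_alloc_percpu() helpers used
below (introduced earlier in this series) are expected to charge allocations to
the memory cgroup the map was created in. A simplified sketch, not the exact
implementation:

	/* Sketch only: charge a map-related allocation to map->memcg. */
	void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size,
				   gfp_t flags, int node)
	{
		struct mem_cgroup *old_memcg;
		void *ptr;

		old_memcg = set_active_memcg(map->memcg);
		ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
		set_active_memcg(old_memcg);

		return ptr;
	}

	/* Same idea for the percpu flavor of cgroup storage. */
	void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map,
					    size_t size, size_t align,
					    gfp_t flags)
	{
		struct mem_cgroup *old_memcg;
		void __percpu *ptr;

		old_memcg = set_active_memcg(map->memcg);
		ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);

		return ptr;
	}

With these wrappers, the storage buffers allocated below are accounted to the
map owner's memcg; the map structure itself is accounted by passing
__GFP_ACCOUNT to kmalloc_node() directly.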
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20201201215900.3569844-11-guro@fb.com
parent e88cc05b
@@ -164,10 +164,10 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
 		return 0;
 	}

-	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
-			   map->value_size,
-			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
-			   map->numa_node);
+	new = bpf_map_kmalloc_node(map, sizeof(struct bpf_storage_buffer) +
+				   map->value_size,
+				   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
+				   map->numa_node);
 	if (!new)
 		return -ENOMEM;
@@ -313,7 +313,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 		return ERR_PTR(ret);

 	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
-			   __GFP_ZERO | GFP_USER, numa_node);
+			   __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
 	if (!map) {
 		bpf_map_charge_finish(&mem);
 		return ERR_PTR(-ENOMEM);
@@ -496,9 +496,9 @@ static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
 struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
 					enum bpf_cgroup_storage_type stype)
 {
+	const gfp_t gfp = __GFP_ZERO | GFP_USER;
 	struct bpf_cgroup_storage *storage;
 	struct bpf_map *map;
-	gfp_t flags;
 	size_t size;
 	u32 pages;
@@ -511,20 +511,19 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
 	if (bpf_map_charge_memlock(map, pages))
 		return ERR_PTR(-EPERM);

-	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
-			       __GFP_ZERO | GFP_USER, map->numa_node);
+	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
+				       gfp, map->numa_node);
 	if (!storage)
 		goto enomem;

-	flags = __GFP_ZERO | GFP_USER;
-
 	if (stype == BPF_CGROUP_STORAGE_SHARED) {
-		storage->buf = kmalloc_node(size, flags, map->numa_node);
+		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
+						    map->numa_node);
 		if (!storage->buf)
 			goto enomem;
 		check_and_init_map_lock(map, storage->buf->data);
 	} else {
-		storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
+		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
 		if (!storage->percpu_buf)
 			goto enomem;
 	}