Commit ddef81b5 authored by Yafang Shao, committed by Alexei Starovoitov

bpf: use bpf_map_kvcalloc in bpf_local_storage

Introduce a new helper, bpf_map_kvcalloc(), for the memory allocation in
bpf_local_storage(). With it, the allocation is charged to the map's memcg
rather than to current's. The two are currently the same thing, since the
helper is only used in the map creation path, but charging the memory to
the memcg taken from the map makes the accounting clearer.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Link: https://lore.kernel.org/r/20230210154734.4416-3-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b6c1a8af
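
For illustration, a minimal sketch of how another map implementation could
adopt the new helper. struct my_map and my_map_alloc_buckets are hypothetical
names used only for this sketch, not part of the commit; the
bpf_map_kvcalloc() call mirrors the bpf_local_storage change in the diff below.

#include <linux/bpf.h>

/* Hypothetical map type; BPF map implementations embed struct bpf_map
 * as a member, which is what the helper takes to locate the memcg.
 */
struct my_map {
	struct bpf_map map;
	void **buckets;
	u32 nbuckets;
};

static int my_map_alloc_buckets(struct my_map *m, u32 nbuckets)
{
	/* bpf_map_kvcalloc() ORs in __GFP_ACCOUNT itself and charges the
	 * map's memcg, so the caller passes only the base GFP flags.
	 */
	m->buckets = bpf_map_kvcalloc(&m->map, nbuckets, sizeof(*m->buckets),
				      GFP_USER | __GFP_NOWARN);
	if (!m->buckets)
		return -ENOMEM;
	m->nbuckets = nbuckets;
	return 0;
}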
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1886,6 +1886,8 @@ struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
 			   int node);
 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags);
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags);
 #else
@@ -1902,6 +1904,12 @@ bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return kzalloc(size, flags);
 }
 
+static inline void *
+bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
+{
+	return kvcalloc(n, size, flags);
+}
+
 static inline void __percpu *
 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
 		     gfp_t flags)
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -568,8 +568,8 @@ static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_att
 	nbuckets = max_t(u32, 2, nbuckets);
 	smap->bucket_log = ilog2(nbuckets);
 
-	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
-				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
+	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
+					 nbuckets, GFP_USER | __GFP_NOWARN);
 	if (!smap->buckets) {
 		bpf_map_area_free(smap);
 		return ERR_PTR(-ENOMEM);
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -464,6 +464,21 @@ void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
 	return ptr;
 }
 
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+		       gfp_t flags)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = kvcalloc(n, size, flags | __GFP_ACCOUNT);
+	set_active_memcg(old_memcg);
+	mem_cgroup_put(memcg);
+
+	return ptr;
+}
+
 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
 				    size_t align, gfp_t flags)
 {