Commit f294b37e authored by Roman Gushchin, committed by Daniel Borkmann

bpf: rework cgroup storage pointer passing

To simplify the upcoming introduction of per-cpu cgroup storage, let's
rework the mechanism for passing a cgroup storage pointer to
bpf_get_local_storage(): save a pointer to the corresponding
bpf_cgroup_storage structure instead of a pointer to the actual buffer.

This will make it easier to handle per-cpu storage later, which accesses
the actual data in a different way.

Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 8bad74f9
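
For context, the diff below is easiest to read with the two structures involved in mind. A minimal sketch, abbreviated to the fields relevant here (remaining members omitted):

	struct bpf_storage_buffer {
		struct rcu_head rcu;
		char data[0];			/* the data the BPF program reads/writes */
	};

	struct bpf_cgroup_storage {
		struct bpf_storage_buffer *buf;	/* can be replaced on map updates, hence READ_ONCE() */
		/* ... map, key, list/rb-tree linkage, rcu ... */
	};

Before this patch the per-cpu slot cached &buf->data[0]; after it, the slot holds the bpf_cgroup_storage pointer itself and bpf_get_local_storage() dereferences buf at call time.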
@@ -23,7 +23,8 @@ struct bpf_cgroup_storage;
 extern struct static_key_false cgroup_bpf_enabled_key;
 #define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
 
-DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 
 #define for_each_cgroup_storage_type(stype) \
 	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -115,15 +116,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
 					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
 {
 	enum bpf_cgroup_storage_type stype;
-	struct bpf_storage_buffer *buf;
-
-	for_each_cgroup_storage_type(stype) {
-		if (!storage[stype])
-			continue;
 
-		buf = READ_ONCE(storage[stype]->buf);
-		this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
-	}
+	for_each_cgroup_storage_type(stype)
+		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
 }
 
 struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
@@ -195,7 +195,8 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
 };
 
 #ifdef CONFIG_CGROUP_BPF
-DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+DECLARE_PER_CPU(struct bpf_cgroup_storage*,
+		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 
 BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 {
@@ -204,8 +205,11 @@ BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
 	 * verifier checks that its value is correct.
 	 */
 	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+	struct bpf_cgroup_storage *storage;
+
+	storage = this_cpu_read(bpf_cgroup_storage[stype]);
 
-	return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]);
+	return (unsigned long)&READ_ONCE(storage->buf)->data[0];
 }
 
 const struct bpf_func_proto bpf_get_local_storage_proto = {
@@ -7,7 +7,8 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 
-DEFINE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+DEFINE_PER_CPU(struct bpf_cgroup_storage*,
+	       bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
 
 #ifdef CONFIG_CGROUP_BPF
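
Nothing changes from the BPF program's point of view: bpf_get_local_storage() still returns a pointer to the storage data itself. A hypothetical consumer, purely for illustration (the section, map and variable names are made up, and a selftests-style bpf_helpers.h is assumed):

	/* Hypothetical BPF program counting egress packets in cgroup-local storage. */
	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct bpf_map_def SEC("maps") egress_cnt = {
		.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
		.key_size	= sizeof(struct bpf_cgroup_storage_key),
		.value_size	= sizeof(__u64),
	};

	SEC("cgroup_skb/egress")
	int count_egress(struct __sk_buff *skb)
	{
		/* Returns a pointer into this cgroup's storage buffer (buf->data). */
		__u64 *cnt = bpf_get_local_storage(&egress_cnt, 0);

		__sync_fetch_and_add(cnt, 1);
		return 1;	/* allow the packet */
	}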