Commit f27a6fad authored by Alexei Starovoitov

Merge branch 'introduce dummy BPF STRUCT_OPS'

Hou Tao says:

====================

Hi,

Currently the testing of BPF STRUCT_OPS depends on a specific bpf
implementation (e.g., tcp_congestion_ops), which cannot cover all of the
basic functionality (e.g., return value handling), so introduce a dummy
BPF STRUCT_OPS for testing purposes.

Instead of loading a userspace-implemented bpf_dummy_ops map into the
kernel and invoking a specific function by writing to a sysfs file
provided by bpf_testmod.ko, the tests only load the bpf_dummy_ops
related programs into the kernel and invoke them through
bpf_prog_test_run(). The latter is more flexible and does not depend on
an extra kernel module.
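
For illustration, a minimal sketch of that invocation path, using
libbpf's bpf_prog_test_run_xattr() the same way the selftests added
below do (the skeleton and program names are those of the selftests):

	/* args[0] carries a user-space pointer to the state (0 means a
	 * NULL state inside the kernel); the remaining slots carry the
	 * scalar arguments of test_2().
	 */
	struct bpf_dummy_ops_state state = { .val = 0xbeef };
	__u64 args[5] = { (unsigned long)&state, -100, 0x8a5f, 'c', 0x12345678 };
	struct bpf_prog_test_run_attr attr = {
		.prog_fd = bpf_program__fd(skel->progs.test_2),
		.ctx_size_in = sizeof(args),
		.ctx_in = args,
	};
	int err = bpf_prog_test_run_xattr(&attr);
	/* on success, attr.retval holds the return value of test_2() */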

Now return value handling is exercised by the test_1(...) ops, and the
passing of multiple arguments is exercised by the test_2(...) ops. If
more is needed, test_x(...) ops can be added afterwards.
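
As a purely hypothetical sketch of such an extension, a new test_3 op
would just be another member of the dummy ops, mirrored in both
include/linux/bpf.h and the selftest program (test_3 and its signature
below are illustrative only, not part of this series):

	struct bpf_dummy_ops {
		int (*test_1)(struct bpf_dummy_ops_state *cb);
		int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
			      char a3, unsigned long a4);
		/* hypothetical new op, e.g. exercising u64 return handling */
		u64 (*test_3)(struct bpf_dummy_ops_state *cb, u64 a1);
	};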

Comments are always welcome.
Regards,
Hou

Change Log:
v4:
 * add Acked-by tags to patches 1~4
 * patch 2: remove unnecessary comments and update the commit message
            accordingly
 * patch 4: remove unnecessary nr checking in dummy_ops_init_args()

v3: https://www.spinics.net/lists/bpf/msg48303.html
 * rebase on bpf-next
 * address comments from Martin, mainly: merge patches 3 & 4 from
   v2, fix the names of the btf ctx access check helpers, handle
   CONFIG_NET, fix a leak in dummy_ops_init_args(), and simplify
   bpf_dummy_init()
 * patch 4: use a loop to check args in test_dummy_multiple_args()

v2: https://www.spinics.net/lists/bpf/msg47948.html
 * rebase on bpf-next
 * add test_2(...) ops to test the passing of multiple arguments
 * a new patch (patch #2) is added to factor out ctx access helpers
 * address comments from Martin & Andrii

v1: https://www.spinics.net/lists/bpf/msg46787.html

RFC: https://www.spinics.net/lists/bpf/msg46117.html
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 36e70b9b 31122b2f
@@ -1000,6 +1000,10 @@ bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
@@ -1014,6 +1018,22 @@ static inline void bpf_module_put(const void *data, struct module *owner)
	else
		module_put(owner);
}
#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
@@ -1646,6 +1666,29 @@ bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
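/* Common ctx access checks shared by tracing-style programs: the access
 * must be a read that falls within the u64 args array and is aligned to
 * the access size.
 */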
static inline bool bpf_tracing_ctx_access(int off, int size,
					  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}
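/* The checks above, plus a BTF-based check of the accessed argument. */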
static inline bool bpf_tracing_btf_ctx_access(int off, int size,
					      enum bpf_access_type type,
					      const struct bpf_prog *prog,
					      struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_ctx_access(off, size, type))
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
@@ -93,6 +93,9 @@ const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};
const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};
static const struct btf_type *module_type;
@@ -312,6 +315,20 @@ static int check_zero_holes(const struct btf_type *t, void *data)
	return 0;
}
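/* Prepare a trampoline that calls @prog as the fentry program for one
 * struct_ops member described by @model; when the member has a non-void
 * return type, BPF_TRAMP_F_RET_FENTRY_RET makes the trampoline return
 * the prog's return value.
 */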
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end)
{
	u32 flags;

	tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
	tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tprogs, NULL);
}
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
@@ -323,7 +340,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
-	void *image;
+	void *image, *image_end;
	u32 i;

	if (flags)
@@ -363,12 +380,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
+	image_end = st_map->image + PAGE_SIZE;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;
-		u32 flags;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -430,14 +447,9 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
			goto reset_unlock;
		}

-		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
-		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
-		flags = st_ops->func_models[i].ret_size > 0 ?
-			BPF_TRAMP_F_RET_FENTRY_RET : 0;
-		err = arch_prepare_bpf_trampoline(NULL, image,
-						  st_map->image + PAGE_SIZE,
-						  &st_ops->func_models[i],
-						  flags, tprogs, NULL);
+		err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
+							&st_ops->func_models[i],
+							image, image_end);
		if (err < 0)
			goto reset_unlock;
@@ -2,6 +2,9 @@
/* internal file - do not include directly */
#ifdef CONFIG_BPF_JIT
#ifdef CONFIG_NET
BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
#endif
#ifdef CONFIG_INET
#include <net/tcp.h>
BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
@@ -1646,13 +1646,7 @@ static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
-	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-	return true;
+	return bpf_tracing_ctx_access(off, size, type);
}
static bool tracing_prog_is_valid_access(int off, int size,
@@ -1660,13 +1654,7 @@ static bool tracing_prog_is_valid_access(int off, int size,
					 enum bpf_access_type type,
					 const struct bpf_prog *prog,
					 struct bpf_insn_access_aux *info)
{
-	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-	return btf_ctx_access(off, size, type, prog, info);
+	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
int __weak bpf_prog_test_run_tracing(struct bpf_prog *prog,
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BPF_SYSCALL) := test_run.o
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_dummy_struct_ops.o
endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021. Huawei Technologies Co., Ltd
*/
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
extern struct bpf_struct_ops bpf_bpf_dummy_ops;
/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] == 0 means the state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image;
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_progs *tprogs;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	int err;

	if (prog->aux->attach_btf_id != st_ops->type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs) {
		err = -ENOMEM;
		goto out;
	}

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}
	set_vm_flush_reset_perms(image);

	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
						&st_ops->func_models[op_idx],
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

	set_memory_ro((long)image, 1);
	set_memory_x((long)image, 1);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_jit_free_exec(image);
	kfree(tprogs);
	return err;
}
static int bpf_dummy_init(struct btf *btf)
{
	return 0;
}

static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct btf *btf,
					   const struct btf_type *t, int off,
					   int size, enum bpf_access_type atype,
					   u32 *next_btf_id)
{
	const struct btf_type *state;
	s32 type_id;
	int err;

	type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	state = btf_type_by_id(btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
	if (err < 0)
		return err;

	return atype == BPF_READ ? err : NOT_INIT;
}
static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}
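/* bpf_dummy_ops exists only to be exercised through test_run; actual
 * registration (i.e. attaching the struct_ops map) is unsupported.
 */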
static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
};
@@ -81,14 +81,7 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
-	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-	if (!btf_ctx_access(off, size, type, prog, info))
+	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <test_progs.h>
#include "dummy_st_ops.skel.h"

/* Must stay consistent with the definition in include/linux/bpf.h */
struct bpf_dummy_ops_state {
	int val;
};
static void test_dummy_st_ops_attach(void)
{
	struct dummy_st_ops *skel;
	struct bpf_link *link;

	skel = dummy_st_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
		return;

	link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
	ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach");

	dummy_st_ops__destroy(skel);
}
static void test_dummy_init_ret_value(void)
{
	__u64 args[1] = {0};
	struct bpf_prog_test_run_attr attr = {
		.ctx_size_in = sizeof(args),
		.ctx_in = args,
	};
	struct dummy_st_ops *skel;
	int fd, err;

	skel = dummy_st_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
		return;

	fd = bpf_program__fd(skel->progs.test_1);
	attr.prog_fd = fd;
	err = bpf_prog_test_run_xattr(&attr);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");

	dummy_st_ops__destroy(skel);
}
static void test_dummy_init_ptr_arg(void)
{
	int exp_retval = 0xbeef;
	struct bpf_dummy_ops_state in_state = {
		.val = exp_retval,
	};
	__u64 args[1] = {(unsigned long)&in_state};
	struct bpf_prog_test_run_attr attr = {
		.ctx_size_in = sizeof(args),
		.ctx_in = args,
	};
	struct dummy_st_ops *skel;
	int fd, err;

	skel = dummy_st_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
		return;

	fd = bpf_program__fd(skel->progs.test_1);
	attr.prog_fd = fd;
	err = bpf_prog_test_run_xattr(&attr);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
	ASSERT_EQ(attr.retval, exp_retval, "test_ret");

	dummy_st_ops__destroy(skel);
}
static void test_dummy_multiple_args(void)
{
	__u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL};
	struct bpf_prog_test_run_attr attr = {
		.ctx_size_in = sizeof(args),
		.ctx_in = args,
	};
	struct dummy_st_ops *skel;
	int fd, err;
	size_t i;
	char name[8];

	skel = dummy_st_ops__open_and_load();
	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
		return;

	fd = bpf_program__fd(skel->progs.test_2);
	attr.prog_fd = fd;
	err = bpf_prog_test_run_xattr(&attr);
	ASSERT_OK(err, "test_run");
	for (i = 0; i < ARRAY_SIZE(args); i++) {
		snprintf(name, sizeof(name), "arg %zu", i);
		ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
	}

	dummy_st_ops__destroy(skel);
}
void test_dummy_st_ops(void)
{
	if (test__start_subtest("dummy_st_ops_attach"))
		test_dummy_st_ops_attach();
	if (test__start_subtest("dummy_init_ret_value"))
		test_dummy_init_ret_value();
	if (test__start_subtest("dummy_init_ptr_arg"))
		test_dummy_init_ptr_arg();
	if (test__start_subtest("dummy_multiple_args"))
		test_dummy_multiple_args();
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021. Huawei Technologies Co., Ltd */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
struct bpf_dummy_ops_state {
	int val;
} __attribute__((preserve_access_index));

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *state);
	int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};
char _license[] SEC("license") = "GPL";
SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
int ret;
if (!state)
return 0xf2f3f4f5;
ret = state->val;
state->val = 0x5a;
return ret;
}
__u64 test_2_args[5];

SEC("struct_ops/test_2")
int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
	     char a3, unsigned long a4)
{
	test_2_args[0] = (unsigned long)state;
	test_2_args[1] = a1;
	test_2_args[2] = a2;
	test_2_args[3] = a3;
	test_2_args[4] = a4;
	return 0;
}
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2,
};