Commit 0157edc8 authored by Alexei Starovoitov

Merge branch 'device-ops-as-cb'

Quentin Monnet says:

====================
For passing device functions for offloaded eBPF programs, there used to
be no place to store the pointer without making non-offloaded programs
pay a memory price.

As a consequence, three functions used to be called through dedicated
ndo_bpf() commands. Now that we have struct bpf_offload_dev, and since none
of those operations rely on RTNL, we can turn these three commands into
hooks inside struct bpf_prog_offload_ops, and pass them as part of
bpf_offload_dev_create().

This patch set changes the offload architecture to do so, and brings the
relevant changes to the nfp and netdevsim drivers.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c8123ead 16a8cb5c
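To make the new architecture concrete before diving into the diffs: the
driver now hands a const ops table to bpf_offload_dev_create(), and the core
dispatches prepare/translate/destroy through the stored pointer instead of
issuing BPF_OFFLOAD_* commands via ndo_bpf() under RTNL. Below is a minimal,
self-contained userspace sketch of that pattern; the struct and function
names mirror the kernel's, but the drv_* callbacks and the malloc()-based
harness are illustrative stand-ins, not kernel code.

/* Illustrative userspace model of the callback-based offload flow
 * (not kernel code): one const ops table per offload device, stored
 * at creation time and used for all later dispatch.
 */
#include <stdio.h>
#include <stdlib.h>

struct bpf_prog;			/* opaque stand-in */

struct bpf_prog_offload_ops {
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_offload_dev {
	const struct bpf_prog_offload_ops *ops;
};

/* "Driver" side: hypothetical callbacks, registered once. */
static int drv_prepare(struct bpf_prog *prog)
{
	(void)prog;
	puts("prepare");
	return 0;
}

static int drv_translate(struct bpf_prog *prog)
{
	(void)prog;
	puts("translate");
	return 0;
}

static void drv_destroy(struct bpf_prog *prog)
{
	(void)prog;
	puts("destroy");
}

static const struct bpf_prog_offload_ops drv_ops = {
	.prepare	= drv_prepare,
	.translate	= drv_translate,
	.destroy	= drv_destroy,
};

/* "Core" side: stores the ops pointer; no per-command ndo_bpf() calls. */
static struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
{
	struct bpf_offload_dev *offdev = malloc(sizeof(*offdev));

	if (!offdev)
		return NULL;
	offdev->ops = ops;
	return offdev;
}

int main(void)
{
	struct bpf_offload_dev *offdev = bpf_offload_dev_create(&drv_ops);

	if (!offdev)
		return 1;
	/* Core dispatch goes straight through the stored table. */
	if (!offdev->ops->prepare(NULL) && !offdev->ops->translate(NULL))
		offdev->ops->destroy(NULL);
	free(offdev);
	return 0;
}

The diffs that follow show the real pieces: the three new members of struct
bpf_prog_offload_ops, the nfp and netdevsim ops tables, and the core-side
dispatch, which can run under bpf_devs_lock because none of these callbacks
needs RTNL.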
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
 		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
 	}
 
-	bpf->bpf_dev = bpf_offload_dev_create();
+	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
 	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
 	if (err)
 		goto err_free_neutral_maps;
@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
 int nfp_bpf_jit(struct nfp_prog *prog);
 bool nfp_bpf_supported_opcode(u8 code);
 
-extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+		    int prev_insn_idx);
+int nfp_bpf_finalize(struct bpf_verifier_env *env);
+
+extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
 
 struct netdev_bpf;
 struct nfp_app;
@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 	struct nfp_bpf_neutral_map *record;
 	int err;
 
-	/* Map record paths are entered via ndo, update side is protected. */
-	ASSERT_RTNL();
-
 	/* Reuse path - other offloaded program is already tracking this map. */
 	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
 					nfp_bpf_maps_neutral_params);
@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
 	bool freed = false;
 	int i;
 
-	ASSERT_RTNL();
-
 	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
 		if (--nfp_prog->map_records[i]->count) {
 			nfp_prog->map_records[i] = NULL;
@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
 	kfree(nfp_prog);
 }
 
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-		      struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
 {
-	struct bpf_prog *prog = bpf->verifier.prog;
+	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
+	struct nfp_app *app = nn->app;
 	struct nfp_prog *nfp_prog;
 	int ret;
@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
 		goto err_free;
 
 	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
-	bpf->verifier.ops = &nfp_bpf_analyzer_ops;
 
 	return 0;
@@ -219,8 +212,9 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
 	return ret;
 }
 
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
 {
+	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int max_instr;
 	int err;
@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
 	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
 }
 
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 
 	kvfree(nfp_prog->prog);
 	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
 	nfp_prog_free(nfp_prog);
-
-	return 0;
 }
 
 /* Atomic engine requires values to be in big endian, we need to byte swap
@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
 int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
 {
 	switch (bpf->command) {
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		return nfp_bpf_verifier_prep(app, nn, bpf);
-	case BPF_OFFLOAD_TRANSLATE:
-		return nfp_bpf_translate(nn, bpf->offload.prog);
-	case BPF_OFFLOAD_DESTROY:
-		return nfp_bpf_destroy(nn, bpf->offload.prog);
 	case BPF_OFFLOAD_MAP_ALLOC:
 		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
 	case BPF_OFFLOAD_MAP_FREE:
@@ -601,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 
 	return 0;
 }
+
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+	.insn_hook = nfp_verify_insn,
+	.finalize = nfp_bpf_finalize,
+	.prepare = nfp_bpf_verifier_prep,
+	.translate = nfp_bpf_translate,
+	.destroy = nfp_bpf_destroy,
+};
@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	return 0;
 }
 
-static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+		    int prev_insn_idx)
 {
 	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
 	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
@@ -745,7 +745,7 @@ nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
 		goto continue_subprog;
 }
 
-static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+int nfp_bpf_finalize(struct bpf_verifier_env *env)
 {
 	struct bpf_subprog_info *info;
 	struct nfp_prog *nfp_prog;
@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
 
 	return 0;
 }
-
-const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
-	.insn_hook = nfp_verify_insn,
-	.finalize = nfp_bpf_finalize,
-};
@@ -91,11 +91,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env)
 	return 0;
 }
 
-static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
-	.insn_hook = nsim_bpf_verify_insn,
-	.finalize = nsim_bpf_finalize,
-};
-
 static bool nsim_xdp_offload_active(struct netdevsim *ns)
 {
 	return ns->xdp_hw.prog;
@@ -263,6 +258,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
 	return 0;
 }
 
+static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
+{
+	struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);
+
+	if (!ns->bpf_bind_accept)
+		return -EOPNOTSUPP;
+
+	return nsim_bpf_create_prog(ns, prog);
+}
+
+static int nsim_bpf_translate(struct bpf_prog *prog)
+{
+	struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
+
+	state->state = "xlated";
+	return 0;
+}
+
 static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
 {
 	struct nsim_bpf_bound_prog *state;
@@ -275,6 +288,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
 	kfree(state);
 }
 
+static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
+	.insn_hook = nsim_bpf_verify_insn,
+	.finalize = nsim_bpf_finalize,
+	.prepare = nsim_bpf_verifier_prep,
+	.translate = nsim_bpf_translate,
+	.destroy = nsim_bpf_destroy_prog,
+};
+
 static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
 {
 	if (bpf->prog && bpf->prog->aux->offload) {
@@ -539,24 +560,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 	ASSERT_RTNL();
 
 	switch (bpf->command) {
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		if (!ns->bpf_bind_accept)
-			return -EOPNOTSUPP;
-
-		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
-		if (err)
-			return err;
-
-		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
-		return 0;
-	case BPF_OFFLOAD_TRANSLATE:
-		state = bpf->offload.prog->aux->offload->dev_priv;
-
-		state->state = "xlated";
-		return 0;
-	case BPF_OFFLOAD_DESTROY:
-		nsim_bpf_destroy_prog(bpf->offload.prog);
-		return 0;
 	case XDP_QUERY_PROG:
 		return xdp_attachment_query(&ns->xdp, bpf);
 	case XDP_QUERY_PROG_HW:
@@ -599,7 +602,7 @@ int nsim_bpf_init(struct netdevsim *ns)
 	if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
 		return -ENOMEM;
 
-	ns->sdev->bpf_dev = bpf_offload_dev_create();
+	ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
 	err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
 	if (err)
 		return err;
@@ -268,15 +268,18 @@ struct bpf_prog_offload_ops {
 	int (*insn_hook)(struct bpf_verifier_env *env,
 			 int insn_idx, int prev_insn_idx);
 	int (*finalize)(struct bpf_verifier_env *env);
+	int (*prepare)(struct bpf_prog *prog);
+	int (*translate)(struct bpf_prog *prog);
+	void (*destroy)(struct bpf_prog *prog);
 };
 
 struct bpf_prog_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
+	struct bpf_offload_dev	*offdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	const struct bpf_prog_offload_ops *dev_ops;
 	void			*jited_image;
 	u32			jited_len;
 };
@@ -692,7 +695,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
 
-struct bpf_offload_dev *bpf_offload_dev_create(void);
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
 				    struct net_device *netdev);
@@ -245,7 +245,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 	return cur_func(env)->regs;
 }
 
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
 				 int insn_idx, int prev_insn_idx);
 int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
@@ -863,9 +863,6 @@ enum bpf_netdev_command {
 	XDP_QUERY_PROG,
 	XDP_QUERY_PROG_HW,
 	/* BPF program for offload callbacks, invoked at program load time. */
-	BPF_OFFLOAD_VERIFIER_PREP,
-	BPF_OFFLOAD_TRANSLATE,
-	BPF_OFFLOAD_DESTROY,
 	BPF_OFFLOAD_MAP_ALLOC,
 	BPF_OFFLOAD_MAP_FREE,
 	XDP_QUERY_XSK_UMEM,
@@ -891,15 +888,6 @@ struct netdev_bpf {
 		/* flags with which program was installed */
 		u32 prog_flags;
 	};
-	/* BPF_OFFLOAD_VERIFIER_PREP */
-	struct {
-		struct bpf_prog *prog;
-		const struct bpf_prog_offload_ops *ops; /* callee set */
-	} verifier;
-	/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
-	struct {
-		struct bpf_prog *prog;
-	} offload;
 	/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
 	struct {
 		struct bpf_offloaded_map *offmap;
@@ -33,6 +33,7 @@
 static DECLARE_RWSEM(bpf_devs_lock);
 
 struct bpf_offload_dev {
+	const struct bpf_prog_offload_ops *ops;
 	struct list_head netdevs;
 };
@@ -106,6 +107,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 		err = -EINVAL;
 		goto err_unlock;
 	}
+	offload->offdev = ondev->offdev;
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &ondev->progs);
 	dev_put(offload->netdev);
@@ -121,40 +123,19 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 	return err;
 }
 
-static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
-			     struct netdev_bpf *data)
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
 {
-	struct bpf_prog_offload *offload = prog->aux->offload;
-	struct net_device *netdev;
-
-	ASSERT_RTNL();
-
-	if (!offload)
-		return -ENODEV;
-	netdev = offload->netdev;
-
-	data->command = cmd;
-
-	return netdev->netdev_ops->ndo_bpf(netdev, data);
-}
-
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	struct netdev_bpf data = {};
-	int err;
-
-	data.verifier.prog = env->prog;
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
 
-	rtnl_lock();
-	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
-	if (err)
-		goto exit_unlock;
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	if (offload) {
+		ret = offload->offdev->ops->prepare(prog);
+		offload->dev_state = !ret;
+	}
+	up_read(&bpf_devs_lock);
 
-	env->prog->aux->offload->dev_ops = data.verifier.ops;
-	env->prog->aux->offload->dev_state = true;
-exit_unlock:
-	rtnl_unlock();
-	return err;
+	return ret;
 }
 
 int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
@@ -166,7 +147,8 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
 	down_read(&bpf_devs_lock);
 	offload = env->prog->aux->offload;
 	if (offload)
-		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+		ret = offload->offdev->ops->insn_hook(env, insn_idx,
+						      prev_insn_idx);
 	up_read(&bpf_devs_lock);
 
 	return ret;
@@ -180,8 +162,8 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
 	down_read(&bpf_devs_lock);
 	offload = env->prog->aux->offload;
 	if (offload) {
-		if (offload->dev_ops->finalize)
-			ret = offload->dev_ops->finalize(env);
+		if (offload->offdev->ops->finalize)
+			ret = offload->offdev->ops->finalize(env);
 		else
 			ret = 0;
 	}
@@ -193,12 +175,9 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_prog_offload *offload = prog->aux->offload;
-	struct netdev_bpf data = {};
-
-	data.offload.prog = prog;
 
 	if (offload->dev_state)
-		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
+		offload->offdev->ops->destroy(prog);
 
 	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
 	bpf_prog_free_id(prog, true);
@@ -210,24 +189,22 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	rtnl_lock();
 	down_write(&bpf_devs_lock);
 	if (prog->aux->offload)
 		__bpf_prog_offload_destroy(prog);
 	up_write(&bpf_devs_lock);
-	rtnl_unlock();
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct netdev_bpf data = {};
-	int ret;
-
-	data.offload.prog = prog;
+	struct bpf_prog_offload *offload;
+	int ret = -ENODEV;
 
-	rtnl_lock();
-	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
-	rtnl_unlock();
+	down_read(&bpf_devs_lock);
+	offload = prog->aux->offload;
+	if (offload)
+		ret = offload->offdev->ops->translate(prog);
+	up_read(&bpf_devs_lock);
 
 	return ret;
 }
@@ -655,7 +632,8 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
 }
 EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
 
-struct bpf_offload_dev *bpf_offload_dev_create(void)
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
 {
 	struct bpf_offload_dev *offdev;
 	int err;
@@ -673,6 +651,7 @@ struct bpf_offload_dev *bpf_offload_dev_create(void)
 	if (!offdev)
 		return ERR_PTR(-ENOMEM);
 
+	offdev->ops = ops;
 	INIT_LIST_HEAD(&offdev->netdevs);
 
 	return offdev;
@@ -6368,7 +6368,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		goto skip_full_check;
 
 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
-		ret = bpf_prog_offload_verifier_prep(env);
+		ret = bpf_prog_offload_verifier_prep(env->prog);
 		if (ret)
 			goto skip_full_check;
 	}