Commit cae1927c authored by Jakub Kicinski, committed by Daniel Borkmann

bpf: offload: allow netdev to disappear while verifier is running

To allow verifier instruction callbacks without any extra locking, the
NETDEV_UNREGISTER notification would wait on a waitqueue for the verifier
to finish.  This design decision was made when the rtnl lock provided
all the locking.  Use the read/write lock instead and remove the
waitqueue.
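
To illustrate the pattern, here is a minimal userspace sketch (not from the commit; every name is invented, and pthread_rwlock stands in for the kernel's rw_semaphore). Callbacks take the read lock and re-check that the device still exists; the unregister path takes the write lock, which by itself waits out any in-flight callback, so no flag-plus-waitqueue handshake is needed.

/* Minimal sketch, not kernel code; build with: gcc sketch.c -lpthread */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t devs_lock = PTHREAD_RWLOCK_INITIALIZER;

struct device { const char *name; };
static struct device netdev = { "netdev0" };
static struct device *bound_dev = &netdev;      /* NULL once unregistered */

/* Per-instruction verifier callback: holds the read lock so the device
 * cannot be torn down mid-call, and re-checks that it still exists. */
static int insn_callback(int insn_idx)
{
        int ret = -ENODEV;

        pthread_rwlock_rdlock(&devs_lock);
        if (bound_dev) {
                printf("insn %d verified on %s\n", insn_idx, bound_dev->name);
                ret = 0;
        }
        pthread_rwlock_unlock(&devs_lock);
        return ret;
}

/* Unregister path: acquiring the write lock waits out in-flight
 * callbacks, so no verifier_running flag or waitqueue is needed. */
static void unregister_device(void)
{
        pthread_rwlock_wrlock(&devs_lock);
        bound_dev = NULL;
        pthread_rwlock_unlock(&devs_lock);
}

int main(void)
{
        insn_callback(0);                       /* succeeds */
        unregister_device();                    /* device disappears */
        return insn_callback(1) == -ENODEV ? 0 : 1;  /* now fails cleanly */
}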

The verifier will now call into the offload code, so dev_ops are moved
into the offload structure.  Since all verifier calls are guarded by
bpf_prog_is_dev_bound(), we no longer need static inline stubs
to please builds with CONFIG_NET=n.
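
Why no stubs are needed: a hedged, standalone sketch of the guard pattern with invented names. When the predicate folds to a compile-time constant false, the compiler drops the guarded call, so the callee needs only a declaration. The kernel gets the same effect because it always builds with optimization enabled.

/* Standalone sketch of the guard pattern (invented names).  Without
 * HAVE_OFFLOAD the predicate is constant false, the guarded call below
 * is eliminated as dead code, and offload_verify_insn() never needs a
 * definition.  Build with -O1 or higher so the branch is folded. */
#include <stdbool.h>
#include <stdio.h>

struct prog { void *offload; };

int offload_verify_insn(struct prog *p, int insn_idx);  /* no stub needed */

static inline bool prog_is_dev_bound(const struct prog *p)
{
#ifdef HAVE_OFFLOAD
        return p->offload != NULL;      /* real check */
#else
        (void)p;
        return false;                   /* constant: guarded call is dead */
#endif
}

static int check_insn(struct prog *p, int insn_idx)
{
        if (prog_is_dev_bound(p))
                return offload_verify_insn(p, insn_idx);
        return 0;
}

int main(void)
{
        struct prog p = { .offload = NULL };
        printf("ret = %d\n", check_insn(&p, 0));
        return 0;
}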
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 9a18eedb
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -238,7 +238,7 @@ struct nfp_bpf_vnic {
 
 int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -260,6 +260,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 	return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
 	.insn_hook = nfp_verify_insn,
 };
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -66,7 +66,7 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
 	return 0;
 }
 
-static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = {
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
 	.insn_hook = nsim_bpf_verify_insn,
 };
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -184,14 +185,18 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	bool			verifier_running;
-	wait_queue_head_t	verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -166,12 +166,6 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
 	return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-	int (*insn_hook)(struct bpf_verifier_env *env,
-			 int insn_idx, int prev_insn_idx);
-};
-
 #define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
@@ -185,7 +179,6 @@ struct bpf_verifier_env {
 	bool strict_alignment;		/* perform strict pointer alignment checks */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
 	u32 id_gen;			/* used to generate unique reg IDs */
@@ -206,13 +199,8 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 	return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -804,7 +804,7 @@ enum bpf_netdev_command {
 	BPF_OFFLOAD_DESTROY,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
@@ -826,7 +826,7 @@ struct netdev_bpf {
 		/* BPF_OFFLOAD_VERIFIER_PREP */
 		struct {
 			struct bpf_prog *prog;
-			const struct bpf_ext_analyzer_ops *ops;  /* callee set */
+			const struct bpf_prog_offload_ops *ops;  /* callee set */
 		} verifier;
 		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
 		struct {
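
For context, a hedged sketch of the driver side (hypothetical driver, modeled on the nsim and nfp hunks above): the ndo_bpf handler answers BPF_OFFLOAD_VERIFIER_PREP by handing back its bpf_prog_offload_ops, which the core stashes in prog->aux->offload->dev_ops.

/* Kernel-style sketch, hypothetical driver; not part of this commit. */
static int mydrv_verify_insn(struct bpf_verifier_env *env,
			     int insn_idx, int prev_insn_idx)
{
	return 0;	/* accept every instruction */
}

static const struct bpf_prog_offload_ops mydrv_offload_ops = {
	.insn_hook	= mydrv_verify_insn,
};

static int mydrv_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		bpf->verifier.ops = &mydrv_offload_ops;	/* callee set */
		return 0;
	default:
		return -EINVAL;
	}
}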
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -44,7 +44,6 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 		return -ENOMEM;
 
 	offload->prog = prog;
-	init_waitqueue_head(&offload->verifier_done);
 
 	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
 					   attr->prog_ifindex);
@@ -97,15 +96,28 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 	if (err)
 		goto exit_unlock;
 
-	env->dev_ops = data.verifier.ops;
-
+	env->prog->aux->offload->dev_ops = data.verifier.ops;
 	env->prog->aux->offload->dev_state = true;
-	env->prog->aux->offload->verifier_running = true;
 exit_unlock:
 	rtnl_unlock();
 	return err;
 }
 
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx)
+{
+	struct bpf_dev_offload *offload;
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload->netdev)
+		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_dev_offload *offload = prog->aux->offload;
@@ -117,9 +129,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 
 	data.offload.prog = prog;
 
-	if (offload->verifier_running)
-		wait_event(offload->verifier_done, !offload->verifier_running);
-
 	if (offload->dev_state)
 		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
@@ -132,9 +141,6 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_dev_offload *offload = prog->aux->offload;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	down_write(&bpf_devs_lock);
 	__bpf_prog_offload_destroy(prog);
@@ -146,15 +152,11 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog)
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 	int ret;
 
 	data.offload.prog = prog;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
 	rtnl_unlock();
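
The write side of bpf_devs_lock is not part of this diff. Below is a kernel-style sketch of the NETDEV_UNREGISTER notifier it pairs with; the exact shape is assumed rather than taken from this commit.

/* Assumed shape, for illustration only: taking the write lock waits out
 * any insn_hook currently holding the read lock; once the programs bound
 * to the dying netdev are destroyed, later hooks see offload->netdev as
 * NULL and return -ENODEV. */
static int bpf_offload_notification(struct notifier_block *notifier,
				    unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dev_offload *offload, *tmp;

	switch (event) {
	case NETDEV_UNREGISTER:
		down_write(&bpf_devs_lock);
		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
					 offloads)
			if (offload->netdev == netdev)
				__bpf_prog_offload_destroy(offload->prog);
		up_write(&bpf_devs_lock);
		break;
	}
	return NOTIFY_OK;
}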
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4438,15 +4438,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	return 0;
 }
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-				  int insn_idx, int prev_insn_idx)
-{
-	if (env->dev_ops && env->dev_ops->insn_hook)
-		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
-
-	return 0;
-}
-
 static int do_check(struct bpf_verifier_env *env)
 {
	struct bpf_verifier_state *state;
@@ -4531,9 +4522,12 @@ static int do_check(struct bpf_verifier_env *env)
 			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
-		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-		if (err)
-			return err;
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			err = bpf_prog_offload_verify_insn(env, insn_idx,
+							   prev_insn_idx);
+			if (err)
+				return err;
+		}
 
 		regs = cur_regs(env);
 		env->insn_aux_data[insn_idx].seen = true;
@@ -5463,7 +5457,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
-	if (env->prog->aux->offload) {
+	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
 			goto err_unlock;