Commit b37a5306 authored by Jakub Kicinski, committed by David S. Miller

bpf: remove old offload/analyzer

Thanks to the ability to load a program for a specific device,
running verifier twice is no longer needed.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c6c580d7
...@@ -152,9 +152,7 @@ struct bpf_verifier_env { ...@@ -152,9 +152,7 @@ struct bpf_verifier_env {
bool strict_alignment; /* perform strict pointer alignment checks */ bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
void *analyzer_priv; /* pointer to external analyzer's private data */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */ u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */ u32 id_gen; /* used to generate unique reg IDs */
...@@ -179,7 +177,4 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) ...@@ -179,7 +177,4 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
} }
#endif #endif
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
void *priv);
#endif /* _LINUX_BPF_VERIFIER_H */ #endif /* _LINUX_BPF_VERIFIER_H */
...@@ -949,9 +949,6 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, ...@@ -949,9 +949,6 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
*/ */
*reg_type = info.reg_type; *reg_type = info.reg_type;
if (env->analyzer_ops)
return 0;
env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
/* remember the offset of last byte accessed in ctx */ /* remember the offset of last byte accessed in ctx */
if (env->prog->aux->max_ctx_offset < off + size) if (env->prog->aux->max_ctx_offset < off + size)
...@@ -3736,9 +3733,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) ...@@ -3736,9 +3733,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx) int insn_idx, int prev_insn_idx)
{ {
if (env->analyzer_ops && env->analyzer_ops->insn_hook)
return env->analyzer_ops->insn_hook(env, insn_idx,
prev_insn_idx);
if (env->dev_ops && env->dev_ops->insn_hook) if (env->dev_ops && env->dev_ops->insn_hook)
return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
...@@ -4601,72 +4595,3 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) ...@@ -4601,72 +4595,3 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
kfree(env); kfree(env);
return ret; return ret;
} }
/* Per-program-type verifier ops used by the external-analyzer entry point
 * (bpf_analyzer() below); indexed by enum bpf_prog_type.  Only XDP and
 * TC cls_act have analyzer ops, and only when networking is built in —
 * for any other type the slot is NULL and bpf_analyzer() rejects the
 * program with -EOPNOTSUPP. */
static const struct bpf_verifier_ops * const bpf_analyzer_ops[] = {
#ifdef CONFIG_NET
[BPF_PROG_TYPE_XDP] = &xdp_analyzer_ops,
[BPF_PROG_TYPE_SCHED_CLS] = &tc_cls_act_analyzer_ops,
#endif
};
/*
 * bpf_analyzer() - re-run the verifier walk over an already-loaded program
 * on behalf of an external analyzer (e.g. a hardware offload driver).
 *
 * @prog: program to analyze
 * @ops:  analyzer callbacks, stashed in env->analyzer_ops so the verifier
 *        core can invoke them (e.g. from ext_analyzer_insn_hook())
 * @priv: opaque analyzer cookie, stashed in env->analyzer_priv
 *
 * Builds a fresh bpf_verifier_env (mirroring the setup in bpf_check()) and
 * runs check_cfg() + do_check() under bpf_verifier_lock.
 *
 * Return: 0 on success, -EOPNOTSUPP if the program type has no entry in
 * bpf_analyzer_ops, -ENOMEM on allocation failure, or a negative error
 * from check_cfg()/do_check().
 */
int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
void *priv)
{
struct bpf_verifier_env *env;
int ret;
/* only types with analyzer ops (XDP, TC cls_act) are supported */
if (prog->type >= ARRAY_SIZE(bpf_analyzer_ops) ||
!bpf_analyzer_ops[prog->type])
return -EOPNOTSUPP;
env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
/* one aux-data slot per instruction */
env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
prog->len);
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
env->prog = prog;
env->ops = bpf_analyzer_ops[env->prog->type];
env->analyzer_ops = ops;
env->analyzer_priv = priv;
/* grab the mutex to protect few globals used by verifier */
mutex_lock(&bpf_verifier_lock);
/* strict alignment only where unaligned access is not efficient */
env->strict_alignment = false;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
/* state-pruning lists, one head per instruction */
env->explored_states = kcalloc(env->prog->len,
sizeof(struct bpf_verifier_state_list *),
GFP_KERNEL);
ret = -ENOMEM;
if (!env->explored_states)
goto skip_full_check;
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = do_check(env);
/* do_check() may leave a current state behind on error paths */
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
skip_full_check:
/* drain any states still on the branch stack, then free pruning lists;
 * cleanup order matters: states first, then aux data, then env */
while (!pop_stack(env, NULL, NULL));
free_states(env);
mutex_unlock(&bpf_verifier_lock);
vfree(env->insn_aux_data);
err_free_env:
kfree(env);
return ret;
}
EXPORT_SYMBOL_GPL(bpf_analyzer);
...@@ -3777,25 +3777,6 @@ static bool tc_cls_act_is_valid_access(int off, int size, ...@@ -3777,25 +3777,6 @@ static bool tc_cls_act_is_valid_access(int off, int size,
return bpf_skb_is_valid_access(off, size, type, info); return bpf_skb_is_valid_access(off, size, type, info);
} }
/*
 * Analyzer-mode context-access check for TC cls_act programs.  Exactly
 * three sk_buff context offsets are allowed: len, data, and the data_end
 * pointer the TC path stashes in skb->cb (struct bpf_skb_data_end).  For
 * the two pointer fields the register type is reported through @info so
 * the verifier tracks them as packet pointers.
 */
static bool
tc_cls_act_is_valid_access_analyzer(int off, int size,
				    enum bpf_access_type type,
				    struct bpf_insn_access_aux *info)
{
	if (off == offsetof(struct sk_buff, len))
		return true;

	if (off == offsetof(struct sk_buff, data)) {
		info->reg_type = PTR_TO_PACKET;
		return true;
	}

	if (off == offsetof(struct sk_buff, cb) +
		   offsetof(struct bpf_skb_data_end, data_end)) {
		info->reg_type = PTR_TO_PACKET_END;
		return true;
	}

	/* any other offset is off-limits to the analyzer */
	return false;
}
static bool __is_valid_xdp_access(int off, int size) static bool __is_valid_xdp_access(int off, int size)
{ {
if (off < 0 || off >= sizeof(struct xdp_md)) if (off < 0 || off >= sizeof(struct xdp_md))
...@@ -3830,21 +3811,6 @@ static bool xdp_is_valid_access(int off, int size, ...@@ -3830,21 +3811,6 @@ static bool xdp_is_valid_access(int off, int size,
return __is_valid_xdp_access(off, size); return __is_valid_xdp_access(off, size);
} }
/*
 * Analyzer-mode context-access check for XDP programs: only the data and
 * data_end members of struct xdp_buff may be touched, and each is typed
 * via @info as a packet pointer so the verifier can bounds-check against
 * them.  Everything else in the context is rejected.
 */
static bool xdp_is_valid_access_analyzer(int off, int size,
					 enum bpf_access_type type,
					 struct bpf_insn_access_aux *info)
{
	if (off == offsetof(struct xdp_buff, data)) {
		info->reg_type = PTR_TO_PACKET;
		return true;
	}

	if (off == offsetof(struct xdp_buff, data_end)) {
		info->reg_type = PTR_TO_PACKET_END;
		return true;
	}

	return false;
}
void bpf_warn_invalid_xdp_action(u32 act) void bpf_warn_invalid_xdp_action(u32 act)
{ {
const u32 act_max = XDP_REDIRECT; const u32 act_max = XDP_REDIRECT;
...@@ -4516,10 +4482,6 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = { ...@@ -4516,10 +4482,6 @@ const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
.gen_prologue = tc_cls_act_prologue, .gen_prologue = tc_cls_act_prologue,
}; };
/* Analyzer-mode verifier ops for TC cls_act: only the context-access
 * check is provided, restricting programs to the sk_buff fields allowed
 * by tc_cls_act_is_valid_access_analyzer(). */
const struct bpf_verifier_ops tc_cls_act_analyzer_ops = {
.is_valid_access = tc_cls_act_is_valid_access_analyzer,
};
const struct bpf_prog_ops tc_cls_act_prog_ops = { const struct bpf_prog_ops tc_cls_act_prog_ops = {
.test_run = bpf_prog_test_run_skb, .test_run = bpf_prog_test_run_skb,
}; };
...@@ -4530,10 +4492,6 @@ const struct bpf_verifier_ops xdp_verifier_ops = { ...@@ -4530,10 +4492,6 @@ const struct bpf_verifier_ops xdp_verifier_ops = {
.convert_ctx_access = xdp_convert_ctx_access, .convert_ctx_access = xdp_convert_ctx_access,
}; };
/* Analyzer-mode verifier ops for XDP: only the context-access check is
 * provided, restricting programs to the xdp_buff fields allowed by
 * xdp_is_valid_access_analyzer(). */
const struct bpf_verifier_ops xdp_analyzer_ops = {
.is_valid_access = xdp_is_valid_access_analyzer,
};
const struct bpf_prog_ops xdp_prog_ops = { const struct bpf_prog_ops xdp_prog_ops = {
.test_run = bpf_prog_test_run_xdp, .test_run = bpf_prog_test_run_xdp,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment