Commit 3990ed4c authored by Martin KaFai Lau, committed by Alexei Starovoitov

bpf: Stop caching subprog index in the bpf_pseudo_func insn

This patch is to fix an out-of-bounds access issue when jit-ing the
bpf_pseudo_func insn (i.e. ld_imm64 with src_reg == BPF_PSEUDO_FUNC).
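
For context, a bpf_pseudo_func ld_imm64 is what the verifier sees when a
program passes a callback function by address, e.g. to
bpf_for_each_map_elem() (the helper added by the Fixes: commit below).
The following is only an illustrative sketch of such a program, not part
of the patch; the map, section and function names are made up:

/* illustrative only -- not part of this patch */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 8);
        __type(key, __u32);
        __type(value, __u64);
} counters SEC(".maps");

struct cb_ctx {
        __u64 seen;
};

/* The address of this static function becomes an ld_imm64 with
 * src_reg == BPF_PSEUDO_FUNC; the verifier turns it into a subprog.
 */
static __u64 count_elem(struct bpf_map *map, __u32 *key, __u64 *val,
                        struct cb_ctx *ctx)
{
        ctx->seen++;
        return 0;       /* 0 == keep iterating */
}

SEC("tc")
int count_entries(struct __sk_buff *skb)
{
        struct cb_ctx ctx = { .seen = 0 };

        bpf_for_each_map_elem(&counters, count_elem, &ctx, 0);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";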

In jit_subprogs(), it currently reuses the subprog index cached in
insn[1].imm.  This subprog index is an index into a few arrays related
to subprogs.  For example, in jit_subprogs(), it is an index into the
newly allocated 'struct bpf_prog **func' array.

The subprog index was cached in insn[1].imm after add_subprog().  However,
it could become outdated (and too big in this case) if some subprogs
are completely removed during dead code elimination (in
adjust_subprog_starts_after_remove).  The cached index in insn[1].imm
is not updated accordingly, causing an out-of-bounds access in the later
jit_subprogs().
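
To make the failure mode concrete, here is a small userspace-style sketch
(not kernel code; the numbers are made up) of how an index cached before
DCE can run past the end of the func[] array that jit_subprogs() sizes
from the post-DCE subprog count:

#include <stdio.h>

int main(void)
{
        /* before DCE: subprogs 0..3 exist and the callback is subprog 3,
         * so 3 was cached in insn[1].imm right after add_subprog()
         */
        int cached_idx = 3;

        /* after DCE completely removes one subprog, only 3 subprogs are
         * left, so jit_subprogs() allocates func[] with 3 entries (0..2)
         */
        int subprog_cnt = 3;

        if (cached_idx >= subprog_cnt)
                printf("func[%d] is out of bounds, only %d entries\n",
                       cached_idx, subprog_cnt);
        return 0;
}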

Unlike the bpf_pseudo_'func' insn, the current bpf_pseudo_'call' insn
handles DCE properly by calling find_subprog(insn->imm) to
figure out the index instead of caching the subprog index.
The existing bpf_adj_branches() adjusts insn->imm
whenever an insn is added or removed.
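
For reference, resolving the index on demand means mapping the insn's
call target (i + insn->imm + 1) back to a subprog.  Below is a simplified
sketch of such a find_subprog()-style lookup; the kernel's find_subprog()
binary-searches the verifier's subprog_info table instead, and the struct
and helper names here are illustrative stand-ins:

#include <stddef.h>

struct subprog_info_sketch {
        unsigned int start;     /* index of the subprog's first insn */
};

/* Return the index of the subprog whose first insn is 'off', or -1.
 * For a pseudo func/call insn at index i, 'off' is i + insn->imm + 1.
 */
int find_subprog_sketch(const struct subprog_info_sketch *infos,
                        size_t cnt, unsigned int off)
{
        size_t i;

        for (i = 0; i < cnt; i++)
                if (infos[i].start == off)
                        return (int)i;
        return -1;
}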

Instead of having two ways of handling the subprog index,
this patch makes bpf_pseudo_func work more like
bpf_pseudo_call.

The first change is to stop caching the subprog index
in insn[1].imm after add_subprog().  The verification
process will use find_subprog(insn->imm) to figure
out the subprog index.

The second change is in bpf_adj_branches(): have it
adjust insn->imm for the bpf_pseudo_func insn as well
whenever an insn is added or removed.
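
The adjustment mirrors what bpf_adj_branches() already does for jump
offsets: insn->imm is relative (target = insn index + imm + 1), so when a
patch grows or shrinks the instruction stream between the insn and its
target, the imm has to absorb the delta.  A rough, simplified sketch of
that idea follows; it is not the kernel's bpf_adj_delta_to_imm(), which
additionally handles the probe pass and overflow checks:

/* Simplified model: 'patch_pos' is where insns were inserted or removed,
 * 'delta' is the net change in length (positive for an insertion) and
 * 'curr' is the index of the pseudo-func insn whose relative target is
 * curr + *imm + 1.
 */
void adjust_rel_imm(int *imm, int curr, int patch_pos, int delta)
{
        int target = curr + *imm + 1;

        if (curr < patch_pos && target >= patch_pos)
                *imm += delta;  /* target shifted, insn did not */
        else if (curr >= patch_pos && target < patch_pos)
                *imm -= delta;  /* insn shifted, target did not */
}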

The third change is in jit_subprogs().  Like the bpf_pseudo_call handling,
bpf_pseudo_func temporarily stores the find_subprog() result
in insn->off.  This is fine because the prog's insns have been finalized
at this point.  insn->off will be reset back to 0 later to avoid
confusing the userspace prog dump tool.

Fixes: 69c087ba ("bpf: Add bpf_for_each_map_elem() helper")
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211106014014.651018-1-kafai@fb.com
parent 70bf363d

include/linux/bpf.h
@@ -484,6 +484,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
 	aux->ctx_field_size = size;
 }
 
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
+	       insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
 struct bpf_prog_ops {
 	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
 			union bpf_attr __user *uattr);

kernel/bpf/core.c
@@ -390,6 +390,13 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
 			i = end_new;
 			insn = prog->insnsi + end_old;
 		}
+		if (bpf_pseudo_func(insn)) {
+			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
+						   end_new, i, probe_pass);
+			if (ret)
+				return ret;
+			continue;
+		}
 		code = insn->code;
 		if ((BPF_CLASS(code) != BPF_JMP &&
 		     BPF_CLASS(code) != BPF_JMP32) ||

kernel/bpf/verifier.c
@@ -240,12 +240,6 @@ static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
 	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
 }
 
-static bool bpf_pseudo_func(const struct bpf_insn *insn)
-{
-	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
-	       insn->src_reg == BPF_PSEUDO_FUNC;
-}
-
 struct bpf_call_arg_meta {
 	struct bpf_map *map_ptr;
 	bool raw_mode;

@@ -1960,16 +1954,10 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
 			return -EPERM;
 		}
 
-		if (bpf_pseudo_func(insn)) {
+		if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
 			ret = add_subprog(env, i + insn->imm + 1);
-			if (ret >= 0)
-				/* remember subprog */
-				insn[1].imm = ret;
-		} else if (bpf_pseudo_call(insn)) {
-			ret = add_subprog(env, i + insn->imm + 1);
-		} else {
+		else
 			ret = add_kfunc_call(env, insn->imm, insn->off);
-		}
 
 		if (ret < 0)
 			return ret;

@@ -9387,7 +9375,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 	if (insn->src_reg == BPF_PSEUDO_FUNC) {
 		struct bpf_prog_aux *aux = env->prog->aux;
-		u32 subprogno = insn[1].imm;
+		u32 subprogno = find_subprog(env,
+					     env->insn_idx + insn->imm + 1);
 
 		if (!aux->func_info) {
 			verbose(env, "missing btf func_info\n");

@@ -12557,14 +12546,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		return 0;
 
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
-		if (bpf_pseudo_func(insn)) {
-			env->insn_aux_data[i].call_imm = insn->imm;
-			/* subprog is encoded in insn[1].imm */
+		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
 			continue;
-		}
-
-		if (!bpf_pseudo_call(insn))
-			continue;
+
 		/* Upon error here we cannot fall back to interpreter but
 		 * need a hard reject of the program. Thus -EFAULT is
 		 * propagated in any case.

@@ -12585,6 +12569,12 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		env->insn_aux_data[i].call_imm = insn->imm;
 		/* point imm to __bpf_call_base+1 from JITs point of view */
 		insn->imm = 1;
+		if (bpf_pseudo_func(insn))
+			/* jit (e.g. x86_64) may emit fewer instructions
+			 * if it learns a u32 imm is the same as a u64 imm.
+			 * Force a non zero here.
+			 */
+			insn[1].imm = 1;
 	}
 
 	err = bpf_prog_alloc_jited_linfo(prog);

@@ -12669,7 +12659,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
 			if (bpf_pseudo_func(insn)) {
-				subprog = insn[1].imm;
+				subprog = insn->off;
 				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
 				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
 				continue;

@@ -12720,7 +12710,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
 		if (bpf_pseudo_func(insn)) {
 			insn[0].imm = env->insn_aux_data[i].call_imm;
-			insn[1].imm = find_subprog(env, i + insn[0].imm + 1);
+			insn[1].imm = insn->off;
+			insn->off = 0;
 			continue;
 		}
 		if (!bpf_pseudo_call(insn))