Commit d1c55ab5 authored by Daniel Borkmann, committed by David S. Miller

bpf: prepare bpf_int_jit_compile/bpf_prog_select_runtime apis

Since constant blinding is only ever invoked from inside the eBPF JITs, we
need to change the signatures of bpf_int_jit_compile() and
bpf_prog_select_runtime() first, in order to prepare for the fact that the
eBPF program we are dealing with can change underneath us. Hence, call
sites now need to be handed back the latest prog. No functional change in
this patch.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c237ee5e
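For reference, a minimal sketch of how a call site adapts to the new calling
convention (loosely modeled on the bpf_prog_load() hunk below; the prog/err
variables and the free_used_maps label are taken from that context and are
illustrative, not part of this patch):

        int err;

        /* bpf_prog_select_runtime() now hands back the (possibly replaced)
         * program and reports failure through the err pointer instead of
         * its return value, so callers must reassign their prog pointer.
         */
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;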
@@ -762,7 +762,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
         /* Nothing to do here. We support Internal BPF. */
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
         struct bpf_binary_header *header;
         struct jit_ctx ctx;
@@ -770,14 +770,14 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
         u8 *image_ptr;
 
         if (!bpf_jit_enable)
-                return;
+                return prog;
 
         memset(&ctx, 0, sizeof(ctx));
         ctx.prog = prog;
 
         ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
         if (ctx.offset == NULL)
-                return;
+                return prog;
 
         /* 1. Initial fake pass to compute ctx->idx. */
 
@@ -828,6 +828,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
         prog->jited = 1;
 out:
         kfree(ctx.offset);
+        return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *prog)
@@ -1262,18 +1262,19 @@ void bpf_jit_compile(struct bpf_prog *fp)
 /*
  * Compile eBPF program "fp"
  */
-void bpf_int_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
         struct bpf_binary_header *header;
         struct bpf_jit jit;
         int pass;
 
         if (!bpf_jit_enable)
-                return;
+                return fp;
+
         memset(&jit, 0, sizeof(jit));
         jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
         if (jit.addrs == NULL)
-                return;
+                return fp;
         /*
          * Three initial passes:
          *   - 1/2: Determine clobbered registers
@@ -1305,6 +1306,7 @@ void bpf_int_jit_compile(struct bpf_prog *fp)
         }
 free_addrs:
         kfree(jit.addrs);
+        return fp;
 }
 
 /*
@@ -1073,7 +1073,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-void bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
         struct bpf_binary_header *header = NULL;
         int proglen, oldproglen = 0;
@@ -1084,11 +1084,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
         int i;
 
         if (!bpf_jit_enable)
-                return;
+                return prog;
 
         addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
         if (!addrs)
-                return;
+                return prog;
 
         /* Before first pass, make a rough estimation of addrs[]
          * each bpf instruction is translated to less than 64 bytes
@@ -1140,6 +1140,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
         }
 out:
         kfree(addrs);
+        return prog;
 }
 
 void bpf_jit_free(struct bpf_prog *fp)
@@ -458,7 +458,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-int bpf_prog_select_runtime(struct bpf_prog *fp);
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
@@ -492,7 +492,8 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct bpf_prog *fp);
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_skb_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -761,15 +761,22 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 /**
  * bpf_prog_select_runtime - select exec runtime for BPF program
  * @fp: bpf_prog populated with internal BPF program
+ * @err: pointer to error variable
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-int bpf_prog_select_runtime(struct bpf_prog *fp)
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
         fp->bpf_func = (void *) __bpf_prog_run;
 
-        bpf_int_jit_compile(fp);
+        /* eBPF JITs can rewrite the program in case constant
+         * blinding is active. However, in case of error during
+         * blinding, bpf_int_jit_compile() must always return a
+         * valid program, which in this case would simply not
+         * be JITed, but falls back to the interpreter.
+         */
+        fp = bpf_int_jit_compile(fp);
         bpf_prog_lock_ro(fp);
 
         /* The tail call compatibility check can only be done at
@@ -777,7 +784,9 @@ int bpf_prog_select_runtime(struct bpf_prog *fp)
          * with JITed or non JITed program concatenations and not
          * all eBPF JITs might immediately support all features.
          */
-        return bpf_check_tail_call(fp);
+        *err = bpf_check_tail_call(fp);
+
+        return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
@@ -859,8 +868,9 @@ const struct bpf_func_proto bpf_tail_call_proto = {
 };
 
 /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
+        return prog;
 }
 
 bool __weak bpf_helper_changes_skb_data(void *func)
@@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr)
         fixup_bpf_calls(prog);
 
         /* eBPF program is ready to be JITed */
-        err = bpf_prog_select_runtime(prog);
+        prog = bpf_prog_select_runtime(prog, &err);
         if (err < 0)
                 goto free_used_maps;
@@ -5621,7 +5621,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
                 fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
                 memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
-                bpf_prog_select_runtime(fp);
+                /* We cannot error here as we don't need type compatibility
+                 * checks.
+                 */
+                fp = bpf_prog_select_runtime(fp, err);
                 break;
         }
@@ -994,7 +994,11 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
                  */
                 goto out_err_free;
 
-        bpf_prog_select_runtime(fp);
+        /* We are guaranteed to never error here with cBPF to eBPF
+         * transitions, since there's no issue with type compatibility
+         * checks on program arrays.
+         */
+        fp = bpf_prog_select_runtime(fp, &err);
 
         kfree(old_prog);
         return fp;