Commit ef9fde06 authored by Daniel Borkmann

Merge branch 'bpf-to-bpf-function-calls'

Alexei Starovoitov says:

====================
First of all, a huge thank you to Daniel, John, Jakub, Edward and others who
reviewed multiple iterations of this patch set over the last many months
and to Dave and others who gave critical feedback during netconf/netdev.

The patch set is solid enough and we thought through numerous corner cases,
but it's not the end. More follow-ups with code reorg and features are coming.

TLDR: Allow arbitrary function calls from one bpf function to another bpf function.

Since the beginning of bpf, all bpf programs have been represented as a single function,
and program authors were forced to use always_inline for all functions
in their C code. That was causing llvm to unnecessarily inflate the code size
and forcing developers to move code into header files with little code reuse.

With a bit of additional complexity, the verifier is taught to recognize
arbitrary function calls from one bpf function to another, as long as
all of the functions are presented to the verifier as a single bpf program.
Extended program layout:
..
r1 = ..    // arg1
r2 = ..    // arg2
call pc+1  // function call pc-relative
exit
.. = r1    // access arg1
.. = r2    // access arg2
..
call pc+20 // second level of function call
...
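
For illustration, a C source along these lines (a hypothetical sketch, not one
of the selftests in this set) produces such a layout once always_inline is
dropped:

  /* compiled with: clang -O2 -target bpf -c prog.c -o prog.o
   * without __always_inline, llvm emits twice() as a separate bpf function
   * reached via "call pc+N" instead of pasting its body at every call site
   */
  static __attribute__((noinline)) int twice(int v)
  {
      return v * 2;   /* argument arrives in r1, result returns in r0 */
  }

  int prog(void *ctx)
  {
      return twice(42);
  }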

This allows for better optimized code and finally allows introducing
core bpf libraries that can be reused in different projects,
since programs are no longer limited to a single elf file.
With function calls, bpf code can be compiled into multiple .o files.

This patch set is the first step. It detects programs that contain
multiple functions and checks that the calls between them are valid.
It splits the sequence of bpf instructions (one program) into a set
of bpf functions that call each other. Only calls to known
functions are allowed. Since all functions are presented to
the verifier at once, conceptually this is 'static linking'; a sketch of
the detection step follows below.
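
As a rough illustration of that detection step, the sketch below scans an
instruction array and marks every target of a pc-relative call as the start of
a subprogram. The minimal insn layout and names here are assumptions for the
example, not the verifier's actual code:

  #include <stdint.h>
  #include <stdio.h>

  #define BPF_JMP         0x05
  #define BPF_CALL        0x80
  #define BPF_PSEUDO_CALL 1

  struct bpf_insn {
      uint8_t code;
      uint8_t dst_reg:4, src_reg:4;
      int16_t off;
      int32_t imm;
  };

  /* every insn reached via 'call pc+imm' starts a new bpf function;
   * the target of a call at index i is insn i + imm + 1
   */
  static void find_subprog_starts(const struct bpf_insn *insn, int cnt)
  {
      for (int i = 0; i < cnt; i++) {
          if (insn[i].code != (BPF_JMP | BPF_CALL) ||
              insn[i].src_reg != BPF_PSEUDO_CALL)
              continue;
          printf("subprog starts at insn %d\n", i + insn[i].imm + 1);
      }
  }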

Future plans:
- introduce BPF_PROG_TYPE_LIBRARY and allow a set of bpf functions
  to be loaded into the kernel that can later be linked to other
  programs with concrete program types. Aka 'dynamic linking'.

- introduce a function pointer type and indirect calls to allow
  bpf functions to call other dynamically loaded bpf functions while
  the caller bpf function is already executing. Aka 'runtime linking'.
  This will be a more generic and more flexible alternative
  to bpf_tail_calls.

FAQ:
Q: Do the interpreter and JIT changes mean that a new instruction is introduced?
A: No. The call instruction technically stays the same. Now it can call
   both kernel helpers and other bpf functions.
   The calling convention stays the same as well.
   From the uapi point of view the call insn got a new 'relocation' BPF_PSEUDO_CALL,
   similar to the BPF_PSEUDO_MAP_FD 'relocation' of the bpf_ldimm64 insn.
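
   Concretely, such a call is still encoded as a plain BPF_JMP | BPF_CALL insn;
   only src_reg and the meaning of imm change. A minimal sketch of how a loader
   could hand-assemble one (a 'call pc+1', using only uapi definitions added in
   this set):

     #include <linux/bpf.h>  /* struct bpf_insn, BPF_PSEUDO_CALL */

     /* same opcode as a helper call, but src_reg marks it as a
      * bpf-to-bpf call and imm becomes a pc-relative insn offset
      */
     struct bpf_insn call_insn = {
         .code    = BPF_JMP | BPF_CALL,
         .src_reg = BPF_PSEUDO_CALL,
         .imm     = 1,
     };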

Q: What had to change on the LLVM side?
A: A trivial LLVM patch to allow such calls was applied to the upcoming 6.0 release:
   https://reviews.llvm.org/rL318614
   with a few bugfixes as well.
   Make sure to build the latest llvm to have bpf_call support.

More details in the patches.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 0bce7c9a 28ab173e
@@ -1824,7 +1824,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* If BPF JIT was not enabled then we must fall back to
 	 * the interpreter.
 	 */
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 	/* If constant blinding was enabled and we failed during blinding
......
@@ -99,6 +99,20 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
 	}
 }

+static inline void emit_addr_mov_i64(const int reg, const u64 val,
+				     struct jit_ctx *ctx)
+{
+	u64 tmp = val;
+	int shift = 0;
+
+	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+	for (;shift < 48;) {
+		tmp >>= 16;
+		shift += 16;
+		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+	}
+}
+
 static inline void emit_a64_mov_i(const int is64, const int reg,
 				  const s32 val, struct jit_ctx *ctx)
 {
@@ -603,7 +617,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 r0 = bpf2a64[BPF_REG_0];
 		const u64 func = (u64)__bpf_call_base + imm;

-		emit_a64_mov_i64(tmp, func, ctx);
+		if (ctx->prog->is_func)
+			emit_addr_mov_i64(tmp, func, ctx);
+		else
+			emit_a64_mov_i64(tmp, func, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
@@ -835,16 +852,24 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }

+struct arm64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	struct arm64_jit_data *jit_data;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;

-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;

 	tmp = bpf_jit_blind_constants(prog);
@@ -858,13 +883,29 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}

+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		goto skip_init_ctx;
+	}
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;

 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}

 	/* 1. Initial fake pass to compute ctx->idx. */
@@ -895,6 +936,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 2. Now, the actual pass. */
 	ctx.image = (__le32 *)image_ptr;
+skip_init_ctx:
 	ctx.idx = 0;

 	build_prologue(&ctx);
@@ -920,13 +962,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	bpf_flush_icache(header, ctx.image + ctx.idx);

-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
+			pr_err_once("multi-func JIT bug %d != %d\n",
+				    ctx.idx, jit_data->ctx.idx);
+			bpf_jit_binary_free(header);
+			prog->bpf_func = NULL;
+			prog->jited = 0;
+			goto out_off;
+		}
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;

+	if (!prog->is_func || extra_pass) {
 out_off:
 	kfree(ctx.offset);
+	kfree(jit_data);
+	prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
......
@@ -1869,7 +1869,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;

-	if (!bpf_jit_enable || !cpu_has_mips64r2)
+	if (!prog->jit_requested || !cpu_has_mips64r2)
 		return prog;

 	tmp = bpf_jit_blind_constants(prog);
......
@@ -993,7 +993,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_prog *tmp_fp;
 	bool bpf_blinded = false;

-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return org_fp;

 	tmp_fp = bpf_jit_blind_constants(org_fp);
......
@@ -1300,7 +1300,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_jit jit;
 	int pass;

-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return orig_fp;

 	tmp = bpf_jit_blind_constants(fp);
......
@@ -1517,7 +1517,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	u8 *image_ptr;
 	int pass;

-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;

 	tmp = bpf_jit_blind_constants(prog);
......
@@ -1109,19 +1109,29 @@ xadd:		if (is_imm8(insn->off))
 	return proglen;
 }

+struct x64_jit_data {
+	struct bpf_binary_header *header;
+	int *addrs;
+	u8 *image;
+	int proglen;
+	struct jit_context ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct x64_jit_data *jit_data;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;

-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;

 	tmp = bpf_jit_blind_constants(prog);
@@ -1135,10 +1145,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}

+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	addrs = jit_data->addrs;
+	if (addrs) {
+		ctx = jit_data->ctx;
+		oldproglen = jit_data->proglen;
+		image = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		goto skip_init_addrs;
+	}
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
-		goto out;
+		goto out_addrs;
 	}

 	/* Before first pass, make a rough estimation of addrs[]
@@ -1149,6 +1177,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		addrs[i] = proglen;
 	}
 	ctx.cleanup_addr = proglen;
+skip_init_addrs:

 	/* JITed image shrinks with every pass and the loop iterates
 	 * until the image stops shrinking. Very large bpf programs
@@ -1189,7 +1218,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
-		bpf_jit_binary_lock_ro(header);
+		if (!prog->is_func || extra_pass) {
+			bpf_jit_binary_lock_ro(header);
+		} else {
+			jit_data->addrs = addrs;
+			jit_data->ctx = ctx;
+			jit_data->proglen = proglen;
+			jit_data->image = image;
+			jit_data->header = header;
+		}
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 		prog->jited_len = proglen;
@@ -1197,8 +1234,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 	}

+	if (!prog->is_func || extra_pass) {
 out_addrs:
 	kfree(addrs);
+	kfree(jit_data);
+	prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
......
@@ -200,6 +200,9 @@ struct bpf_prog_aux {
 	u32 max_ctx_offset;
 	u32 stack_depth;
 	u32 id;
+	u32 func_cnt;
+	struct bpf_prog **func;
+	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_prog_ops *ops;
@@ -402,6 +405,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)

 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

 /* Map specifics */
 struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
......
@@ -76,6 +76,14 @@ struct bpf_reg_state {
 	s64 smax_value; /* maximum possible (s64)value */
 	u64 umin_value; /* minimum possible (u64)value */
 	u64 umax_value; /* maximum possible (u64)value */
+	/* Inside the callee two registers can be both PTR_TO_STACK like
+	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
+	 * while another to the caller's stack. To differentiate them 'frameno'
+	 * is used which is an index in bpf_verifier_state->frame[] array
+	 * pointing to bpf_func_state.
+	 * This field must be second to last, for states_equal() reasons.
+	 */
+	u32 frameno;
 	/* This field must be last, for states_equal() reasons. */
 	enum bpf_reg_liveness live;
 };
@@ -83,7 +91,8 @@ struct bpf_reg_state {
 enum bpf_stack_slot_type {
 	STACK_INVALID,    /* nothing was stored in this stack slot */
 	STACK_SPILL,      /* register spilled into stack */
-	STACK_MISC	  /* BPF program wrote some data into this slot */
+	STACK_MISC,	  /* BPF program wrote some data into this slot */
+	STACK_ZERO,	  /* BPF program wrote constant zero */
 };

 #define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
 /* state of the program:
  * type of all registers and stack info
  */
-struct bpf_verifier_state {
+struct bpf_func_state {
 	struct bpf_reg_state regs[MAX_BPF_REG];
 	struct bpf_verifier_state *parent;
+	/* index of call instruction that called into this func */
+	int callsite;
+	/* stack frame number of this function state from pov of
+	 * enclosing bpf_verifier_state.
+	 * 0 = main function, 1 = first callee.
+	 */
+	u32 frameno;
+	/* subprog number == index within subprog_stack_depth
+	 * zero == main subprog
+	 */
+	u32 subprogno;
+
+	/* should be second to last. See copy_func_state() */
 	int allocated_stack;
 	struct bpf_stack_state *stack;
 };

+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+	/* call stack tracking */
+	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	u32 curframe;
+};
+
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		s32 call_imm;			/* saved imm field of call insn */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	bool seen; /* this insn was processed by the verifier */
@@ -141,6 +172,8 @@ struct bpf_ext_analyzer_ops {
 			int insn_idx, int prev_insn_idx);
 };

+#define BPF_MAX_SUBPROGS 256
+
 /* single container for all structs
  * one verifier_env per bpf_check() call
  */
@@ -159,13 +192,17 @@ struct bpf_verifier_env {
 	bool allow_ptr_leaks;
 	bool seen_direct_write;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 	struct bpf_verifer_log log;
+	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+	u32 subprog_cnt;
 };

 static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 {
-	return env->cur_state->regs;
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[cur->curframe]->regs;
 }

 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
......
@@ -58,6 +58,9 @@ struct bpf_prog_aux;
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0

+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS	0xe0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -455,10 +458,13 @@ struct bpf_binary_header {
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				jit_requested:1,/* archs need to JIT the prog */
 				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1,	/* Do we need dst entry? */
+				blinded:1,	/* Was blinded */
+				is_func:1,	/* program is a bpf function */
 				kprobe_override:1; /* Do we override a kprobe? */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	u32			len;		/* Number of filter blocks */
@@ -710,6 +716,9 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+	 __bpf_call_base)

 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
@@ -798,7 +807,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 	return fp->jited && bpf_jit_is_ebpf();
 }

-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 {
 	/* These are the prerequisites, should someone ever have the
 	 * idea to call blinding outside of them, we make sure to
@@ -806,7 +815,7 @@ static inline bool bpf_jit_blinding_enabled(void)
 	 */
 	if (!bpf_jit_is_ebpf())
 		return false;
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return false;
 	if (!bpf_jit_harden)
 		return false;
......
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)

+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1

+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
......
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
 	fp->aux->prog = fp;
+	fp->jit_requested = ebpf_jit_enabled();

 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);

@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 	return 0;
 }

-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
-	return BPF_CLASS(insn->code) == BPF_JMP &&
-	       /* Call and Exit are both special jumps with no
-		* target inside the BPF instruction image.
-		*/
-	       BPF_OP(insn->code) != BPF_CALL &&
-	       BPF_OP(insn->code) != BPF_EXIT;
-}
-
 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 i, insn_cnt = prog->len;
+	bool pseudo_call;
+	u8 code;
+	int off;

 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (!bpf_is_jmp_and_has_target(insn))
+		code = insn->code;
+		if (BPF_CLASS(code) != BPF_JMP)
 			continue;
+		if (BPF_OP(code) == BPF_EXIT)
+			continue;
+		if (BPF_OP(code) == BPF_CALL) {
+			if (insn->src_reg == BPF_PSEUDO_CALL)
+				pseudo_call = true;
+			else
+				continue;
+		} else {
+			pseudo_call = false;
+		}
+		off = pseudo_call ? insn->imm : insn->off;

 		/* Adjust offset of jmps if we cross boundaries. */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
+		if (i < pos && i + off + 1 > pos)
+			off += delta;
+		else if (i > pos + delta && i + off + 1 <= pos + delta)
+			off -= delta;
+
+		if (pseudo_call)
+			insn->imm = off;
+		else
+			insn->off = off;
 	}
 }

@@ -711,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 	struct bpf_insn *insn;
 	int i, rewritten;

-	if (!bpf_jit_blinding_enabled())
+	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
 		return prog;

 	clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,6 +764,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 		i += insn_delta;
 	}

+	clone->blinded = 1;
 	return clone;
 }
 #endif /* CONFIG_BPF_JIT */
@@ -774,8 +786,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
 *
 * Decode and execute eBPF instructions.
 */
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
-				    u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 	u64 tmp;
 	static const void *jumptable[256] = {
@@ -835,6 +846,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 		/* Call instruction */
 		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 		/* Jumps */
 		[BPF_JMP | BPF_JA] = &&JMP_JA,
@@ -1025,6 +1037,13 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 						       BPF_R4, BPF_R5);
 		CONT;

+	JMP_CALL_ARGS:
+		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+							    BPF_R3, BPF_R4,
+							    BPF_R5,
+							    insn + insn->off + 1);
+		CONT;
+
 	JMP_TAIL_CALL: {
 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
 		struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1297,6 +1316,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn)
 	return ___bpf_prog_run(regs, insn, stack); \
 }

+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+				      const struct bpf_insn *insn) \
+{ \
+	u64 stack[stack_size / sizeof(u64)]; \
+	u64 regs[MAX_BPF_REG]; \
+\
+	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+	BPF_R1 = r1; \
+	BPF_R2 = r2; \
+	BPF_R3 = r3; \
+	BPF_R4 = r4; \
+	BPF_R5 = r5; \
+	return ___bpf_prog_run(regs, insn, stack); \
+}
+
 #define EVAL1(FN, X) FN(X)
 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1308,6 +1344,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);

+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),

 static unsigned int (*interpreters[])(const void *ctx,
@@ -1316,6 +1356,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+				  const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+	stack_depth = max_t(u32, stack_depth, 1);
+	insn->off = (s16) insn->imm;
+	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+		__bpf_call_base_args;
+	insn->code = BPF_JMP | BPF_CALL_ARGS;
+}

 bool bpf_prog_array_compatible(struct bpf_array *array,
 			       const struct bpf_prog *fp)
@@ -1572,11 +1630,19 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
+	int i;

 	aux = container_of(work, struct bpf_prog_aux, work);
 	if (bpf_prog_is_dev_bound(aux))
 		bpf_prog_offload_destroy(aux->prog);
-	bpf_jit_free(aux->prog);
+	for (i = 0; i < aux->func_cnt; i++)
+		bpf_jit_free(aux->func[i]);
+	if (aux->func_cnt) {
+		kfree(aux->func);
+		bpf_prog_unlock_free(aux->prog);
+	} else {
+		bpf_jit_free(aux->prog);
+	}
 }

 /* Free internal BPF program */
......
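
An aside on bpf_patch_call_args() above: interpreters_args[] holds one
interpreter entry point per 32-byte stack-size bucket (32..512), and the index
expression picks the smallest bucket that fits the callee's stack depth. A
standalone sketch of just that arithmetic (plain C; round_up_32() is a local
stand-in for the kernel's round_up()):

  #include <stdio.h>

  static unsigned int round_up_32(unsigned int x)
  {
      return (x + 31) & ~31u;   /* round_up(x, 32) */
  }

  int main(void)
  {
      unsigned int depths[] = { 0, 1, 32, 33, 200, 512 };

      for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
          /* max_t(u32, stack_depth, 1) guards the depth == 0 case */
          unsigned int d = depths[i] ? depths[i] : 1;

          printf("stack_depth=%3u -> interpreters_args[%u]\n",
                 depths[i], round_up_32(d) / 32 - 1);
      }
      return 0;
  }
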
@@ -189,8 +189,12 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);

 		if (opcode == BPF_CALL) {
-			verbose(env, "(%02x) call %s#%d\n", insn->code,
-				func_id_name(insn->imm), insn->imm);
+			if (insn->src_reg == BPF_PSEUDO_CALL)
+				verbose(env, "(%02x) call pc%+d\n", insn->code,
+					insn->imm);
+			else
+				verbose(env, "(%02x) call %s#%d\n", insn->code,
+					func_id_name(insn->imm), insn->imm);
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
......
@@ -1194,7 +1194,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_used_maps;

 	/* eBPF program is ready to be JITed */
-	prog = bpf_prog_select_runtime(prog, &err);
+	if (!prog->bpf_func)
+		prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
......
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)

+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1

+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
......
@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
 			  __u32 map_flags);

 /* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE 65536
+#define BPF_LOG_BUF_SIZE (256 * 1024)

 int bpf_load_program_name(enum bpf_prog_type type, const char *name,
 			  const struct bpf_insn *insns,
 			  size_t insns_cnt, const char *license,
......
@@ -174,12 +174,19 @@ struct bpf_program {
 	char *name;
 	char *section_name;
 	struct bpf_insn *insns;
-	size_t insns_cnt;
+	size_t insns_cnt, main_prog_cnt;
 	enum bpf_prog_type type;

-	struct {
+	struct reloc_desc {
+		enum {
+			RELO_LD64,
+			RELO_CALL,
+		} type;
 		int insn_idx;
-		int map_idx;
+		union {
+			int map_idx;
+			int text_off;
+		};
 	} *reloc_desc;
 	int nr_reloc;

@@ -234,6 +241,7 @@ struct bpf_object {
 		} *reloc;
 		int nr_reloc;
 		int maps_shndx;
+		int text_shndx;
 	} efile;
 	/*
 	 * All loaded bpf_object is linked in a list, which is
@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 	size_t pi, si;

 	for (pi = 0; pi < obj->nr_programs; pi++) {
-		char *name = NULL;
+		const char *name = NULL;

 		prog = &obj->programs[pi];
+		if (prog->idx == obj->efile.text_shndx) {
+			name = ".text";
+			goto skip_search;
+		}

 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
 		     si++) {
@@ -405,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 				   prog->section_name);
 			return -EINVAL;
 		}
+skip_search:
 		prog->name = strdup(name);
 		if (!prog->name) {
 			pr_warning("failed to allocate memory for prog sym %s\n",
@@ -795,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		} else if ((sh.sh_type == SHT_PROGBITS) &&
 			   (sh.sh_flags & SHF_EXECINSTR) &&
 			   (data->d_size > 0)) {
+			if (strcmp(name, ".text") == 0)
+				obj->efile.text_shndx = idx;
 			err = bpf_object__add_program(obj, data->d_buf,
 						      data->d_size, name, idx);
 			if (err) {
@@ -856,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
 }

 static int
-bpf_program__collect_reloc(struct bpf_program *prog,
-			   size_t nr_maps, GElf_Shdr *shdr,
-			   Elf_Data *data, Elf_Data *symbols,
-			   int maps_shndx, struct bpf_map *maps)
+bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+			   Elf_Data *data, struct bpf_object *obj)
 {
+	Elf_Data *symbols = obj->efile.symbols;
+	int text_shndx = obj->efile.text_shndx;
+	int maps_shndx = obj->efile.maps_shndx;
+	struct bpf_map *maps = obj->maps;
+	size_t nr_maps = obj->nr_maps;
 	int i, nrels;

 	pr_debug("collecting relocating info for: '%s'\n",
@@ -893,8 +910,10 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 				   GELF_R_SYM(rel.r_info));
 			return -LIBBPF_ERRNO__FORMAT;
 		}
+		pr_debug("relo for %ld value %ld name %d\n",
+			 rel.r_info >> 32, sym.st_value, sym.st_name);

-		if (sym.st_shndx != maps_shndx) {
+		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
 				   prog->section_name, sym.st_shndx);
 			return -LIBBPF_ERRNO__RELOC;
@@ -903,6 +922,17 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
 		pr_debug("relocation: insn_idx=%u\n", insn_idx);

+		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+				pr_warning("incorrect bpf_call opcode\n");
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			prog->reloc_desc[i].type = RELO_CALL;
+			prog->reloc_desc[i].insn_idx = insn_idx;
+			prog->reloc_desc[i].text_off = sym.st_value;
+			continue;
+		}
+
 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
 				   insn_idx, insns[insn_idx].code);
@@ -924,6 +954,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 			return -LIBBPF_ERRNO__RELOC;
 		}

+		prog->reloc_desc[i].type = RELO_LD64;
 		prog->reloc_desc[i].insn_idx = insn_idx;
 		prog->reloc_desc[i].map_idx = map_idx;
 	}
@@ -962,28 +993,77 @@ bpf_object__create_maps(struct bpf_object *obj)
 	return 0;
 }

+static int
+bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
+			struct reloc_desc *relo)
+{
+	struct bpf_insn *insn, *new_insn;
+	struct bpf_program *text;
+	size_t new_cnt;
+
+	if (relo->type != RELO_CALL)
+		return -LIBBPF_ERRNO__RELOC;
+	if (prog->idx == obj->efile.text_shndx) {
+		pr_warning("relo in .text insn %d into off %d\n",
+			   relo->insn_idx, relo->text_off);
+		return -LIBBPF_ERRNO__RELOC;
+	}
+	if (prog->main_prog_cnt == 0) {
+		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
+		if (!text) {
+			pr_warning("no .text section found yet relo into text exist\n");
+			return -LIBBPF_ERRNO__RELOC;
+		}
+		new_cnt = prog->insns_cnt + text->insns_cnt;
+		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+		if (!new_insn) {
+			pr_warning("oom in prog realloc\n");
+			return -ENOMEM;
+		}
+		memcpy(new_insn + prog->insns_cnt, text->insns,
+		       text->insns_cnt * sizeof(*insn));
+		prog->insns = new_insn;
+		prog->main_prog_cnt = prog->insns_cnt;
+		prog->insns_cnt = new_cnt;
+	}
+	insn = &prog->insns[relo->insn_idx];
+	insn->imm += prog->main_prog_cnt - relo->insn_idx;
+	pr_debug("added %zd insn from %s to prog %s\n",
+		 text->insns_cnt, text->section_name, prog->section_name);
+	return 0;
+}
+
 static int
 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
 {
-	int i;
+	int i, err;

 	if (!prog || !prog->reloc_desc)
 		return 0;

 	for (i = 0; i < prog->nr_reloc; i++) {
-		int insn_idx, map_idx;
-		struct bpf_insn *insns = prog->insns;
+		if (prog->reloc_desc[i].type == RELO_LD64) {
+			struct bpf_insn *insns = prog->insns;
+			int insn_idx, map_idx;

-		insn_idx = prog->reloc_desc[i].insn_idx;
-		map_idx = prog->reloc_desc[i].map_idx;
+			insn_idx = prog->reloc_desc[i].insn_idx;
+			map_idx = prog->reloc_desc[i].map_idx;

-		if (insn_idx >= (int)prog->insns_cnt) {
-			pr_warning("relocation out of range: '%s'\n",
-				   prog->section_name);
-			return -LIBBPF_ERRNO__RELOC;
+			if (insn_idx >= (int)prog->insns_cnt) {
+				pr_warning("relocation out of range: '%s'\n",
+					   prog->section_name);
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[insn_idx].imm = obj->maps[map_idx].fd;
+		} else {
+			err = bpf_program__reloc_text(prog, obj,
+						      &prog->reloc_desc[i]);
+			if (err)
+				return err;
 		}
-
-		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
-		insns[insn_idx].imm = obj->maps[map_idx].fd;
 	}

 	zfree(&prog->reloc_desc);
@@ -1026,7 +1106,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 		Elf_Data *data = obj->efile.reloc[i].data;
 		int idx = shdr->sh_info;
 		struct bpf_program *prog;
-		size_t nr_maps = obj->nr_maps;

 		if (shdr->sh_type != SHT_REL) {
 			pr_warning("internal error at %d\n", __LINE__);
@@ -1040,11 +1119,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 			return -LIBBPF_ERRNO__RELOC;
 		}

-		err = bpf_program__collect_reloc(prog, nr_maps,
+		err = bpf_program__collect_reloc(prog,
 						 shdr, data,
-						 obj->efile.symbols,
-						 obj->efile.maps_shndx,
-						 obj->maps);
+						 obj);
 		if (err)
 			return err;
 	}
@@ -1197,6 +1274,8 @@ bpf_object__load_progs(struct bpf_object *obj)
 	int err;

 	for (i = 0; i < obj->nr_programs; i++) {
+		if (obj->programs[i].idx == obj->efile.text_shndx)
+			continue;
 		err = bpf_program__load(&obj->programs[i],
 					obj->license,
 					obj->kern_version);
@@ -1859,7 +1938,7 @@ long libbpf_get_error(const void *ptr)
 int bpf_prog_load(const char *file, enum bpf_prog_type type,
 		  struct bpf_object **pobj, int *prog_fd)
 {
-	struct bpf_program *prog;
+	struct bpf_program *prog, *first_prog = NULL;
 	struct bpf_object *obj;
 	int err;

@@ -1867,25 +1946,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	if (IS_ERR(obj))
 		return -ENOENT;

-	prog = bpf_program__next(NULL, obj);
-	if (!prog) {
-		bpf_object__close(obj);
-		return -ENOENT;
-	}
-
-	/*
-	 * If type is not specified, try to guess it based on
-	 * section name.
-	 */
-	if (type == BPF_PROG_TYPE_UNSPEC) {
-		type = bpf_program__guess_type(prog);
+	bpf_object__for_each_program(prog, obj) {
+		/*
+		 * If type is not specified, try to guess it based on
+		 * section name.
+		 */
 		if (type == BPF_PROG_TYPE_UNSPEC) {
-			bpf_object__close(obj);
-			return -EINVAL;
+			type = bpf_program__guess_type(prog);
+			if (type == BPF_PROG_TYPE_UNSPEC) {
+				bpf_object__close(obj);
+				return -EINVAL;
+			}
 		}
+
+		bpf_program__set_type(prog, type);
+		if (prog->idx != obj->efile.text_shndx && !first_prog)
+			first_prog = prog;
+	}
+
+	if (!first_prog) {
+		pr_warning("object file doesn't contain bpf program\n");
+		bpf_object__close(obj);
+		return -ENOENT;
 	}

-	bpf_program__set_type(prog, type);
 	err = bpf_object__load(obj);
 	if (err) {
 		bpf_object__close(obj);
@@ -1893,6 +1977,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	}

 	*pobj = obj;
-	*prog_fd = bpf_program__fd(prog);
+	*prog_fd = bpf_program__fd(first_prog);
 	return 0;
 }
@@ -17,7 +17,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
 	test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
-	sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o
+	sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
+	test_l4lb_noinline.o test_xdp_noinline.o

 TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
 	test_offload.py
@@ -49,8 +50,13 @@ else
 CPU ?= generic
 endif

+CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
+	      -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+
 %.o: %.c
-	$(CLANG) -I. -I./include/uapi -I../../../include/uapi \
-		 -Wno-compare-distinct-pointer-types \
+	$(CLANG) $(CLANG_FLAGS) \
 		 -O2 -target bpf -emit-llvm -c $< -o - | \
 	$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
@@ -169,10 +169,9 @@ static void test_xdp(void)
 #define NUM_ITER 100000
 #define VIP_NUM 5

-static void test_l4lb(void)
+static void test_l4lb(const char *file)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
-	const char *file = "./test_l4lb.o";
 	struct vip key = {.protocol = 6};
 	struct vip_meta {
 		__u32 flags;
@@ -249,6 +248,95 @@ static void test_l4lb(void)
 	bpf_object__close(obj);
 }

+static void test_l4lb_all(void)
+{
+	const char *file1 = "./test_l4lb.o";
+	const char *file2 = "./test_l4lb_noinline.o";
+
+	test_l4lb(file1);
+	test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+	const char *file = "./test_xdp_noinline.o";
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct vip key = {.protocol = 6};
+	struct vip_meta {
+		__u32 flags;
+		__u32 vip_num;
+	} value = {.vip_num = VIP_NUM};
+	__u32 stats_key = VIP_NUM;
+	struct vip_stats {
+		__u64 bytes;
+		__u64 pkts;
+	} stats[nr_cpus];
+	struct real_definition {
+		union {
+			__be32 dst;
+			__be32 dstv6[4];
+		};
+		__u8 flags;
+	} real_def = {.dst = MAGIC_VAL};
+	__u32 ch_key = 11, real_num = 3;
+	__u32 duration, retval, size;
+	int err, i, prog_fd, map_fd;
+	__u64 bytes = 0, pkts = 0;
+	struct bpf_object *obj;
+	char buf[128];
+	u32 *magic = (u32 *)buf;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	map_fd = bpf_find_map(__func__, obj, "vip_map");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &key, &value, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "ch_rings");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "reals");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 54 ||
+	      *magic != MAGIC_VAL, "ipv4",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 74 ||
+	      *magic != MAGIC_VAL, "ipv6",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	map_fd = bpf_find_map(__func__, obj, "stats");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_lookup_elem(map_fd, &stats_key, stats);
+	for (i = 0; i < nr_cpus; i++) {
+		bytes += stats[i].bytes;
+		pkts += stats[i].pkts;
+	}
+	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+		error_cnt++;
+		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+	}
+out:
+	bpf_object__close(obj);
+}
+
 static void test_tcp_estats(void)
 {
 	const char *file = "./test_tcp_estats.o";
@@ -757,7 +845,8 @@ int main(void)
 	test_pkt_access();
 	test_xdp();
-	test_l4lb();
+	test_l4lb_all();
+	test_xdp_noinline();
 	test_tcp_estats();
 	test_bpf_obj_id();
 	test_pkt_md_access();
......