Commit 4eb47198 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-11-24

The following pull-request contains BPF updates for your *net-next* tree.

We've added 27 non-merge commits during the last 4 day(s) which contain
a total of 50 files changed, 2031 insertions(+), 548 deletions(-).

The main changes are:

1) Optimize bpf_tail_call() from retpoline-ed indirect jump to direct jump,
   from Daniel (a usage sketch of the pattern this targets follows the commit header below).

2) Support global variables in libbpf, from Andrii.

3) Cleanup selftests with BPF_TRACE_x() macro, from Martin.

4) Fix devmap hash, from Toke.

5) Fix register bounds after 32-bit conditional jumps, from Yonghong.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5f04ed74 b553a6ec
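To illustrate change 1) above, here is a hedged sketch (not part of this series; map, section, and program names are invented) of the pattern the new direct-jump path targets: a bpf_tail_call() whose prog-array key is a verifier-provable constant, which the x86-64 JIT can now patch into a direct jump instead of a retpoline-ed indirect jump.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("classifier")
int entry_prog(struct __sk_buff *skb)
{
	/* Constant key: record_func_key() can track it, fixup_bpf_calls()
	 * attaches a poke descriptor, and the JIT emits a patchable site.
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	/* Falls through when slot 0 is empty. */
	return 0;
}

char _license[] SEC("license") = "GPL";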
@@ -203,8 +203,9 @@ struct jit_context {
 /* Maximum number of bytes emitted while JITing one eBPF insn */
 #define BPF_MAX_INSN_SIZE	128
 #define BPF_INSN_SAFETY		64
-/* number of bytes emit_call() needs to generate call instruction */
-#define X86_CALL_SIZE		5
+/* Number of bytes emit_patch() needs to generate instructions */
+#define X86_PATCH_SIZE		5
 #define PROLOGUE_SIZE		25
@@ -215,7 +216,7 @@ struct jit_context {
 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 {
 	u8 *prog = *pprog;
-	int cnt = X86_CALL_SIZE;
+	int cnt = X86_PATCH_SIZE;
 	/* BPF trampoline can be made to work without these nops,
 	 * but let's waste 5 bytes for now and optimize later
@@ -238,6 +239,89 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 	*pprog = prog;
 }
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
int cnt = 0;
s64 offset;
offset = func - (ip + X86_PATCH_SIZE);
if (!is_simm32(offset)) {
pr_err("Target call %p is out of range\n", func);
return -ERANGE;
}
EMIT1_off32(opcode, offset);
*pprog = prog;
return 0;
}
static int emit_call(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE8);
}
static int emit_jump(u8 **pprog, void *func, void *ip)
{
return emit_patch(pprog, func, ip, 0xE9);
}
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr,
const bool text_live)
{
const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
u8 old_insn[X86_PATCH_SIZE];
u8 new_insn[X86_PATCH_SIZE];
u8 *prog;
int ret;
memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
if (old_addr) {
prog = old_insn;
ret = t == BPF_MOD_CALL ?
emit_call(&prog, old_addr, ip) :
emit_jump(&prog, old_addr, ip);
if (ret)
return ret;
}
memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
if (new_addr) {
prog = new_insn;
ret = t == BPF_MOD_CALL ?
emit_call(&prog, new_addr, ip) :
emit_jump(&prog, new_addr, ip);
if (ret)
return ret;
}
ret = -EBUSY;
mutex_lock(&text_mutex);
if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
if (text_live)
text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
else
memcpy(ip, new_insn, X86_PATCH_SIZE);
}
ret = 0;
out:
mutex_unlock(&text_mutex);
return ret;
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
if (!is_kernel_text((long)ip) &&
!is_bpf_text_address((long)ip))
/* BPF poking in modules is not supported */
return -EINVAL;
return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
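A hedged reading of the contract above (not spelled out in the hunk itself): old_addr and new_addr describe the expected current target and the desired new target of the 5-byte site at ip, with NULL standing for "the 5-byte NOP". So NULL -> addr installs a call or jump over the NOP, addr -> NULL restores the NOP, and addr1 -> addr2 retargets an existing call or jump; the memcmp() against old_insn is what makes the transition fail with -EBUSY when the site does not hold what the caller expected. bpf_trampoline_update() further down drives exactly these transitions with BPF_MOD_CALL.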
/*
 * Generate the following code:
 *
@@ -252,7 +336,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
 * goto *(prog->bpf_func + prologue_size);
 * out:
 */
-static void emit_bpf_tail_call(u8 **pprog)
+static void emit_bpf_tail_call_indirect(u8 **pprog)
 {
 	u8 *prog = *pprog;
 	int label1, label2, label3;
@@ -319,6 +403,68 @@ static void emit_bpf_tail_call(u8 **pprog)
 	*pprog = prog;
 }
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
u8 **pprog, int addr, u8 *image)
{
u8 *prog = *pprog;
int cnt = 0;
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
EMIT2(X86_JA, 14); /* ja out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
poke->ip = image + (addr - X86_PATCH_SIZE);
poke->adj_off = PROLOGUE_SIZE;
memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
/* out: */
*pprog = prog;
}
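For orientation, a hedged note on what the poke site holds (exact byte values are typical for x86-64 and not spelled out in this hunk): the JIT first plants the 5-byte atomic NOP from ideal_nops[NOP_ATOMIC5] (commonly 0f 1f 44 00 00), and bpf_arch_text_poke() later rewrites those same 5 bytes into either e8 <rel32> (call, BPF_MOD_CALL) or e9 <rel32> (jmp, BPF_MOD_JUMP), with rel32 computed relative to ip + X86_PATCH_SIZE as in emit_patch() above.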
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
struct bpf_jit_poke_descriptor *poke;
struct bpf_array *array;
struct bpf_prog *target;
int i, ret;
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
array = container_of(poke->tail_call.map, struct bpf_array, map);
mutex_lock(&array->aux->poke_mutex);
target = array->ptrs[poke->tail_call.key];
if (target) {
/* Plain memcpy is used when image is not live yet
* and still not locked as read-only. Once poke
* location is active (poke->ip_stable), any parallel
* bpf_arch_text_poke() might occur still on the
* read-write image until we finally locked it as
* read-only. Both modifications on the given image
* are under text_mutex to avoid interference.
*/
ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func +
poke->adj_off, false);
BUG_ON(ret < 0);
}
WRITE_ONCE(poke->ip_stable, true);
mutex_unlock(&array->aux->poke_mutex);
}
}
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
@@ -480,72 +626,6 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
	*pprog = prog;
}
static int emit_call(u8 **pprog, void *func, void *ip)
{
u8 *prog = *pprog;
int cnt = 0;
s64 offset;
offset = func - (ip + X86_CALL_SIZE);
if (!is_simm32(offset)) {
pr_err("Target call %p is out of range\n", func);
return -EINVAL;
}
EMIT1_off32(0xE8, offset);
*pprog = prog;
return 0;
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr)
{
u8 old_insn[X86_CALL_SIZE] = {};
u8 new_insn[X86_CALL_SIZE] = {};
u8 *prog;
int ret;
if (!is_kernel_text((long)ip) &&
!is_bpf_text_address((long)ip))
/* BPF trampoline in modules is not supported */
return -EINVAL;
if (old_addr) {
prog = old_insn;
ret = emit_call(&prog, old_addr, (void *)ip);
if (ret)
return ret;
}
if (new_addr) {
prog = new_insn;
ret = emit_call(&prog, new_addr, (void *)ip);
if (ret)
return ret;
}
ret = -EBUSY;
mutex_lock(&text_mutex);
switch (t) {
case BPF_MOD_NOP_TO_CALL:
if (memcmp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE))
goto out;
text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL);
break;
case BPF_MOD_CALL_TO_CALL:
if (memcmp(ip, old_insn, X86_CALL_SIZE))
goto out;
text_poke_bp(ip, new_insn, X86_CALL_SIZE, NULL);
break;
case BPF_MOD_CALL_TO_NOP:
if (memcmp(ip, old_insn, X86_CALL_SIZE))
goto out;
text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE, NULL);
break;
}
ret = 0;
out:
mutex_unlock(&text_mutex);
return ret;
}
static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
@@ -1013,7 +1093,11 @@ xadd:			if (is_imm8(insn->off))
			break;
		case BPF_JMP | BPF_TAIL_CALL:
-			emit_bpf_tail_call(&prog);
+			if (imm32)
+				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
+							  &prog, addrs[i], image);
+			else
+				emit_bpf_tail_call_indirect(&prog);
			break;
		/* cond jump */
@@ -1394,7 +1478,7 @@ int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags
	/* skip patched call instruction and point orig_call to actual
	 * body of the kernel function.
	 */
-	orig_call += X86_CALL_SIZE;
+	orig_call += X86_PATCH_SIZE;
	prog = image;
@@ -1571,6 +1655,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	if (image) {
		if (!prog->is_func || extra_pass) {
+			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
......
@@ -22,6 +22,7 @@ struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
+struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
@@ -64,6 +65,12 @@ struct bpf_map_ops {
				  const struct btf_type *key_type,
				  const struct btf_type *value_type);
/* Prog poke tracking helpers. */
int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
struct bpf_prog *new);
	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
@@ -488,6 +495,24 @@ struct bpf_func_info_aux {
	bool unreliable;
};
enum bpf_jit_poke_reason {
BPF_POKE_REASON_TAIL_CALL,
};
/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
void *ip;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
bool ip_stable;
u8 adj_off;
u16 reason;
};
struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
@@ -513,6 +538,8 @@ struct bpf_prog_aux {
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
+	struct bpf_jit_poke_descriptor *poke_tab;
+	u32 size_poke_tab;
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
@@ -560,17 +587,26 @@ struct bpf_prog_aux {
	};
};
struct bpf_array_aux {
/* 'Ownership' of prog array is claimed by the first program that
* is going to use this map or by the first program which FD is
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
enum bpf_prog_type type;
bool jited;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
struct mutex poke_mutex;
struct work_struct work;
};
struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
-	/* 'ownership' of prog_array is claimed by the first program that
-	 * is going to use this map or by the first program which FD is stored
-	 * in the map to make sure that all callers and callees have the same
-	 * prog_type and JITed flag
-	 */
-	enum bpf_prog_type owner_prog_type;
-	bool owner_jited;
+	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
@@ -1031,6 +1067,10 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
{
	return -ENOTSUPP;
}
static inline void bpf_map_put(struct bpf_map *map)
{
}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -1284,10 +1324,10 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
#endif /* CONFIG_INET */
enum bpf_text_poke_type {
-	BPF_MOD_NOP_TO_CALL,
-	BPF_MOD_CALL_TO_CALL,
-	BPF_MOD_CALL_TO_NOP,
+	BPF_MOD_CALL,
+	BPF_MOD_JUMP,
};
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
......
@@ -293,7 +293,7 @@ struct bpf_verifier_state_list {
struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
-		unsigned long map_state;	/* pointer/poison value for maps */
+		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
@@ -301,6 +301,7 @@ struct bpf_insn_aux_data {
			u32 map_off;		/* offset from value base address */
		};
	};
+	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	bool seen; /* this insn was processed by the verifier */
......
@@ -952,6 +952,9 @@ void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+				struct bpf_jit_poke_descriptor *poke);
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed);
@@ -1050,11 +1053,23 @@ static inline bool ebpf_jit_enabled(void)
	return false;
}
static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
return false;
}
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}
static inline int
bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke)
{
return -ENOTSUPP;
}
static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
......
@@ -586,10 +586,17 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);
-	old_ptr = xchg(array->ptrs + index, new_ptr);
+	if (map->ops->map_poke_run) {
+		mutex_lock(&array->aux->poke_mutex);
+		old_ptr = xchg(array->ptrs + index, new_ptr);
+		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
+		mutex_unlock(&array->aux->poke_mutex);
+	} else {
+		old_ptr = xchg(array->ptrs + index, new_ptr);
+	}
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
@@ -602,7 +609,15 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
	if (index >= array->map.max_entries)
		return -E2BIG;
-	old_ptr = xchg(array->ptrs + index, NULL);
+	if (map->ops->map_poke_run) {
+		mutex_lock(&array->aux->poke_mutex);
+		old_ptr = xchg(array->ptrs + index, NULL);
+		map->ops->map_poke_run(map, index, old_ptr, NULL);
+		mutex_unlock(&array->aux->poke_mutex);
+	} else {
+		old_ptr = xchg(array->ptrs + index, NULL);
+	}
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
@@ -671,17 +686,195 @@ static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
	rcu_read_unlock();
}
struct prog_poke_elem {
struct list_head list;
struct bpf_prog_aux *aux;
};
static int prog_array_map_poke_track(struct bpf_map *map,
struct bpf_prog_aux *prog_aux)
{
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
int ret = 0;
aux = container_of(map, struct bpf_array, map)->aux;
mutex_lock(&aux->poke_mutex);
list_for_each_entry(elem, &aux->poke_progs, list) {
if (elem->aux == prog_aux)
goto out;
}
elem = kmalloc(sizeof(*elem), GFP_KERNEL);
if (!elem) {
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&elem->list);
/* We must track the program's aux info at this point in time
* since the program pointer itself may not be stable yet, see
* also comment in prog_array_map_poke_run().
*/
elem->aux = prog_aux;
list_add_tail(&elem->list, &aux->poke_progs);
out:
mutex_unlock(&aux->poke_mutex);
return ret;
}
static void prog_array_map_poke_untrack(struct bpf_map *map,
struct bpf_prog_aux *prog_aux)
{
struct prog_poke_elem *elem, *tmp;
struct bpf_array_aux *aux;
aux = container_of(map, struct bpf_array, map)->aux;
mutex_lock(&aux->poke_mutex);
list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
if (elem->aux == prog_aux) {
list_del_init(&elem->list);
kfree(elem);
break;
}
}
mutex_unlock(&aux->poke_mutex);
}
static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old,
struct bpf_prog *new)
{
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
aux = container_of(map, struct bpf_array, map)->aux;
WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
list_for_each_entry(elem, &aux->poke_progs, list) {
struct bpf_jit_poke_descriptor *poke;
int i, ret;
for (i = 0; i < elem->aux->size_poke_tab; i++) {
poke = &elem->aux->poke_tab[i];
/* Few things to be aware of:
*
* 1) We can only ever access aux in this context, but
* not aux->prog since it might not be stable yet and
* there could be danger of use after free otherwise.
* 2) Initially when we start tracking aux, the program
* is not JITed yet and also does not have a kallsyms
* entry. We skip these as poke->ip_stable is not
* active yet. The JIT will do the final fixup before
* setting it stable. The various poke->ip_stable are
* successively activated, so tail call updates can
* arrive from here while JIT is still finishing its
* final fixup for non-activated poke entries.
* 3) On program teardown, the program's kallsym entry gets
* removed out of RCU callback, but we can only untrack
* from sleepable context, therefore bpf_arch_text_poke()
* might not see that this is in BPF text section and
* bails out with -EINVAL. As these are unreachable since
* RCU grace period already passed, we simply skip them.
* 4) Also programs reaching refcount of zero while patching
* is in progress is okay since we're protected under
* poke_mutex and untrack the programs before the JIT
* buffer is freed. When we're still in the middle of
* patching and suddenly kallsyms entry of the program
* gets evicted, we just skip the rest which is fine due
* to point 3).
* 5) Any other error happening below from bpf_arch_text_poke()
* is a unexpected bug.
*/
if (!READ_ONCE(poke->ip_stable))
continue;
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
if (poke->tail_call.map != map ||
poke->tail_call.key != key)
continue;
ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
old ? (u8 *)old->bpf_func +
poke->adj_off : NULL,
new ? (u8 *)new->bpf_func +
poke->adj_off : NULL);
BUG_ON(ret < 0 && ret != -EINVAL);
}
}
}
static void prog_array_map_clear_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_array_aux,
work)->map;
bpf_fd_array_map_clear(map);
bpf_map_put(map);
}
static void prog_array_map_clear(struct bpf_map *map)
{
struct bpf_array_aux *aux = container_of(map, struct bpf_array,
map)->aux;
bpf_map_inc(map);
schedule_work(&aux->work);
}
static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
struct bpf_array_aux *aux;
struct bpf_map *map;
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
if (!aux)
return ERR_PTR(-ENOMEM);
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
map = array_map_alloc(attr);
if (IS_ERR(map)) {
kfree(aux);
return map;
}
container_of(map, struct bpf_array, map)->aux = aux;
aux->map = map;
return map;
}
static void prog_array_map_free(struct bpf_map *map)
{
struct prog_poke_elem *elem, *tmp;
struct bpf_array_aux *aux;
aux = container_of(map, struct bpf_array, map)->aux;
list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
list_del_init(&elem->list);
kfree(elem);
}
kfree(aux);
fd_array_map_free(map);
}
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
-	.map_alloc = array_map_alloc,
-	.map_free = fd_array_map_free,
+	.map_alloc = prog_array_map_alloc,
+	.map_free = prog_array_map_free,
+	.map_poke_track = prog_array_map_poke_track,
+	.map_poke_untrack = prog_array_map_poke_untrack,
+	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
-	.map_release_uref = bpf_fd_array_map_clear,
+	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
};
......
@@ -256,6 +256,7 @@ void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		free_percpu(fp->aux->stats);
+		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	vfree(fp);
@@ -756,6 +757,39 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
	return ret;
}
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke)
{
struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
static const u32 poke_tab_max = 1024;
u32 slot = prog->aux->size_poke_tab;
u32 size = slot + 1;
if (size > poke_tab_max)
return -ENOSPC;
if (poke->ip || poke->ip_stable || poke->adj_off)
return -EINVAL;
switch (poke->reason) {
case BPF_POKE_REASON_TAIL_CALL:
if (!poke->tail_call.map)
return -EINVAL;
break;
default:
return -EINVAL;
}
tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
if (!tab)
return -ENOMEM;
memcpy(&tab[slot], poke, sizeof(*poke));
prog->aux->size_poke_tab = size;
prog->aux->poke_tab = tab;
return slot;
}
static atomic_long_t bpf_jit_current;
/* Can be overridden by an arch's JIT compiler if it has a custom,
@@ -1691,18 +1725,17 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
	if (fp->kprobe_override)
		return false;
-	if (!array->owner_prog_type) {
+	if (!array->aux->type) {
		/* There's no owner yet where we could check for
		 * compatibility.
		 */
-		array->owner_prog_type = fp->type;
-		array->owner_jited = fp->jited;
+		array->aux->type = fp->type;
+		array->aux->jited = fp->jited;
		return true;
	}
-	return array->owner_prog_type == fp->type &&
-	       array->owner_jited == fp->jited;
+	return array->aux->type == fp->type &&
+	       array->aux->jited == fp->jited;
}
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@ -2003,12 +2036,40 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
		: 0;
}
static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux)
{
enum bpf_cgroup_storage_type stype;
for_each_cgroup_storage_type(stype) {
if (!aux->cgroup_storage[stype])
continue;
bpf_cgroup_storage_release(aux->prog,
aux->cgroup_storage[stype]);
}
}
static void bpf_free_used_maps(struct bpf_prog_aux *aux)
{
struct bpf_map *map;
int i;
bpf_free_cgroup_storage(aux);
for (i = 0; i < aux->used_map_cnt; i++) {
map = aux->used_maps[i];
if (map->ops->map_poke_untrack)
map->ops->map_poke_untrack(map, aux);
bpf_map_put(map);
}
kfree(aux->used_maps);
}
static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;
	int i;
	aux = container_of(work, struct bpf_prog_aux, work);
+	bpf_free_used_maps(aux);
	if (bpf_prog_is_dev_bound(aux))
		bpf_prog_offload_destroy(aux->prog);
#ifdef CONFIG_PERF_EVENTS
......
@@ -74,7 +74,7 @@ struct bpf_dtab_netdev {
struct bpf_dtab {
	struct bpf_map map;
-	struct bpf_dtab_netdev **netdev_map;
+	struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */
	struct list_head __percpu *flush_list;
	struct list_head list;
@@ -101,6 +101,12 @@ static struct hlist_head *dev_map_create_hash(unsigned int entries)
	return hash;
}
static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
int idx)
{
return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}
static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	int err, cpu;
@@ -120,8 +126,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
	bpf_map_init_from_attr(&dtab->map, attr);
	/* make sure page count doesn't overflow */
-	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
-	cost += sizeof(struct list_head) * num_possible_cpus();
+	cost = (u64) sizeof(struct list_head) * num_possible_cpus();
	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
@@ -129,6 +134,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
		if (!dtab->n_buckets) /* Overflow check */
			return -EINVAL;
		cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
+	} else {
+		cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	}
	/* if map size is larger than memlock limit, reject it */
@@ -143,24 +150,22 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu));
-	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
-					      sizeof(struct bpf_dtab_netdev *),
-					      dtab->map.numa_node);
-	if (!dtab->netdev_map)
-		goto free_percpu;
	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
		if (!dtab->dev_index_head)
-			goto free_map_area;
+			goto free_percpu;
		spin_lock_init(&dtab->index_lock);
+	} else {
+		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+						      sizeof(struct bpf_dtab_netdev *),
+						      dtab->map.numa_node);
+		if (!dtab->netdev_map)
+			goto free_percpu;
	}
	return 0;
-free_map_area:
-	bpf_map_area_free(dtab->netdev_map);
free_percpu:
	free_percpu(dtab->flush_list);
free_charge:
@@ -228,6 +233,24 @@ static void dev_map_free(struct bpf_map *map)
		cond_resched();
	}
if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
for (i = 0; i < dtab->n_buckets; i++) {
struct bpf_dtab_netdev *dev;
struct hlist_head *head;
struct hlist_node *next;
head = dev_map_index_hash(dtab, i);
hlist_for_each_entry_safe(dev, next, head, index_hlist) {
hlist_del_rcu(&dev->index_hlist);
free_percpu(dev->bulkq);
dev_put(dev->dev);
kfree(dev);
}
}
kfree(dtab->dev_index_head);
} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;
@@ -240,9 +263,10 @@ static void dev_map_free(struct bpf_map *map)
			kfree(dev);
		}
-	free_percpu(dtab->flush_list);
	bpf_map_area_free(dtab->netdev_map);
-	kfree(dtab->dev_index_head);
+	}
+	free_percpu(dtab->flush_list);
	kfree(dtab);
}
@@ -263,12 +287,6 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
	return 0;
}
-static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
-						    int idx)
-{
-	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
-}
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
......
@@ -17,9 +17,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
	if (IS_ERR(inner_map))
		return inner_map;
-	/* prog_array->owner_prog_type and owner_jited
-	 * is a runtime binding. Doing static check alone
-	 * in the verifier is not enough.
+	/* prog_array->aux->{type,jited} is a runtime binding.
+	 * Doing static check alone in the verifier is not enough.
	 */
	if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	    inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
......
@@ -25,12 +25,13 @@
#include <linux/nospec.h>
#include <uapi/linux/btf.h>
-#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
-			  (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
+#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+#define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
-#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
+#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
+			IS_FD_HASH(map))
#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY)
@@ -389,13 +390,12 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
-	u32 owner_prog_type = 0;
-	u32 owner_jited = 0;
+	u32 type = 0, jited = 0;
	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
-		owner_prog_type = array->owner_prog_type;
-		owner_jited = array->owner_jited;
+		type = array->aux->type;
+		jited = array->aux->jited;
	}
	seq_printf(m,
@@ -415,12 +415,9 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
		   map->memory.pages * 1ULL << PAGE_SHIFT,
		   map->id,
		   READ_ONCE(map->frozen));
-	if (owner_prog_type) {
-		seq_printf(m, "owner_prog_type:\t%u\n",
-			   owner_prog_type);
-		seq_printf(m, "owner_jited:\t%u\n",
-			   owner_jited);
+	if (type) {
+		seq_printf(m, "owner_prog_type:\t%u\n", type);
+		seq_printf(m, "owner_jited:\t%u\n", jited);
	}
}
#endif
@@ -881,7 +878,7 @@ static int map_lookup_elem(union bpf_attr *attr)
		err = bpf_percpu_cgroup_storage_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
-	} else if (IS_FD_ARRAY(map)) {
+	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
		err = bpf_fd_array_map_lookup_elem(map, key, value);
	} else if (IS_FD_HASH(map)) {
		err = bpf_fd_htab_map_lookup_elem(map, key, value);
@@ -1008,6 +1005,10 @@ static int map_update_elem(union bpf_attr *attr)
		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		goto out;
+	} else if (IS_FD_PROG_ARRAY(map)) {
+		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+						   attr->flags);
+		goto out;
	}
	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
@@ -1090,6 +1091,9 @@ static int map_delete_elem(union bpf_attr *attr)
	if (bpf_map_is_dev_bound(map)) {
		err = bpf_map_offload_delete_elem(map, key);
		goto out;
+	} else if (IS_FD_PROG_ARRAY(map)) {
+		err = map->ops->map_delete_elem(map, key);
+		goto out;
	}
	preempt_disable();
@@ -1302,25 +1306,6 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
	return 0;
}
/* drop refcnt on maps used by eBPF program and free auxilary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
enum bpf_cgroup_storage_type stype;
int i;
for_each_cgroup_storage_type(stype) {
if (!aux->cgroup_storage[stype])
continue;
bpf_cgroup_storage_release(aux->prog,
aux->cgroup_storage[stype]);
}
for (i = 0; i < aux->used_map_cnt; i++)
bpf_map_put(aux->used_maps[i]);
kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -1415,7 +1400,6 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
	kvfree(aux->func_info);
	kfree(aux->func_info_aux);
-	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	security_bpf_prog_free(aux);
	bpf_prog_free(aux->prog);
......
@@ -77,7 +77,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
	int err;
	if (fentry_cnt + fexit_cnt == 0) {
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_NOP,
+		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
					 old_image, NULL);
		tr->selector = 0;
		goto out;
@@ -105,12 +105,12 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
	if (tr->selector)
		/* progs already running at this address */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL_TO_CALL,
+		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
					 old_image, new_image);
	else
		/* first time registering */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_NOP_TO_CALL,
-					 NULL, new_image);
+		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
+					 new_image);
	if (err)
		goto out;
	tr->selector++;
......
@@ -171,6 +171,9 @@ struct bpf_verifier_stack_elem {
#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64
+#define BPF_MAP_KEY_POISON	(1ULL << 63)
+#define BPF_MAP_KEY_SEEN	(1ULL << 62)
#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
@@ -178,12 +181,12 @@ struct bpf_verifier_stack_elem {
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
-	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}
static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
-	return aux->map_state & BPF_MAP_PTR_UNPRIV;
+	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}
static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
@@ -191,10 +194,33 @@ static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
-	aux->map_state = (unsigned long)map |
-			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+	aux->map_ptr_state = (unsigned long)map |
+			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
return aux->map_key_state & BPF_MAP_KEY_POISON;
}
static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}
static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}
static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
bool poisoned = bpf_map_key_poisoned(aux);
aux->map_key_state = state | BPF_MAP_KEY_SEEN |
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
@@ -1007,6 +1033,17 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
						 reg->umax_value));
}
static void __reg_bound_offset32(struct bpf_reg_state *reg)
{
u64 mask = 0xffffFFFF;
struct tnum range = tnum_range(reg->umin_value & mask,
reg->umax_value & mask);
struct tnum lo32 = tnum_cast(reg->var_off, 4);
struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
}
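A hedged worked example of what __reg_bound_offset32() buys (numbers invented for illustration): suppose a 32-bit conditional jump lets the verifier conclude umin_value = 0 and umax_value = 5 while var_off is still fully unknown. tnum_range(0, 5) is the tnum (value = 0, mask = 7), so intersecting it with the low 32 bits of var_off marks every low bit above bit 2 as known zero, while hi32 carries the untouched upper half through unchanged. Without this 32-bit variant, the 64-bit __reg_bound_offset() alone could not exploit bounds that only hold for the lower word.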
/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
@@ -4079,15 +4116,49 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		return -EACCES;
	}
-	if (!BPF_MAP_PTR(aux->map_state))
+	if (!BPF_MAP_PTR(aux->map_ptr_state))
		bpf_map_ptr_store(aux, meta->map_ptr,
				  meta->map_ptr->unpriv_array);
-	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
				  meta->map_ptr->unpriv_array);
	return 0;
}
static int
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
int func_id, int insn_idx)
{
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
struct bpf_reg_state *regs = cur_regs(env), *reg;
struct bpf_map *map = meta->map_ptr;
struct tnum range;
u64 val;
if (func_id != BPF_FUNC_tail_call)
return 0;
if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
verbose(env, "kernel subsystem misconfigured verifier\n");
return -EINVAL;
}
range = tnum_range(0, map->max_entries - 1);
reg = &regs[BPF_REG_3];
if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
return 0;
}
val = reg->var_off.value;
if (bpf_map_key_unseen(aux))
bpf_map_key_store(aux, val);
else if (!bpf_map_key_poisoned(aux) &&
bpf_map_key_immediate(aux) != val)
bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
return 0;
}
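As a hedged example of the rule record_func_key() implements (fragment invented for illustration): a program whose only tail call is bpf_tail_call(ctx, &jmp_table, 2) with the literal 2 leaves map_key_state holding key 2, so fixup_bpf_calls() below can attach a poke descriptor and the x86 JIT can emit a direct jump. If another path reaches the same instruction with a variable or a different constant index, the key state is poisoned and that tail call keeps the generic indirect (retpoline-ed) dispatch.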
static int check_reference_leak(struct bpf_verifier_env *env)
{
	struct bpf_func_state *state = cur_func(env);
@@ -4162,6 +4233,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
	if (err)
		return err;
+	err = record_func_key(env, &meta, func_id, insn_idx);
+	if (err)
+		return err;
	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
@@ -5589,6 +5664,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(false_reg);
	__reg_bound_offset(true_reg);
if (is_jmp32) {
__reg_bound_offset32(false_reg);
__reg_bound_offset32(true_reg);
}
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
@@ -5698,6 +5777,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(false_reg);
	__reg_bound_offset(true_reg);
if (is_jmp32) {
__reg_bound_offset32(false_reg);
__reg_bound_offset32(true_reg);
}
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
@@ -9046,6 +9129,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
+	bool expect_blinding = bpf_jit_blinding_enabled(prog);
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
@@ -9054,7 +9138,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
-	int i, cnt, delta = 0;
+	int i, ret, cnt, delta = 0;
	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
@@ -9198,6 +9282,26 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			insn->code = BPF_JMP | BPF_TAIL_CALL;
			aux = &env->insn_aux_data[i + delta];
if (prog->jit_requested && !expect_blinding &&
!bpf_map_key_poisoned(aux) &&
!bpf_map_ptr_poisoned(aux) &&
!bpf_map_ptr_unpriv(aux)) {
struct bpf_jit_poke_descriptor desc = {
.reason = BPF_POKE_REASON_TAIL_CALL,
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
.tail_call.key = bpf_map_key_immediate(aux),
};
ret = bpf_jit_add_poke_descriptor(prog, &desc);
if (ret < 0) {
verbose(env, "adding tail call poke descriptor failed\n");
return ret;
}
insn->imm = ret + 1;
continue;
}
			if (!bpf_map_ptr_unpriv(aux))
				continue;
@@ -9212,7 +9316,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
				return -EINVAL;
			}
-			map_ptr = BPF_MAP_PTR(aux->map_state);
+			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@ -9246,7 +9350,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
		if (bpf_map_ptr_poisoned(aux))
			goto patch_call_imm;
-		map_ptr = BPF_MAP_PTR(aux->map_state);
+		map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
		ops = map_ptr->ops;
		if (insn->imm == BPF_FUNC_map_lookup_elem &&
		    ops->map_gen_lookup) {
@@ -9326,6 +9430,23 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
		insn->imm = fn->func - __bpf_call_base;
	}
/* Since poke tab is now finalized, publish aux to tracker. */
for (i = 0; i < prog->aux->size_poke_tab; i++) {
map_ptr = prog->aux->poke_tab[i].tail_call.map;
if (!map_ptr->ops->map_poke_track ||
!map_ptr->ops->map_poke_untrack ||
!map_ptr->ops->map_poke_run) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
if (ret < 0) {
verbose(env, "tracking tail call prog failed\n");
return ret;
}
}
	return 0;
}
......
@@ -447,10 +447,10 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
	return __xsk_sendmsg(sk);
}
-static unsigned int xsk_poll(struct file *file, struct socket *sock,
-			     struct poll_table_struct *wait)
+static __poll_t xsk_poll(struct file *file, struct socket *sock,
+			 struct poll_table_struct *wait)
{
-	unsigned int mask = datagram_poll(file, sock, wait);
+	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
@@ -472,9 +472,9 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
	}
	if (xs->rx && !xskq_empty_desc(xs->rx))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
-		mask |= POLLOUT | POLLWRNORM;
+		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}
......
@@ -16,7 +16,13 @@ CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
# isn't set and when invoked from selftests build, where srctree
# is set to ".". building_out_of_srctree is undefined for in srctree
# builds
+ifeq ($(srctree),)
+update_srctree := 1
+endif
ifndef building_out_of_srctree
+update_srctree := 1
+endif
+ifeq ($(update_srctree),1)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
......
@@ -428,15 +428,15 @@ static struct btf *btf__parse_raw(const char *file)
static bool is_btf_raw(const char *file)
{
	__u16 magic = 0;
-	int fd;
+	int fd, nb_read;
	fd = open(file, O_RDONLY);
	if (fd < 0)
		return false;
-	read(fd, &magic, sizeof(magic));
+	nb_read = read(fd, &magic, sizeof(magic));
	close(fd);
-	return magic == BTF_MAGIC;
+	return nb_read == sizeof(magic) && magic == BTF_MAGIC;
}
static int do_dump(int argc, char **argv)
......
@@ -44,17 +44,4 @@ enum libbpf_pin_type {
	LIBBPF_PIN_BY_NAME,
};
/* The following types should be used by BPF_PROG_TYPE_TRACING program to
* access kernel function arguments. BPF trampoline and raw tracepoints
* typecast arguments to 'unsigned long long'.
*/
typedef int __attribute__((aligned(8))) ks32;
typedef char __attribute__((aligned(8))) ks8;
typedef short __attribute__((aligned(8))) ks16;
typedef long long __attribute__((aligned(8))) ks64;
typedef unsigned int __attribute__((aligned(8))) ku32;
typedef unsigned char __attribute__((aligned(8))) ku8;
typedef unsigned short __attribute__((aligned(8))) ku16;
typedef unsigned long long __attribute__((aligned(8))) ku64;
#endif
@@ -161,7 +161,7 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
define CLANG_BPF_BUILD_RULE
	($(CLANG) $3 -O2 -target bpf -emit-llvm \
		-c $1 -o - || echo "BPF obj compilation failed") | \
-	$(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
+	$(LLC) -mattr=dwarfris -march=bpf -mcpu=probe $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BPF_TRACE_HELPERS_H
#define __BPF_TRACE_HELPERS_H
#include "bpf_helpers.h"
#define __BPF_MAP_0(i, m, v, ...) v
#define __BPF_MAP_1(i, m, v, t, a, ...) m(t, a, ctx[i])
#define __BPF_MAP_2(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_1(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_3(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_2(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_4(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_3(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_5(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_4(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_6(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_5(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_7(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_6(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_8(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_7(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_9(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_8(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_10(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_9(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_11(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_10(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP_12(i, m, v, t, a, ...) m(t, a, ctx[i]), __BPF_MAP_11(i+1, m, v, __VA_ARGS__)
#define __BPF_MAP(n, ...) __BPF_MAP_##n(0, __VA_ARGS__)
/* BPF sizeof(void *) is always 8, so no need to cast to long first
* for ptr to avoid compiler warning.
*/
#define __BPF_CAST(t, a, ctx) (t) ctx
#define __BPF_V void
#define __BPF_N
#define __BPF_DECL_ARGS(t, a, ctx) t a
#define BPF_TRACE_x(x, sec_name, fname, ret_type, ...) \
static __always_inline ret_type \
____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
\
SEC(sec_name) \
ret_type fname(__u64 *ctx) \
{ \
return ____##fname(__BPF_MAP(x, __BPF_CAST, __BPF_N, __VA_ARGS__));\
} \
\
static __always_inline \
ret_type ____##fname(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
#define BPF_TRACE_0(sec, fname, ...) BPF_TRACE_x(0, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_1(sec, fname, ...) BPF_TRACE_x(1, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_2(sec, fname, ...) BPF_TRACE_x(2, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_3(sec, fname, ...) BPF_TRACE_x(3, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_4(sec, fname, ...) BPF_TRACE_x(4, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_5(sec, fname, ...) BPF_TRACE_x(5, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_6(sec, fname, ...) BPF_TRACE_x(6, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_7(sec, fname, ...) BPF_TRACE_x(7, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_8(sec, fname, ...) BPF_TRACE_x(8, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_9(sec, fname, ...) BPF_TRACE_x(9, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_10(sec, fname, ...) BPF_TRACE_x(10, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_11(sec, fname, ...) BPF_TRACE_x(11, sec, fname, int, __VA_ARGS__)
#define BPF_TRACE_12(sec, fname, ...) BPF_TRACE_x(12, sec, fname, int, __VA_ARGS__)
#endif
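Aside for readers skimming the new bpf_trace_helpers.h: BPF_TRACE_x() hides the raw __u64 *ctx entry point and casts each context slot to the declared argument type before handing off to an always-inlined body. A minimal hedged sketch of a two-argument fexit program using it (the traced function and all names below are illustrative, not taken from this series):
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_trace_helpers.h"
char _license[] SEC("license") = "GPL";
static volatile __u64 seen;
/* Hypothetical kernel target; for fexit the last argument is the return value. */
BPF_TRACE_2("fexit/some_kernel_func", handle_some_func, int, arg, int, ret)
{
	/* 'arg' and 'ret' were cast from ctx[0] and ctx[1] by __BPF_CAST */
	seen = (arg == 1 && ret == 2);
	return 0;
}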
...@@ -15,6 +15,8 @@ static int libbpf_debug_print(enum libbpf_print_level level, ...@@ -15,6 +15,8 @@ static int libbpf_debug_print(enum libbpf_print_level level,
return 0; return 0;
} }
extern int extra_prog_load_log_flags;
static int check_load(const char *file, enum bpf_prog_type type) static int check_load(const char *file, enum bpf_prog_type type)
{ {
struct bpf_prog_load_attr attr; struct bpf_prog_load_attr attr;
...@@ -24,7 +26,7 @@ static int check_load(const char *file, enum bpf_prog_type type) ...@@ -24,7 +26,7 @@ static int check_load(const char *file, enum bpf_prog_type type)
memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
attr.file = file; attr.file = file;
attr.prog_type = type; attr.prog_type = type;
attr.log_level = 4; attr.log_level = 4 | extra_prog_load_log_flags;
attr.prog_flags = BPF_F_TEST_RND_HI32; attr.prog_flags = BPF_F_TEST_RND_HI32;
err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
bpf_object__close(obj); bpf_object__close(obj);
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <test_progs.h> #include <test_progs.h>
#include "progs/core_reloc_types.h" #include "progs/core_reloc_types.h"
#include <sys/mman.h> #include <sys/mman.h>
#include <sys/syscall.h>
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name) #define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
...@@ -452,6 +453,7 @@ static struct core_reloc_test_case test_cases[] = { ...@@ -452,6 +453,7 @@ static struct core_reloc_test_case test_cases[] = {
struct data { struct data {
char in[256]; char in[256];
char out[256]; char out[256];
uint64_t my_pid_tgid;
}; };
static size_t roundup_page(size_t sz) static size_t roundup_page(size_t sz)
...@@ -471,9 +473,12 @@ void test_core_reloc(void) ...@@ -471,9 +473,12 @@ void test_core_reloc(void)
struct bpf_map *data_map; struct bpf_map *data_map;
struct bpf_program *prog; struct bpf_program *prog;
struct bpf_object *obj; struct bpf_object *obj;
uint64_t my_pid_tgid;
struct data *data; struct data *data;
void *mmap_data = NULL; void *mmap_data = NULL;
my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32);
for (i = 0; i < ARRAY_SIZE(test_cases); i++) { for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
test_case = &test_cases[i]; test_case = &test_cases[i];
if (!test__start_subtest(test_case->case_name)) if (!test__start_subtest(test_case->case_name))
...@@ -517,11 +522,6 @@ void test_core_reloc(void) ...@@ -517,11 +522,6 @@ void test_core_reloc(void)
goto cleanup; goto cleanup;
} }
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
PTR_ERR(link)))
goto cleanup;
data_map = bpf_object__find_map_by_name(obj, "test_cor.bss"); data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
if (CHECK(!data_map, "find_data_map", "data map not found\n")) if (CHECK(!data_map, "find_data_map", "data map not found\n"))
goto cleanup; goto cleanup;
...@@ -537,6 +537,12 @@ void test_core_reloc(void) ...@@ -537,6 +537,12 @@ void test_core_reloc(void)
memset(mmap_data, 0, sizeof(*data)); memset(mmap_data, 0, sizeof(*data));
memcpy(data->in, test_case->input, test_case->input_len); memcpy(data->in, test_case->input, test_case->input_len);
data->my_pid_tgid = my_pid_tgid;
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
PTR_ERR(link)))
goto cleanup;
/* trigger test run */ /* trigger test run */
usleep(1); usleep(1);
......
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#define _GNU_SOURCE
#include <sched.h>
#include <test_progs.h>
#define MAX_CNT 100000
static __u64 time_get_ns(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
static int test_task_rename(const char *prog)
{
int i, fd, duration = 0, err;
char buf[] = "test\n";
__u64 start_time;
fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
if (CHECK(fd < 0, "open /proc", "err %d", errno))
return -1;
start_time = time_get_ns();
for (i = 0; i < MAX_CNT; i++) {
err = write(fd, buf, sizeof(buf));
if (err < 0) {
CHECK(err < 0, "task rename", "err %d", errno);
close(fd);
return -1;
}
}
printf("task_rename %s\t%lluK events per sec\n", prog,
MAX_CNT * 1000000ll / (time_get_ns() - start_time));
close(fd);
return 0;
}
static void test_run(const char *prog)
{
test_task_rename(prog);
}
static void setaffinity(void)
{
cpu_set_t cpuset;
int cpu = 0;
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
sched_setaffinity(0, sizeof(cpuset), &cpuset);
}
void test_test_overhead(void)
{
const char *kprobe_name = "kprobe/__set_task_comm";
const char *kretprobe_name = "kretprobe/__set_task_comm";
const char *raw_tp_name = "raw_tp/task_rename";
const char *fentry_name = "fentry/__set_task_comm";
const char *fexit_name = "fexit/__set_task_comm";
const char *kprobe_func = "__set_task_comm";
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
struct bpf_program *fentry_prog, *fexit_prog;
struct bpf_object *obj;
struct bpf_link *link;
int err, duration = 0;
obj = bpf_object__open_file("./test_overhead.o", NULL);
if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj)))
return;
kprobe_prog = bpf_object__find_program_by_title(obj, kprobe_name);
if (CHECK(!kprobe_prog, "find_probe",
"prog '%s' not found\n", kprobe_name))
goto cleanup;
kretprobe_prog = bpf_object__find_program_by_title(obj, kretprobe_name);
if (CHECK(!kretprobe_prog, "find_probe",
"prog '%s' not found\n", kretprobe_name))
goto cleanup;
raw_tp_prog = bpf_object__find_program_by_title(obj, raw_tp_name);
if (CHECK(!raw_tp_prog, "find_probe",
"prog '%s' not found\n", raw_tp_name))
goto cleanup;
fentry_prog = bpf_object__find_program_by_title(obj, fentry_name);
if (CHECK(!fentry_prog, "find_probe",
"prog '%s' not found\n", fentry_name))
goto cleanup;
fexit_prog = bpf_object__find_program_by_title(obj, fexit_name);
if (CHECK(!fexit_prog, "find_probe",
"prog '%s' not found\n", fexit_name))
goto cleanup;
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
setaffinity();
/* base line run */
test_run("base");
/* attach kprobe */
link = bpf_program__attach_kprobe(kprobe_prog, false /* retprobe */,
kprobe_func);
if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
goto cleanup;
test_run("kprobe");
bpf_link__destroy(link);
/* attach kretprobe */
link = bpf_program__attach_kprobe(kretprobe_prog, true /* retprobe */,
kprobe_func);
if (CHECK(IS_ERR(link), "attach kretprobe", "err %ld\n", PTR_ERR(link)))
goto cleanup;
test_run("kretprobe");
bpf_link__destroy(link);
/* attach raw_tp */
link = bpf_program__attach_raw_tracepoint(raw_tp_prog, "task_rename");
if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
goto cleanup;
test_run("raw_tp");
bpf_link__destroy(link);
/* attach fentry */
link = bpf_program__attach_trace(fentry_prog);
if (CHECK(IS_ERR(link), "attach fentry", "err %ld\n", PTR_ERR(link)))
goto cleanup;
test_run("fentry");
bpf_link__destroy(link);
/* attach fexit */
link = bpf_program__attach_trace(fexit_prog);
if (CHECK(IS_ERR(link), "attach fexit", "err %ld\n", PTR_ERR(link)))
goto cleanup;
test_run("fexit");
bpf_link__destroy(link);
cleanup:
bpf_object__close(obj);
}
...@@ -2,89 +2,53 @@ ...@@ -2,89 +2,53 @@
/* Copyright (c) 2019 Facebook */ /* Copyright (c) 2019 Facebook */
#include <linux/bpf.h> #include <linux/bpf.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
#include "bpf_trace_helpers.h"
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
struct test1 {
ks32 a;
};
static volatile __u64 test1_result; static volatile __u64 test1_result;
SEC("fentry/bpf_fentry_test1") BPF_TRACE_1("fentry/bpf_fentry_test1", test1, int, a)
int test1(struct test1 *ctx)
{ {
test1_result = ctx->a == 1; test1_result = a == 1;
return 0; return 0;
} }
struct test2 {
ks32 a;
ku64 b;
};
static volatile __u64 test2_result; static volatile __u64 test2_result;
SEC("fentry/bpf_fentry_test2") BPF_TRACE_2("fentry/bpf_fentry_test2", test2, int, a, __u64, b)
int test2(struct test2 *ctx)
{ {
test2_result = ctx->a == 2 && ctx->b == 3; test2_result = a == 2 && b == 3;
return 0; return 0;
} }
struct test3 {
ks8 a;
ks32 b;
ku64 c;
};
static volatile __u64 test3_result; static volatile __u64 test3_result;
SEC("fentry/bpf_fentry_test3") BPF_TRACE_3("fentry/bpf_fentry_test3", test3, char, a, int, b, __u64, c)
int test3(struct test3 *ctx)
{ {
test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6; test3_result = a == 4 && b == 5 && c == 6;
return 0; return 0;
} }
struct test4 {
void *a;
ks8 b;
ks32 c;
ku64 d;
};
static volatile __u64 test4_result; static volatile __u64 test4_result;
SEC("fentry/bpf_fentry_test4") BPF_TRACE_4("fentry/bpf_fentry_test4", test4,
int test4(struct test4 *ctx) void *, a, char, b, int, c, __u64, d)
{ {
test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10;
ctx->d == 10;
return 0; return 0;
} }
struct test5 {
ku64 a;
void *b;
ks16 c;
ks32 d;
ku64 e;
};
static volatile __u64 test5_result; static volatile __u64 test5_result;
SEC("fentry/bpf_fentry_test5") BPF_TRACE_5("fentry/bpf_fentry_test5", test5,
int test5(struct test5 *ctx) __u64, a, void *, b, short, c, int, d, __u64, e)
{ {
test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
ctx->d == 14 && ctx->e == 15; e == 15;
return 0; return 0;
} }
struct test6 {
ku64 a;
void *b;
ks16 c;
ks32 d;
void *e;
ks64 f;
};
static volatile __u64 test6_result; static volatile __u64 test6_result;
SEC("fentry/bpf_fentry_test6") BPF_TRACE_6("fentry/bpf_fentry_test6", test6,
int test6(struct test6 *ctx) __u64, a, void *, b, short, c, int, d, void *, e, __u64, f)
{ {
test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21; e == (void *)20 && f == 21;
return 0; return 0;
} }
...@@ -2,46 +2,37 @@ ...@@ -2,46 +2,37 @@
/* Copyright (c) 2019 Facebook */ /* Copyright (c) 2019 Facebook */
#include <linux/bpf.h> #include <linux/bpf.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
#include "bpf_trace_helpers.h"
struct sk_buff { struct sk_buff {
unsigned int len; unsigned int len;
}; };
struct args {
struct sk_buff *skb;
ks32 ret;
};
static volatile __u64 test_result; static volatile __u64 test_result;
SEC("fexit/test_pkt_access") BPF_TRACE_2("fexit/test_pkt_access", test_main,
int test_main(struct args *ctx) struct sk_buff *, skb, int, ret)
{ {
struct sk_buff *skb = ctx->skb;
int len; int len;
__builtin_preserve_access_index(({ __builtin_preserve_access_index(({
len = skb->len; len = skb->len;
})); }));
if (len != 74 || ctx->ret != 0) if (len != 74 || ret != 0)
return 0; return 0;
test_result = 1; test_result = 1;
return 0; return 0;
} }
struct args_subprog1 {
struct sk_buff *skb;
ks32 ret;
};
static volatile __u64 test_result_subprog1; static volatile __u64 test_result_subprog1;
SEC("fexit/test_pkt_access_subprog1") BPF_TRACE_2("fexit/test_pkt_access_subprog1", test_subprog1,
int test_subprog1(struct args_subprog1 *ctx) struct sk_buff *, skb, int, ret)
{ {
struct sk_buff *skb = ctx->skb;
int len; int len;
__builtin_preserve_access_index(({ __builtin_preserve_access_index(({
len = skb->len; len = skb->len;
})); }));
if (len != 74 || ctx->ret != 148) if (len != 74 || ret != 148)
return 0; return 0;
test_result_subprog1 = 1; test_result_subprog1 = 1;
return 0; return 0;
...@@ -62,8 +53,8 @@ int test_subprog1(struct args_subprog1 *ctx) ...@@ -62,8 +53,8 @@ int test_subprog1(struct args_subprog1 *ctx)
* instead of accurate types. * instead of accurate types.
*/ */
struct args_subprog2 { struct args_subprog2 {
ku64 args[5]; __u64 args[5];
ku64 ret; __u64 ret;
}; };
static volatile __u64 test_result_subprog2; static volatile __u64 test_result_subprog2;
SEC("fexit/test_pkt_access_subprog2") SEC("fexit/test_pkt_access_subprog2")
......
...@@ -2,97 +2,56 @@ ...@@ -2,97 +2,56 @@
/* Copyright (c) 2019 Facebook */ /* Copyright (c) 2019 Facebook */
#include <linux/bpf.h> #include <linux/bpf.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
#include "bpf_trace_helpers.h"
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
struct test1 {
ks32 a;
ks32 ret;
};
static volatile __u64 test1_result; static volatile __u64 test1_result;
SEC("fexit/bpf_fentry_test1") BPF_TRACE_2("fexit/bpf_fentry_test1", test1, int, a, int, ret)
int test1(struct test1 *ctx)
{ {
test1_result = ctx->a == 1 && ctx->ret == 2; test1_result = a == 1 && ret == 2;
return 0; return 0;
} }
struct test2 {
ks32 a;
ku64 b;
ks32 ret;
};
static volatile __u64 test2_result; static volatile __u64 test2_result;
SEC("fexit/bpf_fentry_test2") BPF_TRACE_3("fexit/bpf_fentry_test2", test2, int, a, __u64, b, int, ret)
int test2(struct test2 *ctx)
{ {
test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5; test2_result = a == 2 && b == 3 && ret == 5;
return 0; return 0;
} }
struct test3 {
ks8 a;
ks32 b;
ku64 c;
ks32 ret;
};
static volatile __u64 test3_result; static volatile __u64 test3_result;
SEC("fexit/bpf_fentry_test3") BPF_TRACE_4("fexit/bpf_fentry_test3", test3, char, a, int, b, __u64, c, int, ret)
int test3(struct test3 *ctx)
{ {
test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 && test3_result = a == 4 && b == 5 && c == 6 && ret == 15;
ctx->ret == 15;
return 0; return 0;
} }
struct test4 {
void *a;
ks8 b;
ks32 c;
ku64 d;
ks32 ret;
};
static volatile __u64 test4_result; static volatile __u64 test4_result;
SEC("fexit/bpf_fentry_test4") BPF_TRACE_5("fexit/bpf_fentry_test4", test4,
int test4(struct test4 *ctx) void *, a, char, b, int, c, __u64, d, int, ret)
{ {
test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 &&
ctx->d == 10 && ctx->ret == 34; test4_result = a == (void *)7 && b == 8 && c == 9 && d == 10 &&
ret == 34;
return 0; return 0;
} }
struct test5 {
ku64 a;
void *b;
ks16 c;
ks32 d;
ku64 e;
ks32 ret;
};
static volatile __u64 test5_result; static volatile __u64 test5_result;
SEC("fexit/bpf_fentry_test5") BPF_TRACE_6("fexit/bpf_fentry_test5", test5,
int test5(struct test5 *ctx) __u64, a, void *, b, short, c, int, d, __u64, e, int, ret)
{ {
test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && test5_result = a == 11 && b == (void *)12 && c == 13 && d == 14 &&
ctx->d == 14 && ctx->e == 15 && ctx->ret == 65; e == 15 && ret == 65;
return 0; return 0;
} }
struct test6 {
ku64 a;
void *b;
ks16 c;
ks32 d;
void *e;
ks64 f;
ks32 ret;
};
static volatile __u64 test6_result; static volatile __u64 test6_result;
SEC("fexit/bpf_fentry_test6") BPF_TRACE_7("fexit/bpf_fentry_test6", test6,
int test6(struct test6 *ctx) __u64, a, void *, b, short, c, int, d, void *, e, __u64, f,
int, ret)
{ {
test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && test6_result = a == 16 && b == (void *)17 && c == 18 && d == 19 &&
ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 && e == (void *)20 && f == 21 && ret == 111;
ctx->ret == 111;
return 0; return 0;
} }
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <stdbool.h> #include <stdbool.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
#include "bpf_endian.h" #include "bpf_endian.h"
#include "bpf_trace_helpers.h"
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
struct { struct {
...@@ -47,28 +48,18 @@ struct sk_buff { ...@@ -47,28 +48,18 @@ struct sk_buff {
char cb[48]; char cb[48];
}; };
/* copy arguments from
* include/trace/events/skb.h:
* TRACE_EVENT(kfree_skb,
* TP_PROTO(struct sk_buff *skb, void *location),
*
* into struct below:
*/
struct trace_kfree_skb {
struct sk_buff *skb;
void *location;
};
struct meta { struct meta {
int ifindex; int ifindex;
__u32 cb32_0; __u32 cb32_0;
__u8 cb8_0; __u8 cb8_0;
}; };
SEC("tp_btf/kfree_skb") /* TRACE_EVENT(kfree_skb,
int trace_kfree_skb(struct trace_kfree_skb *ctx) * TP_PROTO(struct sk_buff *skb, void *location),
*/
BPF_TRACE_2("tp_btf/kfree_skb", trace_kfree_skb,
struct sk_buff *, skb, void *, location)
{ {
struct sk_buff *skb = ctx->skb;
struct net_device *dev; struct net_device *dev;
struct callback_head *ptr; struct callback_head *ptr;
void *func; void *func;
...@@ -123,17 +114,10 @@ static volatile struct { ...@@ -123,17 +114,10 @@ static volatile struct {
bool fexit_test_ok; bool fexit_test_ok;
} result; } result;
struct eth_type_trans_args { BPF_TRACE_3("fentry/eth_type_trans", fentry_eth_type_trans,
struct sk_buff *skb; struct sk_buff *, skb, struct net_device *, dev,
struct net_device *dev; unsigned short, protocol)
unsigned short protocol; /* return value available to fexit progs */
};
SEC("fentry/eth_type_trans")
int fentry_eth_type_trans(struct eth_type_trans_args *ctx)
{ {
struct sk_buff *skb = ctx->skb;
struct net_device *dev = ctx->dev;
int len, ifindex; int len, ifindex;
__builtin_preserve_access_index(({ __builtin_preserve_access_index(({
...@@ -148,11 +132,10 @@ int fentry_eth_type_trans(struct eth_type_trans_args *ctx) ...@@ -148,11 +132,10 @@ int fentry_eth_type_trans(struct eth_type_trans_args *ctx)
return 0; return 0;
} }
SEC("fexit/eth_type_trans") BPF_TRACE_3("fexit/eth_type_trans", fexit_eth_type_trans,
int fexit_eth_type_trans(struct eth_type_trans_args *ctx) struct sk_buff *, skb, struct net_device *, dev,
unsigned short, protocol)
{ {
struct sk_buff *skb = ctx->skb;
struct net_device *dev = ctx->dev;
int len, ifindex; int len, ifindex;
__builtin_preserve_access_index(({ __builtin_preserve_access_index(({
...@@ -163,7 +146,7 @@ int fexit_eth_type_trans(struct eth_type_trans_args *ctx) ...@@ -163,7 +146,7 @@ int fexit_eth_type_trans(struct eth_type_trans_args *ctx)
/* fexit sees packet without L2 header that eth_type_trans should have /* fexit sees packet without L2 header that eth_type_trans should have
* consumed. * consumed.
*/ */
if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1) if (len != 60 || protocol != bpf_htons(0x86dd) || ifindex != 1)
return 0; return 0;
result.fexit_test_ok = true; result.fexit_test_ok = true;
return 0; return 0;
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
int bpf_func_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
SEC("classifier")
int entry(struct __sk_buff *skb)
{
/* Multiple locations to make sure we patch
* all of them.
*/
bpf_tail_call(skb, &jmp_table, 0);
bpf_tail_call(skb, &jmp_table, 0);
bpf_tail_call(skb, &jmp_table, 0);
bpf_tail_call(skb, &jmp_table, 0);
bpf_tail_call(skb, &jmp_table, 1);
bpf_tail_call(skb, &jmp_table, 1);
bpf_tail_call(skb, &jmp_table, 1);
bpf_tail_call(skb, &jmp_table, 1);
bpf_tail_call(skb, &jmp_table, 2);
bpf_tail_call(skb, &jmp_table, 2);
bpf_tail_call(skb, &jmp_table, 2);
bpf_tail_call(skb, &jmp_table, 2);
return 3;
}
char __license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 5);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 1);
return 0;
}
SEC("classifier/1")
int bpf_func_1(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 2);
return 1;
}
SEC("classifier/2")
int bpf_func_2(struct __sk_buff *skb)
{
return 2;
}
SEC("classifier/3")
int bpf_func_3(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 4);
return 3;
}
SEC("classifier/4")
int bpf_func_4(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 3);
return 4;
}
SEC("classifier")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 0);
/* Check multi-prog update. */
bpf_tail_call(skb, &jmp_table, 2);
/* Check tail call limit. */
bpf_tail_call(skb, &jmp_table, 3);
return 3;
}
char __license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int count;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
count++;
bpf_tail_call(skb, &jmp_table, 0);
return 1;
}
SEC("classifier")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, 0);
return 0;
}
char __license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int selector;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
int bpf_func_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
SEC("classifier")
int entry(struct __sk_buff *skb)
{
bpf_tail_call(skb, &jmp_table, selector);
return 3;
}
char __license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 3);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int selector;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \
int bpf_func_##x(struct __sk_buff *skb) \
{ \
return x; \
}
TAIL_FUNC(0)
TAIL_FUNC(1)
TAIL_FUNC(2)
SEC("classifier")
int entry(struct __sk_buff *skb)
{
int idx = 0;
if (selector == 1234)
idx = 1;
else if (selector == 5678)
idx = 2;
bpf_tail_call(skb, &jmp_table, idx);
return 3;
}
char __license[] SEC("license") = "GPL";
int _version SEC("version") = 1;
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_arrays_output { struct core_reloc_arrays_output {
int a2; int a2;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_bitfields { struct core_reloc_bitfields {
/* unsigned bitfields */ /* unsigned bitfields */
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_bitfields { struct core_reloc_bitfields {
/* unsigned bitfields */ /* unsigned bitfields */
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_existence_output { struct core_reloc_existence_output {
int a_exists; int a_exists;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_flavors { struct core_reloc_flavors {
int a; int a;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_ints { struct core_reloc_ints {
uint8_t u8_field; uint8_t u8_field;
......
...@@ -8,10 +8,11 @@ ...@@ -8,10 +8,11 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; uint64_t my_pid_tgid;
} data = {};
struct core_reloc_kernel_output { struct core_reloc_kernel_output {
int valid[10]; int valid[10];
...@@ -38,6 +39,9 @@ int test_core_kernel(void *ctx) ...@@ -38,6 +39,9 @@ int test_core_kernel(void *ctx)
uint32_t real_tgid = (uint32_t)pid_tgid; uint32_t real_tgid = (uint32_t)pid_tgid;
int pid, tgid; int pid, tgid;
if (data.my_pid_tgid != pid_tgid)
return 0;
if (CORE_READ(&pid, &task->pid) || if (CORE_READ(&pid, &task->pid) ||
CORE_READ(&tgid, &task->tgid)) CORE_READ(&tgid, &task->tgid))
return 1; return 1;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_misc_output { struct core_reloc_misc_output {
int a, b, c; int a, b, c;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_mods_output { struct core_reloc_mods_output {
int a, b, c, d, e, f, g, h; int a, b, c, d, e, f, g, h;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_nesting_substruct { struct core_reloc_nesting_substruct {
int a; int a;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
enum core_reloc_primitives_enum { enum core_reloc_primitives_enum {
A = 0, A = 0,
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_ptr_as_arr { struct core_reloc_ptr_as_arr {
int a; int a;
......
...@@ -8,10 +8,10 @@ ...@@ -8,10 +8,10 @@
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
static volatile struct data { struct {
char in[256]; char in[256];
char out[256]; char out[256];
} data; } data = {};
struct core_reloc_size_output { struct core_reloc_size_output {
int int_sz; int int_sz;
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_tracing.h"
#include "bpf_trace_helpers.h"
SEC("kprobe/__set_task_comm")
int prog1(struct pt_regs *ctx)
{
return 0;
}
SEC("kretprobe/__set_task_comm")
int prog2(struct pt_regs *ctx)
{
return 0;
}
SEC("raw_tp/task_rename")
int prog3(struct bpf_raw_tracepoint_args *ctx)
{
return 0;
}
struct task_struct;
BPF_TRACE_3("fentry/__set_task_comm", prog4,
struct task_struct *, tsk, const char *, buf, __u8, exec)
{
return 0;
}
BPF_TRACE_3("fexit/__set_task_comm", prog5,
struct task_struct *, tsk, const char *, buf, __u8, exec)
{
return 0;
}
char _license[] SEC("license") = "GPL";
#!/bin/bash #!/bin/bash
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
ERROR=0
TMPDIR=
# If one build fails, continue but return non-0 on exit.
return_value() {
if [ -d "$TMPDIR" ] ; then
rm -rf -- $TMPDIR
fi
exit $ERROR
}
trap return_value EXIT
case $1 in case $1 in
-h|--help) -h|--help)
echo -e "$0 [-j <n>]" echo -e "$0 [-j <n>]"
...@@ -20,7 +8,7 @@ case $1 in ...@@ -20,7 +8,7 @@ case $1 in
echo -e "" echo -e ""
echo -e "\tOptions:" echo -e "\tOptions:"
echo -e "\t\t-j <n>:\tPass -j flag to 'make'." echo -e "\t\t-j <n>:\tPass -j flag to 'make'."
exit exit 0
;; ;;
esac esac
...@@ -32,6 +20,22 @@ SCRIPT_REL_PATH=$(realpath --relative-to=$PWD $0) ...@@ -32,6 +20,22 @@ SCRIPT_REL_PATH=$(realpath --relative-to=$PWD $0)
SCRIPT_REL_DIR=$(dirname $SCRIPT_REL_PATH) SCRIPT_REL_DIR=$(dirname $SCRIPT_REL_PATH)
KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../) KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../)
cd $KDIR_ROOT_DIR cd $KDIR_ROOT_DIR
if [ ! -e tools/bpf/bpftool/Makefile ]; then
echo -e "skip: bpftool files not found!\n"
exit 0
fi
ERROR=0
TMPDIR=
# If one build fails, continue but return non-0 on exit.
return_value() {
if [ -d "$TMPDIR" ] ; then
rm -rf -- $TMPDIR
fi
exit $ERROR
}
trap return_value EXIT
check() { check() {
local dir=$(realpath $1) local dir=$(realpath $1)
......
...@@ -45,7 +45,7 @@ static void dump_test_log(const struct prog_test_def *test, bool failed) ...@@ -45,7 +45,7 @@ static void dump_test_log(const struct prog_test_def *test, bool failed)
fflush(stdout); /* exports env.log_buf & env.log_cnt */ fflush(stdout); /* exports env.log_buf & env.log_cnt */
if (env.verbose || test->force_log || failed) { if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
if (env.log_cnt) { if (env.log_cnt) {
env.log_buf[env.log_cnt] = '\0'; env.log_buf[env.log_cnt] = '\0';
fprintf(env.stdout, "%s", env.log_buf); fprintf(env.stdout, "%s", env.log_buf);
...@@ -346,14 +346,14 @@ static const struct argp_option opts[] = { ...@@ -346,14 +346,14 @@ static const struct argp_option opts[] = {
{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0, { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
"Output verifier statistics", }, "Output verifier statistics", },
{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL, { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
"Verbose output (use -vv for extra verbose output)" }, "Verbose output (use -vv or -vvv for progressively verbose output)" },
{}, {},
}; };
static int libbpf_print_fn(enum libbpf_print_level level, static int libbpf_print_fn(enum libbpf_print_level level,
const char *format, va_list args) const char *format, va_list args)
{ {
if (!env.very_verbose && level == LIBBPF_DEBUG) if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
return 0; return 0;
vprintf(format, args); vprintf(format, args);
return 0; return 0;
...@@ -419,6 +419,8 @@ int parse_num_list(const char *s, struct test_selector *sel) ...@@ -419,6 +419,8 @@ int parse_num_list(const char *s, struct test_selector *sel)
return 0; return 0;
} }
extern int extra_prog_load_log_flags;
static error_t parse_arg(int key, char *arg, struct argp_state *state) static error_t parse_arg(int key, char *arg, struct argp_state *state)
{ {
struct test_env *env = state->input; struct test_env *env = state->input;
...@@ -460,9 +462,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) ...@@ -460,9 +462,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
env->verifier_stats = true; env->verifier_stats = true;
break; break;
case ARG_VERBOSE: case ARG_VERBOSE:
env->verbosity = VERBOSE_NORMAL;
if (arg) { if (arg) {
if (strcmp(arg, "v") == 0) { if (strcmp(arg, "v") == 0) {
env->very_verbose = true; env->verbosity = VERBOSE_VERY;
extra_prog_load_log_flags = 1;
} else if (strcmp(arg, "vv") == 0) {
env->verbosity = VERBOSE_SUPER;
extra_prog_load_log_flags = 2;
} else { } else {
fprintf(stderr, fprintf(stderr,
"Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n", "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
...@@ -470,7 +477,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) ...@@ -470,7 +477,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return -EINVAL; return -EINVAL;
} }
} }
env->verbose = true;
break; break;
case ARGP_KEY_ARG: case ARGP_KEY_ARG:
argp_usage(state); argp_usage(state);
...@@ -489,7 +495,7 @@ static void stdio_hijack(void) ...@@ -489,7 +495,7 @@ static void stdio_hijack(void)
env.stdout = stdout; env.stdout = stdout;
env.stderr = stderr; env.stderr = stderr;
if (env.verbose) { if (env.verbosity > VERBOSE_NONE) {
/* nothing to do, output to stdout by default */ /* nothing to do, output to stdout by default */
return; return;
} }
......
...@@ -39,6 +39,13 @@ typedef __u16 __sum16; ...@@ -39,6 +39,13 @@ typedef __u16 __sum16;
#include "trace_helpers.h" #include "trace_helpers.h"
#include "flow_dissector_load.h" #include "flow_dissector_load.h"
enum verbosity {
VERBOSE_NONE,
VERBOSE_NORMAL,
VERBOSE_VERY,
VERBOSE_SUPER,
};
struct test_selector { struct test_selector {
const char *name; const char *name;
bool *num_set; bool *num_set;
...@@ -49,8 +56,7 @@ struct test_env { ...@@ -49,8 +56,7 @@ struct test_env {
struct test_selector test_selector; struct test_selector test_selector;
struct test_selector subtest_selector; struct test_selector subtest_selector;
bool verifier_stats; bool verifier_stats;
bool verbose; enum verbosity verbosity;
bool very_verbose;
bool jit_enabled; bool jit_enabled;
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
#include <bpf/libbpf.h> #include <bpf/libbpf.h>
#include <string.h> #include <string.h>
int extra_prog_load_log_flags = 0;
int bpf_prog_test_load(const char *file, enum bpf_prog_type type, int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd) struct bpf_object **pobj, int *prog_fd)
{ {
...@@ -15,6 +17,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, ...@@ -15,6 +17,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
attr.prog_type = type; attr.prog_type = type;
attr.expected_attach_type = 0; attr.expected_attach_type = 0;
attr.prog_flags = BPF_F_TEST_RND_HI32; attr.prog_flags = BPF_F_TEST_RND_HI32;
attr.log_level = extra_prog_load_log_flags;
return bpf_prog_load_xattr(&attr, pobj, prog_fd); return bpf_prog_load_xattr(&attr, pobj, prog_fd);
} }
...@@ -35,6 +38,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, ...@@ -35,6 +38,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
load_attr.license = license; load_attr.license = license;
load_attr.kern_version = kern_version; load_attr.kern_version = kern_version;
load_attr.prog_flags = BPF_F_TEST_RND_HI32; load_attr.prog_flags = BPF_F_TEST_RND_HI32;
load_attr.log_level = extra_prog_load_log_flags;
return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz); return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
} }
...@@ -744,3 +744,86 @@ ...@@ -744,3 +744,86 @@
.result = ACCEPT, .result = ACCEPT,
.retval = 2, .retval = 2,
}, },
{
"jgt32: range bound deduction, reg op imm",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"jle32: range bound deduction, reg1 op reg2, reg2 unknown",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
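The three cases above feed an unbounded u32 from bpf_get_cgroup_classid() through a 32-bit conditional jump and then use it as an offset into a 48-byte map value; they rely on the verifier deducing the post-branch range from the 32-bit compare. Roughly the same pattern in restricted C, as a hedged sketch rather than code from this series:
#include <linux/bpf.h>
#include "bpf_helpers.h"
/* value points at a 48-byte map value obtained via bpf_map_lookup_elem(). */
static __always_inline int bounded_store(struct __sk_buff *skb, char *value)
{
	__u32 classid = bpf_get_cgroup_classid(skb);
	/* 32-bit compare: on the fall-through path the verifier should now
	 * know classid is in [0, 1], making the store below in bounds.
	 */
	if (classid > 1)
		return 0;
	value[classid] = 0;
	return 0;
}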