Commit 74661776 authored by Alexei Starovoitov

Merge branch 'bpftool-improvements-kallsymfix'

Daniel Borkmann says:

====================
This work adds correlation of maps and calls into the bpftool
xlated dump to aid debugging and introspection of loaded BPF
progs. The first patch makes kallsyms work on subprogs with BPF
calls, and the second implements the actual correlation. Details
and example output can be found in the second patch; a sketched
dump also follows the commit metadata below.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 7d9890ef 7105e828
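
For a sense of what the correlation looks like in practice, here is an
illustrative bpftool session. The output shape follows the format strings
added below (print_bpf_insn(), print_call(), print_imm()), but the program,
map id, call offsets, addresses and symbol names are made up for the
example and are not taken from the patches:

  # bpftool prog dump xlated id 1
    0: (b7) r1 = 2
    1: (63) *(u32 *)(r10 -4) = r1
    2: (bf) r2 = r10
    3: (07) r2 += -4
    4: (18) r1 = map[id:2]
    6: (85) call bpf_map_lookup_elem#71184
    7: (15) if r0 == 0x0 goto pc+2
    8: (85) call pc+9#bpf_prog_5f76847930402518_F
  [...]

Map loads are shown as map[id:N] (the id that bpftool listings report),
helper calls are resolved to a kernel symbol through /proc/kallsyms, and
bpf-to-bpf calls are resolved to the bpf_prog_<tag>_F symbols that the
first patch registers for subprogs. When kallsyms values are restricted
(kernel.kptr_restrict), the new bpf_dump_raw_ok() check makes the kernel
sanitize or withhold the raw addresses, and bpftool prints an error
pointing at kptr_restrict.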
@@ -18,6 +18,7 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
 #include <net/sch_generic.h>
@@ -724,6 +725,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus make dump the same restriction.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
...
@@ -771,7 +771,9 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
...
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   char *buff, size_t len)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+	if (insn->src_reg != BPF_PSEUDO_CALL &&
+	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+	    func_id_str[insn->imm])
+		return func_id_str[insn->imm];
+
+	if (cbs && cbs->cb_call)
+		return cbs->cb_call(cbs->private_data, insn);
+
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		snprintf(buff, len, "%+d", insn->imm);
+
+	return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   u64 full_imm, char *buff, size_t len)
+{
+	if (cbs && cbs->cb_imm)
+		return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+	return buff;
+}
+
+const char *func_id_name(int id)
+{
 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
 		return func_id_str[id];
 	else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
 			       struct bpf_verifier_env *env,
 			       const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
 		insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks)
 {
+	const bpf_insn_print_t verbose = cbs->cb_print;
 	u8 class = BPF_CLASS(insn->code);
 
 	if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 			 */
 			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+			char tmp[64];
 
 			if (map_ptr && !allow_ptr_leaks)
 				imm = 0;
 
-			verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-				insn->dst_reg, (unsigned long long)imm);
+			verbose(env, "(%02x) r%d = %s\n",
+				insn->code, insn->dst_reg,
+				__func_imm_name(cbs, insn, imm,
+						tmp, sizeof(tmp)));
 		} else {
 			verbose(env, "BUG_ld_%02x\n", insn->code);
 			return;
@@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);
 
 		if (opcode == BPF_CALL) {
-			if (insn->src_reg == BPF_PSEUDO_CALL)
-				verbose(env, "(%02x) call pc%+d\n", insn->code,
-					insn->imm);
-			else
+			char tmp[64];
+
+			if (insn->src_reg == BPF_PSEUDO_CALL) {
+				verbose(env, "(%02x) call pc%s\n",
+					insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)));
+			} else {
+				strcpy(tmp, "unknown");
 				verbose(env, "(%02x) call %s#%d\n", insn->code,
-					func_id_name(insn->imm), insn->imm);
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)),
+					insn->imm);
+			}
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
...
@@ -17,16 +17,35 @@
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-				  const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+				 const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+					      const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+					    const struct bpf_insn *insn,
+					    __u64 full_imm);
+
+struct bpf_insn_cbs {
+	bpf_insn_print_t	cb_print;
+	bpf_insn_revmap_call_t	cb_call;
+	bpf_insn_print_imm_t	cb_imm;
+	void			*private_data;
+};
+
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks);
 
 #endif
@@ -937,10 +937,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -1552,6 +1558,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -1602,18 +1669,34 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
 	}
 
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
...
@@ -4427,9 +4427,12 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		if (env->log.level) {
+			const struct bpf_insn_cbs cbs = {
+				.cb_print	= verbose,
+			};
+
 			verbose(env, "%d: ", insn_idx);
-			print_bpf_insn(verbose, env, insn,
-				       env->allow_ptr_leaks);
+			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
@@ -5017,14 +5020,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog, **func, *tmp;
 	int i, j, subprog_start, subprog_end = 0, len, subprog;
-	struct bpf_insn *insn = prog->insnsi;
+	struct bpf_insn *insn;
 	void *old_bpf_func;
 	int err = -ENOMEM;
 
 	if (env->subprog_cnt == 0)
 		return 0;
 
-	for (i = 0; i < prog->len; i++, insn++) {
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL) ||
 		    insn->src_reg != BPF_PSEUDO_CALL)
 			continue;
@@ -5063,7 +5066,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			goto out_free;
 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
 		       len * sizeof(struct bpf_insn));
+		func[i]->type = prog->type;
 		func[i]->len = len;
+		if (bpf_prog_calc_tag(func[i]))
+			goto out_free;
 		func[i]->is_func = 1;
 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
 		 * Long term would need debug info to populate names
@@ -5113,6 +5119,25 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		bpf_prog_lock_ro(func[i]);
 		bpf_prog_kallsyms_add(func[i]);
 	}
+
+	/* Last step: make now unused interpreter insns from main
+	 * prog consistent for later dump requests, so they can
+	 * later look the same as if they were interpreted only.
+	 */
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		unsigned long addr;
+
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = env->insn_aux_data[i].call_imm;
+		subprog = find_subprog(env, i + insn->off + 1);
+		addr  = (unsigned long)func[subprog + 1]->bpf_func;
+		addr &= PAGE_MASK;
+		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+			    addr - __bpf_call_base;
+	}
+
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
 	prog->aux->func = func;
...
@@ -401,6 +401,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
@@ -410,8 +492,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
 {
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
+{
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -425,7 +570,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf(" ");
@@ -454,8 +599,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -470,7 +622,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");
@@ -505,6 +657,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;
@@ -592,6 +745,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {
@@ -608,17 +769,19 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
+		if (member_len == &info.jited_prog_len) {
 			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
 	return 0;
 err_free:
...