Commit 192b6638 authored by Andrii Nakryiko, committed by Daniel Borkmann

libbpf: Prevent loading vmlinux BTF twice

Prevent loading/parsing vmlinux BTF twice in some cases: for CO-RE relocations
and for BTF-aware hooks (tp_btf, fentry/fexit, etc).

Fixes: a6ed02ca ("libbpf: Load btf_vmlinux only once per object.")
Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200624043805.1794620-1-andriin@fb.com
parent 135c783f
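
For context, a minimal usage sketch of the case this patch optimizes (assumptions: an object file named prog.bpf.o containing both CO-RE relocations and a BTF-aware fentry program; only standard public libbpf calls are used). Before this change, loading such an object could parse vmlinux BTF once for the attach-target lookup and again for CO-RE relocation; with it, vmlinux BTF is found and parsed at most once per object:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	int err;

	/* hypothetical object with CO-RE relocations + an fentry program */
	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (libbpf_get_error(obj)) {
		fprintf(stderr, "failed to open BPF object\n");
		return 1;
	}

	/* with this patch, vmlinux BTF is loaded at most once here and is
	 * shared by CO-RE relocation and BTF-aware attach-target lookup */
	err = bpf_object__load(obj);
	if (err)
		fprintf(stderr, "failed to load BPF object: %d\n", err);

	bpf_object__close(obj);
	return err ? 1 : 0;
}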
@@ -2504,11 +2504,24 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
 
 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
 {
+	bool need_vmlinux_btf = false;
 	struct bpf_program *prog;
 	int err;
 
+	/* CO-RE relocations need kernel BTF */
+	if (obj->btf_ext && obj->btf_ext->field_reloc_info.len)
+		need_vmlinux_btf = true;
+
 	bpf_object__for_each_program(prog, obj) {
 		if (libbpf_prog_needs_vmlinux_btf(prog)) {
+			need_vmlinux_btf = true;
+			break;
+		}
+	}
+
+	if (!need_vmlinux_btf)
+		return 0;
+
+	obj->btf_vmlinux = libbpf_find_kernel_btf();
+	if (IS_ERR(obj->btf_vmlinux)) {
+		err = PTR_ERR(obj->btf_vmlinux);
@@ -2516,10 +2529,6 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
-			obj->btf_vmlinux = libbpf_find_kernel_btf();
-			if (IS_ERR(obj->btf_vmlinux)) {
-				err = PTR_ERR(obj->btf_vmlinux);
-				obj->btf_vmlinux = NULL;
-				return err;
-			}
-			return 0;
-		}
-	}
-
+		obj->btf_vmlinux = NULL;
+		return err;
+	}
 	return 0;
 }
@@ -4945,8 +4954,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
 	if (targ_btf_path)
 		targ_btf = btf__parse_elf(targ_btf_path, NULL);
 	else
-		targ_btf = libbpf_find_kernel_btf();
-	if (IS_ERR(targ_btf)) {
+		targ_btf = obj->btf_vmlinux;
+	if (IS_ERR_OR_NULL(targ_btf)) {
 		pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
 		return PTR_ERR(targ_btf);
 	}
@@ -4987,6 +4996,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
 	}
 
 out:
-	btf__free(targ_btf);
+	/* obj->btf_vmlinux is freed at the end of object load phase */
+	if (targ_btf != obj->btf_vmlinux)
+		btf__free(targ_btf);
 	if (!IS_ERR_OR_NULL(cand_cache)) {
 		hashmap__for_each_entry(cand_cache, entry, i) {
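
The bpf_core_reloc_fields() hunks above encode a simple ownership rule: the CO-RE target BTF is either parsed from a caller-supplied ELF (owned by the relocation code, which must free it) or borrowed from the already-loaded obj->btf_vmlinux (freed later, at the end of the object load phase). A minimal standalone sketch of that rule, assuming the public btf__parse_elf()/btf__free()/libbpf_find_kernel_btf() helpers; relocate_with_target_btf() and its arguments are illustrative, not libbpf internals:

#include <errno.h>
#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* illustrative helper, not a libbpf function */
static int relocate_with_target_btf(struct btf *vmlinux_btf,
				    const char *targ_btf_path)
{
	struct btf *targ_btf;

	if (targ_btf_path)
		targ_btf = btf__parse_elf(targ_btf_path, NULL); /* owned here */
	else
		targ_btf = vmlinux_btf;                         /* borrowed */

	if (libbpf_get_error(targ_btf)) {
		fprintf(stderr, "failed to get target BTF\n");
		return -EINVAL;
	}

	/* ... perform CO-RE relocations against targ_btf ... */

	/* free only what this function parsed itself; the shared vmlinux
	 * BTF stays alive and is freed by its owner later */
	if (targ_btf != vmlinux_btf)
		btf__free(targ_btf);
	return 0;
}

int main(int argc, char **argv)
{
	struct btf *vmlinux_btf = libbpf_find_kernel_btf();

	if (libbpf_get_error(vmlinux_btf)) {
		fprintf(stderr, "failed to load vmlinux BTF\n");
		return 1;
	}

	/* reuse the same vmlinux BTF across relocation passes instead of
	 * re-parsing it each time */
	relocate_with_target_btf(vmlinux_btf, argc > 1 ? argv[1] : NULL);
	relocate_with_target_btf(vmlinux_btf, NULL);

	btf__free(vmlinux_btf);
	return 0;
}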