Commit 2977282b authored by Daniel Borkmann

Merge branch 'bpf-libbpf-old-kernel'

Andrii Nakryiko says:

====================
This patch set improves libbpf's support for old kernels that are missing
features like BTF support, global variables support, etc.

The most critical issue is a silent drop of CO-RE relocations if libbpf fails
to load BTF (despite sanitization efforts), which is frequently the case for
kernels that have no BTF support whatsoever. There are still useful BPF
applications that rely on CO-RE and could work on such kernels. To that end,
this series revamps the way BTF is handled in libbpf: failure to load BTF into
the kernel no longer prevents libbpf from using BTF in its full capability
(e.g., for CO-RE relocations) internally.
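
For illustration, a minimal sketch of what this enables (the object file name
"retro_probe.bpf.o" is hypothetical): an application whose programs use CO-RE
relocations can be opened and loaded on a kernel without BTF support, because
the relocations are resolved from libbpf's in-memory BTF rather than from
kernel-loaded BTF.

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
        struct bpf_object *obj;
        long err;

        /* hypothetical object built with CO-RE (preserve_access_index) accesses */
        obj = bpf_object__open("retro_probe.bpf.o");
        err = libbpf_get_error(obj);
        if (err) {
                fprintf(stderr, "failed to open object: %ld\n", err);
                return 1;
        }

        /* CO-RE relocations are applied from libbpf's own BTF copy; with
         * this series, failing to load .BTF into an old kernel is ignored
         * unless the kernel actually requires BTF for this object */
        err = bpf_object__load(obj);
        if (err)
                fprintf(stderr, "failed to load object: %ld\n", err);

        bpf_object__close(obj);
        return err ? 1 : 0;
}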

Another issue identified was the reliance of perf_buffer__new() on the
BPF_OBJ_GET_INFO_BY_FD command, which is more recent than perf_buffer support
itself. Furthermore, BPF_OBJ_GET_INFO_BY_FD is needed only for some sanity
checks that provide better user errors, so it can be safely omitted if the
kernel doesn't provide it.
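
As a reference point, a sketch of the setup path this affects, using the
opts-based perf_buffer__new() signature present in this tree (the map fd is
assumed to refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map; the page count is
arbitrary). With this change, the BPF_OBJ_GET_INFO_BY_FD-based sanity check
inside perf_buffer__new() becomes best-effort instead of a hard failure on
kernels lacking that command:

#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
        /* consume one sample delivered through the perf ring buffer */
}

static int consume_events(int perf_buf_map_fd)
{
        struct perf_buffer_opts pb_opts = {};
        struct perf_buffer *pb;
        int err;

        pb_opts.sample_cb = on_sample;
        /* 8 pages per CPU is an arbitrary example size */
        pb = perf_buffer__new(perf_buf_map_fd, 8, &pb_opts);
        err = libbpf_get_error(pb);
        if (err)
                return err;

        /* poll with a 100ms timeout; a negative return means an error */
        while ((err = perf_buffer__poll(pb, 100)) >= 0)
                ;

        perf_buffer__free(pb);
        return err;
}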

The perf_buffer selftest was adjusted to use a skeleton instead of
bpf_prog_load(). The latter uses the BPF_F_TEST_RND_HI32 flag, which is a
relatively recent addition and unnecessarily fails the selftest in libbpf's
Travis CI tests. By using a skeleton we both get a shorter selftest and one
that works on pretty ancient kernels, giving better libbpf test coverage.
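
The skeleton-based flow the selftest switches to is roughly the sketch below;
the generated test_perf_buffer__*() functions and the test_perf_buffer.skel.h
header all appear in the selftest diff further down.

#include "test_perf_buffer.skel.h"

static int run_with_skeleton(void)
{
        struct test_perf_buffer *skel;
        int err;

        /* open + load in one step; unlike bpf_prog_load(), no
         * BPF_F_TEST_RND_HI32 flag gets passed to the kernel */
        skel = test_perf_buffer__open_and_load();
        if (!skel)
                return -1;

        /* attach every program according to its SEC() annotation */
        err = test_perf_buffer__attach(skel);
        if (err)
                goto cleanup;

        /* ... trigger the probe and drain events via perf_buffer__poll() ... */

cleanup:
        test_perf_buffer__destroy(skel);
        return err;
}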

One new selftest was added that relies on basic CO-RE features but otherwise
doesn't expect any recent features (like global variables) from the kernel.
Again, it's good to have better coverage of old kernels in libbpf testing.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents b8c50df0 6984cbc6
@@ -389,7 +389,7 @@ void btf__free(struct btf *btf)
if (!btf)
return;
if (btf->fd != -1)
if (btf->fd >= 0)
close(btf->fd);
free(btf->data);
@@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf)
return btf->fd;
}
void btf__set_fd(struct btf *btf, int fd)
{
btf->fd = fd;
}
const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
{
*size = btf->data_size;
......
@@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
......
@@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return false;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = obj->caps.btf_func_global;
bool has_datasec = obj->caps.btf_datasec;
bool has_func = obj->caps.btf_func;
return !has_func || !has_datasec || !has_func_global;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
bool has_func_global = obj->caps.btf_func_global;
bool has_datasec = obj->caps.btf_datasec;
bool has_func = obj->caps.btf_func;
struct btf *btf = obj->btf;
struct btf_type *t;
int i, j, vlen;
if (!obj->btf || (has_func && has_datasec && has_func_global))
return;
for (i = 1; i <= btf__get_nr_types(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
@@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
}
}
static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
{
if (!obj->btf_ext)
return;
if (!obj->caps.btf_func) {
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
}
}
static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
@@ -2473,19 +2467,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
return 0;
err = btf__finalize_data(obj, obj->btf);
if (!err)
return 0;
pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
if (libbpf_needs_btf(obj)) {
pr_warn("BTF is required, but is missing or corrupted.\n");
return -ENOENT;
if (err) {
pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
return err;
}
return 0;
}
@@ -2538,30 +2524,50 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
struct btf *kern_btf = obj->btf;
bool btf_mandatory, sanitize;
int err = 0;
if (!obj->btf)
return 0;
bpf_object__sanitize_btf(obj);
bpf_object__sanitize_btf_ext(obj);
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *orig_data;
void *san_data;
__u32 sz;
err = btf__load(obj->btf);
if (err) {
pr_warn("Error loading %s into kernel: %d.\n",
BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
/* btf_ext can't exist without btf, so free it as well */
if (obj->btf_ext) {
btf_ext__free(obj->btf_ext);
obj->btf_ext = NULL;
}
/* clone BTF to sanitize a copy and leave the original intact */
orig_data = btf__get_raw_data(obj->btf, &sz);
san_data = malloc(sz);
if (!san_data)
return -ENOMEM;
memcpy(san_data, orig_data, sz);
kern_btf = btf__new(san_data, sz);
if (IS_ERR(kern_btf))
return PTR_ERR(kern_btf);
if (kernel_needs_btf(obj))
return err;
bpf_object__sanitize_btf(obj, kern_btf);
}
return 0;
err = btf__load(kern_btf);
if (sanitize) {
if (!err) {
/* move fd to libbpf's BTF */
btf__set_fd(obj->btf, btf__fd(kern_btf));
btf__set_fd(kern_btf, -1);
}
btf__free(kern_btf);
}
if (err) {
btf_mandatory = kernel_needs_btf(obj);
pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
btf_mandatory ? "BTF is mandatory, can't proceed."
: "BTF is optional, ignoring.");
if (!btf_mandatory)
err = 0;
}
return err;
}
static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -3785,7 +3791,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -5369,18 +5375,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog->prog_ifindex;
}
/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
if (prog->obj->btf_ext)
btf_fd = bpf_object__btf_fd(prog->obj);
else
btf_fd = -1;
load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
load_attr.line_info = prog->line_info;
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
if (btf_fd >= 0 && prog->obj->caps.btf_func) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
load_attr.line_info = prog->line_info;
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
}
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
@@ -8586,7 +8591,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p)
{
const char *online_cpus_file = "/sys/devices/system/cpu/online";
struct bpf_map_info map = {};
struct bpf_map_info map;
char msg[STRERR_BUFSIZE];
struct perf_buffer *pb;
bool *online = NULL;
@@ -8599,19 +8604,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
return ERR_PTR(-EINVAL);
}
/* best-effort sanity checks */
memset(&map, 0, sizeof(map));
map_info_len = sizeof(map);
err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
if (err) {
err = -errno;
pr_warn("failed to get map info for map FD %d: %s\n",
map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
return ERR_PTR(err);
}
if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
map.name);
return ERR_PTR(-EINVAL);
/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
* -EBADFD, -EFAULT, or -E2BIG on real error
*/
if (err != -EINVAL) {
pr_warn("failed to get map info for map FD %d: %s\n",
map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
return ERR_PTR(err);
}
pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
map_fd);
} else {
if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
map.name);
return ERR_PTR(-EINVAL);
}
}
pb = calloc(1, sizeof(*pb));
@@ -8643,7 +8657,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = pb->cpu_cnt;
goto error;
}
if (map.max_entries < pb->cpu_cnt)
if (map.max_entries && map.max_entries < pb->cpu_cnt)
pb->cpu_cnt = map.max_entries;
}
......
@@ -288,4 +288,5 @@ LIBBPF_0.1.0 {
bpf_map__value_size;
bpf_program__autoload;
bpf_program__set_autoload;
btf__set_fd;
} LIBBPF_0.0.9;
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <test_progs.h>
#include "test_core_retro.skel.h"
void test_core_retro(void)
{
int err, zero = 0, res, duration = 0;
struct test_core_retro *skel;
/* load program */
skel = test_core_retro__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
/* attach probe */
err = test_core_retro__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* trigger */
usleep(1);
err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
goto out_close;
CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid());
out_close:
test_core_retro__destroy(skel);
}
@@ -4,6 +4,7 @@
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
/* AddressSanitizer sometimes crashes due to data dereference below, due to
@@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
void test_perf_buffer(void)
{
int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
const char *prog_name = "kprobe/sys_nanosleep";
const char *file = "./test_perf_buffer.o";
int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
struct perf_buffer_opts pb_opts = {};
struct bpf_map *perf_buf_map;
struct test_perf_buffer *skel;
cpu_set_t cpu_set, cpu_seen;
struct bpf_program *prog;
struct bpf_object *obj;
struct perf_buffer *pb;
struct bpf_link *link;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
@@ -51,33 +47,21 @@ void test_perf_buffer(void)
nr_on_cpus++;
/* load program */
err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
obj = NULL;
goto out_close;
}
prog = bpf_object__find_program_by_title(obj, prog_name);
if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
skel = test_perf_buffer__open_and_load();
if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
goto out_close;
/* load map */
perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
goto out_close;
/* attach kprobe */
link = bpf_program__attach_kprobe(prog, false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
/* attach probe */
err = test_perf_buffer__attach(skel);
if (CHECK(err, "attach_kprobe", "err %d\n", err))
goto out_close;
/* set up perf buffer */
pb_opts.sample_cb = on_sample;
pb_opts.ctx = &cpu_seen;
pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto out_detach;
goto out_close;
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
@@ -94,7 +78,7 @@
&cpu_set);
if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
i, err))
goto out_detach;
goto out_close;
usleep(1);
}
@@ -110,9 +94,7 @@
out_free_pb:
perf_buffer__free(pb);
out_detach:
bpf_link__destroy(link);
out_close:
bpf_object__close(obj);
test_perf_buffer__destroy(skel);
free(online);
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
struct task_struct {
int tgid;
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} results SEC(".maps");
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
struct task_struct *task = (void *)bpf_get_current_task();
int tgid = BPF_CORE_READ(task, tgid);
int zero = 0;
bpf_map_update_elem(&results, &zero, &tgid, 0);
return 0;
}
char _license[] SEC("license") = "GPL";
@@ -12,8 +12,8 @@ struct {
__uint(value_size, sizeof(int));
} perf_buf_map SEC(".maps");
SEC("kprobe/sys_nanosleep")
int BPF_KPROBE(handle_sys_nanosleep_entry)
SEC("tp/raw_syscalls/sys_enter")
int handle_sys_enter(void *ctx)
{
int cpu = bpf_get_smp_processor_id();
......