Commit 92d3bff2 authored by Andrii Nakryiko

Merge branch 'bpf/selftests: page size fixes'

Yauheni Kaliuta says:

====================

A set of fixes for selftests to make them work on systems with PAGE_SIZE > 4K,
plus a cleanup (removal of the unused "version" section) and an extension of
the ringbuf_multi test.
---
v3->v4:
- zero initialize BPF programs' static variables;
- add bpf_map__inner_map to libbpf.map in alphabetical order;
- add bpf_map__set_inner_map_fd test to ringbuf_multi;

v2->v3:
 - reorder: move the version-removal patch first to keep the main patches
   in one group;
 - rename "selftests/bpf: pass page size from userspace in sockopt_sk"
   as suggested;
 - convert the sockopt_sk test to use ASSERT macros;
 - set the page size from userspace;
 - split the patches into userspace/BPF pairs; it's easier to check that
   every conversion works as expected;

v1->v2:

- add missed 'selftests/bpf: test_progs/sockopt_sk: Convert to use BPF skeleton'
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 957dca3d cfc0889c
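
The conversions below all follow one pattern: a BPF ring buffer's size must be
a multiple of the kernel page size, and with a skeleton a map definition can
only be adjusted between open and load. So each test is split from a single
__open_and_load() call into __open(), bpf_map__set_max_entries(), and
__load(). A minimal sketch of that flow, reusing the test_ringbuf skeleton
names touched by this series (error handling abbreviated, not the exact test
code):

	#include <unistd.h>
	#include "test_ringbuf.skel.h"

	static int run_resized(void)
	{
		struct test_ringbuf *skel;
		int err;

		skel = test_ringbuf__open();	/* parse the object; nothing in the kernel yet */
		if (!skel)
			return -1;

		/* must happen before load: afterwards the map already exists */
		err = bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
		if (!err)
			err = test_ringbuf__load(skel);

		test_ringbuf__destroy(skel);
		return err;
	}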
@@ -2194,6 +2194,7 @@ static int parse_btf_map_def(struct bpf_object *obj,
 			map->inner_map = calloc(1, sizeof(*map->inner_map));
 			if (!map->inner_map)
 				return -ENOMEM;
+			map->inner_map->fd = -1;
 			map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
 			map->inner_map->name = malloc(strlen(map->name) +
 						      sizeof(".inner") + 1);
@@ -3845,6 +3846,14 @@ __u32 bpf_map__max_entries(const struct bpf_map *map)
 	return map->def.max_entries;
 }

+struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
+{
+	if (!bpf_map_type__is_map_in_map(map->def.type))
+		return NULL;
+
+	return map->inner_map;
+}
+
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
 	if (map->fd >= 0)
@@ -9476,6 +9485,7 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
 		pr_warn("error: inner_map_fd already specified\n");
 		return -EINVAL;
 	}
+	zfree(&map->inner_map);
 	map->inner_map_fd = fd;
 	return 0;
 }
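
The three libbpf changes above work together: inner_map->fd is initialized to
-1 so a never-created inner map is not mistaken for a live one,
bpf_map__inner_map() exposes the BTF-declared inner map definition so callers
can tune it before load, and bpf_map__set_inner_map_fd() now zfree()s that
definition because an explicit prototype fd replaces it. A sketch of the
intended caller-side use, with skel->maps.ringbuf_arr standing in for any
BTF-defined map-in-map (see the ringbuf_multi test below):

	struct bpf_map *inner = bpf_map__inner_map(skel->maps.ringbuf_arr);

	if (!inner)	/* NULL for anything that is not a map-in-map type */
		return -EINVAL;
	/* resizes the definition used to create the inner map at load time */
	err = bpf_map__set_max_entries(inner, getpagesize());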
@@ -480,6 +480,7 @@ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

 LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);

 LIBBPF_API long libbpf_get_error(const void *ptr);
@@ -359,5 +359,6 @@ LIBBPF_0.4.0 {
 		bpf_linker__finalize;
 		bpf_linker__free;
 		bpf_linker__new;
+		bpf_map__inner_map;
 		bpf_object__set_kversion;
 } LIBBPF_0.3.0;
@@ -12,11 +12,22 @@ void test_map_ptr(void)
 	__u32 duration = 0, retval;
 	char buf[128];
 	int err;
+	int page_size = getpagesize();

-	skel = map_ptr_kern__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+	skel = map_ptr_kern__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size);
+	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+		goto cleanup;
+
+	err = map_ptr_kern__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	skel->bss->page_size = page_size;
+
 	err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
 				sizeof(pkt_v4), buf, NULL, &retval, NULL);
@@ -29,22 +29,36 @@ void test_mmap(void)
 	struct test_mmap *skel;
 	__u64 val = 0;

-	skel = test_mmap__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+	skel = test_mmap__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	/* at least 4 pages of data */
+	err = bpf_map__set_max_entries(skel->maps.data_map,
+				       4 * (page_size / sizeof(u64)));
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_mmap__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
 	bss_map = skel->maps.bss;
 	data_map = skel->maps.data_map;
 	data_map_fd = bpf_map__fd(data_map);

 	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
-	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
-		munmap(tmp1, 4096);
+		munmap(tmp1, page_size);
 		goto cleanup;
 	}
 	/* now double-check if it's mmap()'able at all */
-	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
 		goto cleanup;
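
The data_map sizing above deserves one line of arithmetic: the map's value
type is __u64, so 4 * (page_size / sizeof(u64)) entries always occupy exactly
four pages, whatever the page size, which is what the later mmap() assertions
in this test rely on. A sketch of the relationship (assumption: the
BPF_F_MMAPABLE mapping covers value_size * max_entries bytes, rounded up to
whole pages):

	size_t entries = 4 * (page_size / sizeof(__u64));
	size_t mmap_sz = entries * sizeof(__u64);	/* == 4 * page_size exactly */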
@@ -87,11 +87,20 @@ void test_ringbuf(void)
 	pthread_t thread;
 	long bg_ret = -1;
 	int err, cnt;
+	int page_size = getpagesize();

-	skel = test_ringbuf__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+	skel = test_ringbuf__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_ringbuf__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
 	/* only trigger BPF program for current process */
 	skel->bss->pid = getpid();
@@ -110,9 +119,9 @@ void test_ringbuf(void)
 	CHECK(skel->bss->avail_data != 3 * rec_sz,
 	      "err_avail_size", "exp %ld, got %ld\n",
 	      3L * rec_sz, skel->bss->avail_data);
-	CHECK(skel->bss->ring_size != 4096,
+	CHECK(skel->bss->ring_size != page_size,
 	      "err_ring_size", "exp %ld, got %ld\n",
-	      4096L, skel->bss->ring_size);
+	      (long)page_size, skel->bss->ring_size);
 	CHECK(skel->bss->cons_pos != 0,
 	      "err_cons_pos", "exp %ld, got %ld\n",
 	      0L, skel->bss->cons_pos);
@@ -41,13 +41,42 @@ static int process_sample(void *ctx, void *data, size_t len)
 void test_ringbuf_multi(void)
 {
 	struct test_ringbuf_multi *skel;
-	struct ring_buffer *ringbuf;
+	struct ring_buffer *ringbuf = NULL;
 	int err;
+	int page_size = getpagesize();
+	int proto_fd = -1;

-	skel = test_ringbuf_multi__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "skeleton open&load failed\n"))
+	skel = test_ringbuf_multi__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;

+	err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	proto_fd = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, page_size, 0);
+	if (CHECK(proto_fd == -1, "bpf_create_map", "bpf_create_map failed\n"))
+		goto cleanup;
+
+	err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
+	if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
+		goto cleanup;
+
+	err = test_ringbuf_multi__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
+	close(proto_fd);
+	proto_fd = -1;
+
 	/* only trigger BPF program for current process */
 	skel->bss->pid = getpid();
@@ -97,6 +126,8 @@ void test_ringbuf_multi(void)
 	      2L, skel->bss->total);

 cleanup:
+	if (proto_fd >= 0)
+		close(proto_fd);
 	ring_buffer__free(ringbuf);
 	test_ringbuf_multi__destroy(skel);
 }
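
Two details of the ringbuf_multi conversion deserve a note. First,
ringbuf_hash has no BTF-declared inner map, so a throwaway prototype is
created with bpf_create_map() and handed to bpf_map__set_inner_map_fd(); the
kernel only copies the prototype's attributes at map creation time, which is
why proto_fd can be closed right after test_ringbuf_multi__load(). Second,
the part of the test elided above consumes both rings through a single
ring_buffer instance; a sketch of that pattern, assuming process_sample has
the ring_buffer_sample_fn callback signature as in this test:

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),
				   process_sample, (void *)(long)1, NULL);
	if (!ringbuf)
		goto cleanup;
	err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
			       process_sample, (void *)(long)2);
	if (err)
		goto cleanup;
	err = ring_buffer__poll(ringbuf, -1);	/* drains whichever ring has data */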
@@ -3,6 +3,7 @@
 #include "cgroup_helpers.h"

 #include <linux/tcp.h>
+#include "sockopt_sk.skel.h"

 #ifndef SOL_TCP
 #define SOL_TCP IPPROTO_TCP
@@ -191,60 +192,30 @@ static int getsetsockopt(void)
 	return -1;
 }

-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title)
+static void run_test(int cgroup_fd)
 {
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
+	struct sockopt_sk *skel;

-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", title);
-		return -1;
-	}
+	skel = sockopt_sk__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto cleanup;

-	prog = bpf_object__find_program_by_title(obj, title);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", title);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, 0);
-	if (err) {
-		log_err("Failed to attach %s BPF program", title);
-		return -1;
-	}
-
-	return 0;
-}
-
-static void run_test(int cgroup_fd)
-{
-	struct bpf_prog_load_attr attr = {
-		.file = "./sockopt_sk.o",
-	};
-	struct bpf_object *obj;
-	int ignored;
-	int err;
-
-	err = bpf_prog_load_xattr(&attr, &obj, &ignored);
-	if (CHECK_FAIL(err))
-		return;
+	skel->bss->page_size = getpagesize();

-	err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt");
-	if (CHECK_FAIL(err))
-		goto close_bpf_object;
+	skel->links._setsockopt =
+		bpf_program__attach_cgroup(skel->progs._setsockopt, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links._setsockopt, "setsockopt_link"))
+		goto cleanup;

-	err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt");
-	if (CHECK_FAIL(err))
-		goto close_bpf_object;
+	skel->links._getsockopt =
+		bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd);
+	if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link"))
+		goto cleanup;

-	CHECK_FAIL(getsetsockopt());
+	ASSERT_OK(getsetsockopt(), "getsetsockopt");

-close_bpf_object:
-	bpf_object__close(obj);
+cleanup:
+	sockopt_sk__destroy(skel);
 }

 void test_sockopt_sk(void)
@@ -12,6 +12,7 @@ _Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

 enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
 __u32 g_line = 0;
+int page_size = 0; /* userspace should set it */

 #define VERIFY_TYPE(type, func) ({	\
 	g_map_type = type;		\
@@ -635,7 +636,6 @@ struct bpf_ringbuf_map {

 struct {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } m_ringbuf SEC(".maps");

 static inline int check_ringbuf(void)
@@ -643,7 +643,7 @@ static inline int check_ringbuf(void)
 	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
 	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

-	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

 	return 1;
 }
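
The new int page_size = 0 global relies on how skeletons expose global data:
zero-initialized globals land in the .bss map, which the skeleton
memory-maps into the test process, hence the v4 changelog note about
zero-initializing static variables (a non-zero initializer would place them
in .data instead). Userspace then just writes through the skeleton, as the
map_ptr test above does:

	/* userspace side: any time after open and before the program runs */
	skel->bss->page_size = getpagesize();	/* plain store into the mmap'ed .bss map */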
@@ -6,11 +6,8 @@
 #include <bpf/bpf_helpers.h>

 char _license[] SEC("license") = "GPL";
-__u32 _version SEC("version") = 1;

-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
+int page_size = 0; /* userspace should set it */

 #ifndef SOL_TCP
 #define SOL_TCP IPPROTO_TCP
@@ -90,7 +87,7 @@ int _getsockopt(struct bpf_sockopt *ctx)
 		 * program can only see the first PAGE_SIZE
 		 * bytes of data.
 		 */
-		if (optval_end - optval != PAGE_SIZE)
+		if (optval_end - optval != page_size)
 			return 0; /* EPERM, unexpected data size */

 		return 1;
@@ -161,7 +158,7 @@ int _setsockopt(struct bpf_sockopt *ctx)

 	if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
 		/* Original optlen is larger than PAGE_SIZE. */
-		if (ctx->optlen != PAGE_SIZE * 2)
+		if (ctx->optlen != page_size * 2)
 			return 0; /* EPERM, unexpected data size */

 		if (optval + 1 > optval_end)
@@ -175,7 +172,7 @@ int _setsockopt(struct bpf_sockopt *ctx)
 		 * program can only see the first PAGE_SIZE
 		 * bytes of data.
 		 */
-		if (optval_end - optval != PAGE_SIZE)
+		if (optval_end - optval != page_size)
 			return 0; /* EPERM, unexpected data size */

 		return 1;
@@ -9,7 +9,6 @@ char _license[] SEC("license") = "GPL";

 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 4096);
 	__uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
 	__type(key, __u32);
 	__type(value, char);
@@ -17,7 +16,6 @@ struct {

 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 512 * 4); /* at least 4 pages of data */
 	__uint(map_flags, BPF_F_MMAPABLE);
 	__type(key, __u32);
 	__type(value, __u64);
@@ -15,7 +15,6 @@ struct sample {

 struct {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } ringbuf SEC(".maps");

 /* inputs */
@@ -15,7 +15,6 @@ struct sample {

 struct ringbuf_map {
 	__uint(type, BPF_MAP_TYPE_RINGBUF);
-	__uint(max_entries, 1 << 12);
 } ringbuf1 SEC(".maps"),
   ringbuf2 SEC(".maps");
@@ -31,6 +30,17 @@ struct {
 	},
 };

+struct {
+	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__array(values, struct ringbuf_map);
+} ringbuf_hash SEC(".maps") = {
+	.values = {
+		[0] = &ringbuf1,
+	},
+};
+
 /* inputs */
 int pid = 0;
 int target_ring = 0;
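
On the BPF side, an inner ring buffer obtained through a map-in-map is used
exactly like a directly declared one: look it up, then hand the resulting
pointer to the ringbuf helpers. A sketch of that access pattern (a
hypothetical program, not the test's code; ringbuf_arr and struct sample's
pid field are as declared in this test):

	SEC("tp/syscalls/sys_enter_getpgid")
	int write_to_inner_ring(void *ctx)
	{
		int zero = 0;
		struct sample *s;
		void *rb;

		rb = bpf_map_lookup_elem(&ringbuf_arr, &zero);
		if (!rb)
			return 0;	/* no inner map in slot 0 */

		s = bpf_ringbuf_reserve(rb, sizeof(*s), 0);
		if (!s)
			return 0;	/* ring full */
		s->pid = bpf_get_current_pid_tgid() >> 32;
		bpf_ringbuf_submit(s, 0);
		return 0;
	}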