Commit d5baf0ca authored by Andrii Nakryiko, committed by Alexei Starovoitov

selftests/bpf: Add BPF object loading tests with explicit token passing

Add a few tests that attempt to load BPF object containing privileged
map, program, and the one requiring mandatory BTF uploading into the
kernel (to validate token FD propagation to BPF_BTF_LOAD command).
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-27-andrii@kernel.org
parent 6b434b61
......@@ -14,6 +14,9 @@
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/un.h>
#include "priv_map.skel.h"
#include "priv_prog.skel.h"
#include "dummy_st_ops_success.skel.h"
static inline int sys_mount(const char *dev_name, const char *dir_name,
const char *type, unsigned long flags,
......@@ -666,6 +669,104 @@ static int userns_prog_load(int mnt_fd)
return err;
}
/* Check that a BPF object containing a privileged map type can only be
 * loaded from a user namespace when a BPF token is supplied through the
 * bpf_token_path open option, pointing at the delegated BPF FS mount.
 * Returns 0 on success, -EINVAL on any assertion failure.
 */
static int userns_obj_priv_map(int mnt_fd)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct priv_map *skel;
	char bpffs_path[256];
	int ret;

	/* without a token the privileged map creation must be rejected */
	skel = priv_map__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		priv_map__destroy(skel);
		return -EINVAL;
	}

	/* use bpf_token_path to provide BPF FS path */
	snprintf(bpffs_path, sizeof(bpffs_path), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = bpffs_path;

	skel = priv_map__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;

	ret = priv_map__load(skel);
	priv_map__destroy(skel);
	if (!ASSERT_OK(ret, "obj_token_path_load"))
		return -EINVAL;

	return 0;
}
/* Check that a BPF object containing a privileged program type can only
 * be loaded from a user namespace when a BPF token is supplied through
 * the bpf_token_path open option, pointing at the delegated BPF FS mount.
 * Returns 0 on success, -EINVAL on any assertion failure.
 */
static int userns_obj_priv_prog(int mnt_fd)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct priv_prog *skel;
	char bpffs_path[256];
	int ret;

	/* without a token the privileged program load must be rejected */
	skel = priv_prog__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) {
		priv_prog__destroy(skel);
		return -EINVAL;
	}

	/* use bpf_token_path to provide BPF FS path */
	snprintf(bpffs_path, sizeof(bpffs_path), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = bpffs_path;

	skel = priv_prog__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;

	ret = priv_prog__load(skel);
	priv_prog__destroy(skel);
	if (!ASSERT_OK(ret, "obj_token_path_load"))
		return -EINVAL;

	return 0;
}
/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command,
 * which should cause struct_ops application to fail, as BTF won't be uploaded
 * into the kernel, even if STRUCT_OPS programs themselves are allowed
 */
static int validate_struct_ops_load(int mnt_fd, bool expect_success)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct dummy_st_ops_success *skel;
	char bpffs_path[256];
	int ret;

	/* pass the delegated BPF FS mount as the token source */
	snprintf(bpffs_path, sizeof(bpffs_path), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = bpffs_path;

	skel = dummy_st_ops_success__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "obj_token_path_open"))
		return -EINVAL;

	ret = dummy_st_ops_success__load(skel);
	dummy_st_ops_success__destroy(skel);

	if (expect_success) {
		if (!ASSERT_OK(ret, "obj_token_path_load"))
			return -EINVAL;
		return 0;
	}

	/* expect failure */
	if (!ASSERT_ERR(ret, "obj_token_path_load"))
		return -EINVAL;

	return 0;
}
/* Expect struct_ops load to fail: the BPF FS behind mnt_fd does not
 * delegate the BPF_BTF_LOAD command, so mandatory BTF upload is denied.
 */
static int userns_obj_priv_btf_fail(int mnt_fd)
{
	const bool expect_success = false;

	return validate_struct_ops_load(mnt_fd, expect_success);
}
/* Expect struct_ops load to succeed: the BPF FS behind mnt_fd delegates
 * BPF_BTF_LOAD, so BTF can be uploaded with the token.
 */
static int userns_obj_priv_btf_success(int mnt_fd)
{
	const bool expect_success = true;

	return validate_struct_ops_load(mnt_fd, expect_success);
}
#define bit(n) (1ULL << (n))
void test_token(void)
{
if (test__start_subtest("map_token")) {
......@@ -692,4 +793,43 @@ void test_token(void)
subtest_userns(&opts, userns_prog_load);
}
if (test__start_subtest("obj_priv_map")) {
struct bpffs_opts opts = {
.cmds = bit(BPF_MAP_CREATE),
.maps = bit(BPF_MAP_TYPE_QUEUE),
};
subtest_userns(&opts, userns_obj_priv_map);
}
if (test__start_subtest("obj_priv_prog")) {
struct bpffs_opts opts = {
.cmds = bit(BPF_PROG_LOAD),
.progs = bit(BPF_PROG_TYPE_KPROBE),
.attachs = ~0ULL,
};
subtest_userns(&opts, userns_obj_priv_prog);
}
if (test__start_subtest("obj_priv_btf_fail")) {
struct bpffs_opts opts = {
/* disallow BTF loading */
.cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
.attachs = ~0ULL,
};
subtest_userns(&opts, userns_obj_priv_btf_fail);
}
if (test__start_subtest("obj_priv_btf_success")) {
struct bpffs_opts opts = {
/* allow BTF loading */
.cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD),
.maps = bit(BPF_MAP_TYPE_STRUCT_OPS),
.progs = bit(BPF_PROG_TYPE_STRUCT_OPS),
.attachs = ~0ULL,
};
subtest_userns(&opts, userns_obj_priv_btf_success);
}
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
/* License string the kernel reads at load time */
char _license[] SEC("license") = "GPL";
/* Minimal queue map; the enclosing selftest treats BPF_MAP_TYPE_QUEUE as a
 * privileged map type, so loading this object exercises token-gated
 * BPF_MAP_CREATE delegation.
 */
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 1);
	__type(value, __u32);
} priv_map SEC(".maps");
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
/* License string the kernel reads at load time */
char _license[] SEC("license") = "GPL";
/* Minimal kprobe program; BPF_PROG_TYPE_KPROBE is privileged, so loading
 * this object exercises token-gated BPF_PROG_LOAD delegation in the
 * enclosing selftest. Body is intentionally trivial.
 */
SEC("kprobe")
int kprobe_prog(void *ctx)
{
	return 1;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment