Commit a177fc2b authored by Andrii Nakryiko, committed by Alexei Starovoitov

bpf: Add BPF token support to BPF_MAP_CREATE command

Allow providing token_fd for the BPF_MAP_CREATE command to enable controlled
BPF map creation from an unprivileged process through a delegated BPF token.
A new BPF_F_TOKEN_FD flag is added; it must be set in map_flags to indicate
that a BPF token FD is being passed in the map_token_fd field of the
BPF_MAP_CREATE command.
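
As a usage sketch (not part of the patch itself): with this change an unprivileged
process could create a map through a delegated token roughly as below. BPF_F_TOKEN_FD
and map_token_fd come from this patch; the token_fd is assumed to have been obtained
via BPF_TOKEN_CREATE (see the sketch after the next paragraph).

  #include <linux/bpf.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Create a small BPF_MAP_TYPE_HASH map through a delegated BPF token.
   * Requires uapi headers that already include this patch.
   */
  static int map_create_with_token(int token_fd)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.map_type = BPF_MAP_TYPE_HASH;
          attr.key_size = 4;
          attr.value_size = 8;
          attr.max_entries = 16;
          attr.map_flags = BPF_F_TOKEN_FD;  /* token FD is passed in map_token_fd */
          attr.map_token_fd = token_fd;

          return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  }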

Wire through a set of allowed BPF map types to the BPF token, derived from
the BPF FS instance at BPF token creation time. This, in combination with
allowed_cmds, makes it possible to create a narrowly-focused BPF token
(controlled by a privileged agent) with a restrictive set of BPF map types
that the application can attempt to create.
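
For completeness, a rough sketch of how the token FD itself would be obtained: a
privileged agent is assumed to have mounted a bpffs instance with delegate_cmds and
delegate_maps mount options, and the unprivileged process then derives a token from
it with BPF_TOKEN_CREATE (the command and its bpffs_fd field come from the parent
commit; the path below is an assumed example).

  #include <fcntl.h>
  #include <linux/bpf.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Derive a BPF token FD from an already-mounted, delegation-enabled bpffs. */
  static int token_create(const char *bpffs_path)
  {
          union bpf_attr attr;
          int bpffs_fd, token_fd;

          bpffs_fd = open(bpffs_path, O_RDONLY | O_DIRECTORY);
          if (bpffs_fd < 0)
                  return -1;

          memset(&attr, 0, sizeof(attr));
          attr.token_create.bpffs_fd = bpffs_fd;
          token_fd = syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
          close(bpffs_fd);
          return token_fd;
  }
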
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-5-andrii@kernel.org
parent 35f96de0
@@ -1630,6 +1630,7 @@ struct bpf_token {
atomic64_t refcnt;
struct user_namespace *userns;
u64 allowed_cmds;
u64 allowed_maps;
};
struct bpf_struct_ops_value;
@@ -2297,6 +2298,7 @@ int bpf_token_create(union bpf_attr *attr);
struct bpf_token *bpf_token_get_from_fd(u32 ufd);
bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
......
@@ -983,6 +983,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
__MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -1365,6 +1366,9 @@ enum {
/* Flag for value_type_btf_obj_fd, the fd is available */
BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
/* BPF token FD is passed in a corresponding command's token_fd field */
BPF_F_TOKEN_FD = (1U << 16),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1443,6 +1447,10 @@ union bpf_attr {
* type data for
* btf_vmlinux_value_type_id.
*/
/* BPF token FD to use with BPF_MAP_CREATE operation.
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
......
@@ -620,7 +620,8 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
else if (opts->delegate_cmds)
seq_printf(m, ",delegate_cmds=0x%llx", opts->delegate_cmds);
- if (opts->delegate_maps == ~0ULL)
+ mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
+ if ((opts->delegate_maps & mask) == mask)
seq_printf(m, ",delegate_maps=any");
else if (opts->delegate_maps)
seq_printf(m, ",delegate_maps=0x%llx", opts->delegate_maps);
......
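As an illustration of the mask logic above (an assumed workflow, with illustrative
map types): a privileged agent could compute the per-map-type bitmask it wants to
delegate and pass it, together with a delegate_cmds mask that includes BPF_MAP_CREATE,
as bpffs mount options. Hex masks match what bpf_show_options() renders above.

  #include <linux/bpf.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned long long maps_mask = 0;

          /* delegate only hash and array map creation (illustrative choice) */
          maps_mask |= 1ULL << BPF_MAP_TYPE_HASH;
          maps_mask |= 1ULL << BPF_MAP_TYPE_ARRAY;

          /* print an example mount command for the privileged agent */
          printf("mount -t bpf -o delegate_cmds=0x%llx,delegate_maps=0x%llx bpffs /sys/fs/bpf\n",
                 1ULL << BPF_MAP_CREATE, maps_mask);
          return 0;
  }
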
@@ -1011,8 +1011,8 @@ int map_check_no_btf(const struct bpf_map *map,
return -ENOTSUPP;
}
- static int map_check_btf(struct bpf_map *map, const struct btf *btf,
- u32 btf_key_id, u32 btf_value_id)
+ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
+ const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
{
const struct btf_type *key_type, *value_type;
u32 key_size, value_size;
@@ -1040,7 +1040,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
if (!IS_ERR_OR_NULL(map->record)) {
int i;
- if (!bpf_capable()) {
+ if (!bpf_token_capable(token, CAP_BPF)) {
ret = -EPERM;
goto free_map_tab;
}
@@ -1128,14 +1128,16 @@ static bool bpf_net_capable(void)
return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
}
- #define BPF_MAP_CREATE_LAST_FIELD value_type_btf_obj_fd
+ #define BPF_MAP_CREATE_LAST_FIELD map_token_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
const struct bpf_map_ops *ops;
struct bpf_token *token = NULL;
int numa_node = bpf_map_attr_numa_node(attr);
u32 map_type = attr->map_type;
struct bpf_map *map;
bool token_flag;
int f_flags;
int err;
@@ -1143,6 +1145,12 @@ static int map_create(union bpf_attr *attr)
if (err)
return -EINVAL;
/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
* to avoid per-map type checks tripping on unknown flag
*/
token_flag = attr->map_flags & BPF_F_TOKEN_FD;
attr->map_flags &= ~BPF_F_TOKEN_FD;
if (attr->btf_vmlinux_value_type_id) {
if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
attr->btf_key_type_id || attr->btf_value_type_id)
@@ -1183,14 +1191,32 @@ static int map_create(union bpf_attr *attr)
if (!ops->map_mem_usage)
return -EINVAL;
if (token_flag) {
token = bpf_token_get_from_fd(attr->map_token_fd);
if (IS_ERR(token))
return PTR_ERR(token);
/* if current token doesn't grant map creation permissions,
* then we can't use this token, so ignore it and rely on
* system-wide capabilities checks
*/
if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
!bpf_token_allow_map_type(token, attr->map_type)) {
bpf_token_put(token);
token = NULL;
}
}
err = -EPERM;
/* Intent here is for unprivileged_bpf_disabled to block BPF map
* creation for unprivileged users; other actions depend
* on fd availability and access to bpffs, so are dependent on
* object creation success. Even with unprivileged BPF disabled,
* capability checks are still carried out.
*/
- if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
- return -EPERM;
+ if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
+ goto put_token;
/* check privileged map type permissions */
switch (map_type) {
@@ -1223,25 +1249,27 @@ static int map_create(union bpf_attr *attr)
case BPF_MAP_TYPE_LRU_PERCPU_HASH:
case BPF_MAP_TYPE_STRUCT_OPS:
case BPF_MAP_TYPE_CPUMAP:
- if (!bpf_capable())
- return -EPERM;
+ if (!bpf_token_capable(token, CAP_BPF))
+ goto put_token;
break;
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_XSKMAP:
- if (!bpf_net_capable())
- return -EPERM;
+ if (!bpf_token_capable(token, CAP_NET_ADMIN))
+ goto put_token;
break;
default:
WARN(1, "unsupported map type %d", map_type);
- return -EPERM;
+ goto put_token;
}
map = ops->map_alloc(attr);
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto put_token;
+ }
map->ops = ops;
map->map_type = map_type;
@@ -1278,7 +1306,7 @@ static int map_create(union bpf_attr *attr)
map->btf = btf;
if (attr->btf_value_type_id) {
- err = map_check_btf(map, btf, attr->btf_key_type_id,
+ err = map_check_btf(map, token, btf, attr->btf_key_type_id,
attr->btf_value_type_id);
if (err)
goto free_map;
@@ -1299,6 +1327,7 @@ static int map_create(union bpf_attr *attr)
goto free_map_sec;
bpf_map_save_memcg(map);
bpf_token_put(token);
err = bpf_map_new_fd(map, f_flags);
if (err < 0) {
@@ -1319,6 +1348,8 @@ static int map_create(union bpf_attr *attr)
free_map:
btf_put(map->btf);
map->ops->map_free(map);
put_token:
bpf_token_put(token);
return err;
}
......
@@ -73,6 +73,13 @@ static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
seq_printf(m, "allowed_cmds:\tany\n");
else
seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
if ((token->allowed_maps & mask) == mask)
seq_printf(m, "allowed_maps:\tany\n");
else
seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
}
#define BPF_TOKEN_INODE_NAME "bpf-token"
@@ -168,6 +175,7 @@ int bpf_token_create(union bpf_attr *attr)
mnt_opts = path.dentry->d_sb->s_fs_info;
token->allowed_cmds = mnt_opts->delegate_cmds;
token->allowed_maps = mnt_opts->delegate_maps;
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
@@ -215,3 +223,11 @@ bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
return false;
return token->allowed_cmds & (1ULL << cmd);
}
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
{
if (!token || type >= __MAX_BPF_MAP_TYPE)
return false;
return token->allowed_maps & (1ULL << type);
}
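A small sketch of observing the result from userspace (token_fd is assumed to be a
BPF token FD obtained via BPF_TOKEN_CREATE): the token's fdinfo now reports
allowed_maps alongside allowed_cmds, as emitted by bpf_token_show_fdinfo() above.

  #include <stdio.h>

  /* Dump /proc/self/fdinfo/<token_fd>; expect lines such as
   * "allowed_maps:\tany" or "allowed_maps:\t0x..." among the output.
   */
  static void dump_token_fdinfo(int token_fd)
  {
          char path[64], line[256];
          FILE *f;

          snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", token_fd);
          f = fopen(path, "r");
          if (!f)
                  return;
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
  }
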
@@ -983,6 +983,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
__MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -1365,6 +1366,9 @@ enum {
/* Flag for value_type_btf_obj_fd, the fd is available */
BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
/* BPF token FD is passed in a corresponding command's token_fd field */
BPF_F_TOKEN_FD = (1U << 16),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1443,6 +1447,10 @@ union bpf_attr {
* type data for
* btf_vmlinux_value_type_id.
*/
/* BPF token FD to use with BPF_MAP_CREATE operation.
* If provided, map_flags should have BPF_F_TOKEN_FD flag set.
*/
__s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
......
@@ -68,6 +68,8 @@ void test_libbpf_probe_map_types(void)
if (map_type == BPF_MAP_TYPE_UNSPEC)
continue;
if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
continue;
if (!test__start_subtest(map_type_name))
continue;
......
@@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void)
const char *map_type_str;
char buf[256];
if (map_type == __MAX_BPF_MAP_TYPE)
continue;
map_type_name = btf__str_by_offset(btf, e->name_off);
map_type_str = libbpf_bpf_map_type_str(map_type);
ASSERT_OK_PTR(map_type_str, map_type_name);
......