Commit a177fc2b authored by Andrii Nakryiko, committed by Alexei Starovoitov

bpf: Add BPF token support to BPF_MAP_CREATE command

Allow providing token_fd for the BPF_MAP_CREATE command to enable
controlled BPF map creation from an unprivileged process through a
delegated BPF token. A new BPF_F_TOKEN_FD flag is added; it has to be
specified together with the BPF token FD in the BPF_MAP_CREATE command.

Wire through the set of allowed BPF map types to the BPF token, derived
from the BPF FS instance at BPF token creation time. This, in
combination with allowed_cmds, makes it possible to create a
narrowly-focused BPF token (controlled by a privileged agent) with a
restrictive set of BPF map types that an application can attempt to
create.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20240124022127.2379740-5-andrii@kernel.org
parent 35f96de0
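For illustration, a minimal userspace sketch (not part of this patch) of how the new fields are intended to be used. It assumes uapi headers recent enough to define BPF_F_TOKEN_FD and map_token_fd, and that a privileged agent has already created a BPF token against a BPF FS instance mounted with delegate_cmds/delegate_maps and handed its FD to the unprivileged process; the helper name and map parameters below are illustrative only.

/* Hypothetical usage sketch, not part of this patch. */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create a small ARRAY map through a delegated BPF token. token_fd is
 * assumed to have been received from a privileged agent (e.g. over a
 * unix socket) that created the token against a BPF FS instance mounted
 * with delegate_cmds/delegate_maps.
 */
static int create_array_with_token(int token_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 16;
	/* BPF_F_TOKEN_FD tells the kernel that map_token_fd is valid */
	attr.map_flags = BPF_F_TOKEN_FD;
	attr.map_token_fd = token_fd;

	/* returns a map FD on success, negative error on failure */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}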
@@ -1630,6 +1630,7 @@ struct bpf_token {
 	atomic64_t refcnt;
 	struct user_namespace *userns;
 	u64 allowed_cmds;
+	u64 allowed_maps;
 };
 
 struct bpf_struct_ops_value;
@@ -2297,6 +2298,7 @@ int bpf_token_create(union bpf_attr *attr);
 struct bpf_token *bpf_token_get_from_fd(u32 ufd);
 bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
 
 int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
 int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
...
@@ -983,6 +983,7 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_BLOOM_FILTER,
 	BPF_MAP_TYPE_USER_RINGBUF,
 	BPF_MAP_TYPE_CGRP_STORAGE,
+	__MAX_BPF_MAP_TYPE
 };
 
 /* Note that tracing related programs such as
@@ -1365,6 +1366,9 @@ enum {
 	/* Flag for value_type_btf_obj_fd, the fd is available */
 	BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+	/* BPF token FD is passed in a corresponding command's token_fd field */
+	BPF_F_TOKEN_FD = (1U << 16),
 };
 
 /* Flags for BPF_PROG_QUERY. */
@@ -1443,6 +1447,10 @@ union bpf_attr {
 		 * type data for
 		 * btf_vmlinux_value_type_id.
 		 */
+		/* BPF token FD to use with BPF_MAP_CREATE operation.
+		 * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+		 */
+		__s32	map_token_fd;
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
...
@@ -620,7 +620,8 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
 	else if (opts->delegate_cmds)
 		seq_printf(m, ",delegate_cmds=0x%llx", opts->delegate_cmds);
 
-	if (opts->delegate_maps == ~0ULL)
+	mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
+	if ((opts->delegate_maps & mask) == mask)
 		seq_printf(m, ",delegate_maps=any");
 	else if (opts->delegate_maps)
 		seq_printf(m, ",delegate_maps=0x%llx", opts->delegate_maps);
...
@@ -1011,8 +1011,8 @@ int map_check_no_btf(const struct bpf_map *map,
 	return -ENOTSUPP;
 }
 
-static int map_check_btf(struct bpf_map *map, const struct btf *btf,
-			 u32 btf_key_id, u32 btf_value_id)
+static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
+			 const struct btf *btf, u32 btf_key_id, u32 btf_value_id)
 {
 	const struct btf_type *key_type, *value_type;
 	u32 key_size, value_size;
@@ -1040,7 +1040,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 	if (!IS_ERR_OR_NULL(map->record)) {
 		int i;
 
-		if (!bpf_capable()) {
+		if (!bpf_token_capable(token, CAP_BPF)) {
 			ret = -EPERM;
 			goto free_map_tab;
 		}
@@ -1128,14 +1128,16 @@ static bool bpf_net_capable(void)
 	return capable(CAP_NET_ADMIN) || capable(CAP_SYS_ADMIN);
 }
 
-#define BPF_MAP_CREATE_LAST_FIELD value_type_btf_obj_fd
+#define BPF_MAP_CREATE_LAST_FIELD map_token_fd
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
 	const struct bpf_map_ops *ops;
+	struct bpf_token *token = NULL;
 	int numa_node = bpf_map_attr_numa_node(attr);
 	u32 map_type = attr->map_type;
 	struct bpf_map *map;
+	bool token_flag;
 	int f_flags;
 	int err;
 
@@ -1143,6 +1145,12 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		return -EINVAL;
 
+	/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
+	 * to avoid per-map type checks tripping on unknown flag
+	 */
+	token_flag = attr->map_flags & BPF_F_TOKEN_FD;
+	attr->map_flags &= ~BPF_F_TOKEN_FD;
+
 	if (attr->btf_vmlinux_value_type_id) {
 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
 		    attr->btf_key_type_id || attr->btf_value_type_id)
@@ -1183,14 +1191,32 @@ static int map_create(union bpf_attr *attr)
 	if (!ops->map_mem_usage)
 		return -EINVAL;
 
+	if (token_flag) {
+		token = bpf_token_get_from_fd(attr->map_token_fd);
+		if (IS_ERR(token))
+			return PTR_ERR(token);
+
+		/* if current token doesn't grant map creation permissions,
+		 * then we can't use this token, so ignore it and rely on
+		 * system-wide capabilities checks
+		 */
+		if (!bpf_token_allow_cmd(token, BPF_MAP_CREATE) ||
+		    !bpf_token_allow_map_type(token, attr->map_type)) {
+			bpf_token_put(token);
+			token = NULL;
+		}
+	}
+
+	err = -EPERM;
+
 	/* Intent here is for unprivileged_bpf_disabled to block BPF map
 	 * creation for unprivileged users; other actions depend
 	 * on fd availability and access to bpffs, so are dependent on
 	 * object creation success. Even with unprivileged BPF disabled,
 	 * capability checks are still carried out.
 	 */
-	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
-		return -EPERM;
+	if (sysctl_unprivileged_bpf_disabled && !bpf_token_capable(token, CAP_BPF))
+		goto put_token;
 
 	/* check privileged map type permissions */
 	switch (map_type) {
@@ -1223,25 +1249,27 @@ static int map_create(union bpf_attr *attr)
 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
 	case BPF_MAP_TYPE_STRUCT_OPS:
 	case BPF_MAP_TYPE_CPUMAP:
-		if (!bpf_capable())
-			return -EPERM;
+		if (!bpf_token_capable(token, CAP_BPF))
+			goto put_token;
 		break;
 	case BPF_MAP_TYPE_SOCKMAP:
 	case BPF_MAP_TYPE_SOCKHASH:
 	case BPF_MAP_TYPE_DEVMAP:
 	case BPF_MAP_TYPE_DEVMAP_HASH:
 	case BPF_MAP_TYPE_XSKMAP:
-		if (!bpf_net_capable())
-			return -EPERM;
+		if (!bpf_token_capable(token, CAP_NET_ADMIN))
+			goto put_token;
 		break;
 	default:
 		WARN(1, "unsupported map type %d", map_type);
-		return -EPERM;
+		goto put_token;
 	}
 
 	map = ops->map_alloc(attr);
-	if (IS_ERR(map))
-		return PTR_ERR(map);
+	if (IS_ERR(map)) {
+		err = PTR_ERR(map);
+		goto put_token;
+	}
 	map->ops = ops;
 	map->map_type = map_type;
@@ -1278,7 +1306,7 @@ static int map_create(union bpf_attr *attr)
 		map->btf = btf;
 
 		if (attr->btf_value_type_id) {
-			err = map_check_btf(map, btf, attr->btf_key_type_id,
+			err = map_check_btf(map, token, btf, attr->btf_key_type_id,
 					    attr->btf_value_type_id);
 			if (err)
 				goto free_map;
@@ -1299,6 +1327,7 @@ static int map_create(union bpf_attr *attr)
 		goto free_map_sec;
 
 	bpf_map_save_memcg(map);
+	bpf_token_put(token);
 
 	err = bpf_map_new_fd(map, f_flags);
 	if (err < 0) {
@@ -1319,6 +1348,8 @@ static int map_create(union bpf_attr *attr)
 free_map:
 	btf_put(map->btf);
 	map->ops->map_free(map);
+put_token:
+	bpf_token_put(token);
 	return err;
 }
...
@@ -73,6 +73,13 @@ static void bpf_token_show_fdinfo(struct seq_file *m, struct file *filp)
 		seq_printf(m, "allowed_cmds:\tany\n");
 	else
 		seq_printf(m, "allowed_cmds:\t0x%llx\n", token->allowed_cmds);
+
+	BUILD_BUG_ON(__MAX_BPF_MAP_TYPE >= 64);
+	mask = (1ULL << __MAX_BPF_MAP_TYPE) - 1;
+	if ((token->allowed_maps & mask) == mask)
+		seq_printf(m, "allowed_maps:\tany\n");
+	else
+		seq_printf(m, "allowed_maps:\t0x%llx\n", token->allowed_maps);
 }
 
 #define BPF_TOKEN_INODE_NAME "bpf-token"
@@ -168,6 +175,7 @@ int bpf_token_create(union bpf_attr *attr)
 	mnt_opts = path.dentry->d_sb->s_fs_info;
 	token->allowed_cmds = mnt_opts->delegate_cmds;
+	token->allowed_maps = mnt_opts->delegate_maps;
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
 	if (fd < 0) {
@@ -215,3 +223,11 @@ bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
 		return false;
 
 	return token->allowed_cmds & (1ULL << cmd);
 }
+
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type)
+{
+	if (!token || type >= __MAX_BPF_MAP_TYPE)
+		return false;
+
+	return token->allowed_maps & (1ULL << type);
+}
@@ -983,6 +983,7 @@ enum bpf_map_type {
 	BPF_MAP_TYPE_BLOOM_FILTER,
 	BPF_MAP_TYPE_USER_RINGBUF,
 	BPF_MAP_TYPE_CGRP_STORAGE,
+	__MAX_BPF_MAP_TYPE
 };
 
 /* Note that tracing related programs such as
@@ -1365,6 +1366,9 @@ enum {
 	/* Flag for value_type_btf_obj_fd, the fd is available */
 	BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+	/* BPF token FD is passed in a corresponding command's token_fd field */
+	BPF_F_TOKEN_FD = (1U << 16),
 };
 
 /* Flags for BPF_PROG_QUERY. */
@@ -1443,6 +1447,10 @@ union bpf_attr {
 		 * type data for
 		 * btf_vmlinux_value_type_id.
 		 */
+		/* BPF token FD to use with BPF_MAP_CREATE operation.
+		 * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+		 */
+		__s32	map_token_fd;
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
...
@@ -68,6 +68,8 @@ void test_libbpf_probe_map_types(void)
 		if (map_type == BPF_MAP_TYPE_UNSPEC)
 			continue;
 
+		if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0)
+			continue;
 		if (!test__start_subtest(map_type_name))
 			continue;
...
@@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void)
 		const char *map_type_str;
 		char buf[256];
 
+		if (map_type == __MAX_BPF_MAP_TYPE)
+			continue;
+
 		map_type_name = btf__str_by_offset(btf, e->name_off);
 		map_type_str = libbpf_bpf_map_type_str(map_type);
 		ASSERT_OK_PTR(map_type_str, map_type_name);
...