Commit ffa915f4 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'bpf_metadata'

Stanislav Fomichev says:

====================
Currently, if a user wants to store arbitrary metadata for an eBPF
program, for example, the program build commit hash or version, they
could store it in a map, and conveniently libbpf uses .data section to
populate an internal map. However, if the program does not actually
reference the map, then the map would be de-refcounted and freed.

This patch set introduces a new syscall BPF_PROG_BIND_MAP to add a map
to a program's used_maps, even if the program instructions do not
reference the map.

libbpf is extended to always BPF_PROG_BIND_MAP .rodata section so the
metadata is kept in place.
bpftool is also extended to print metadata in the 'bpftool prog' list.

The variable is considered metadata if it starts with the
magic 'bpf_metadata_' prefix; everything after the prefix is the
metadata name.

An example use of this would be BPF C file declaring:

  volatile const char bpf_metadata_commit_hash[] SEC(".rodata") = "abcdef123456";

and bpftool would emit:

  $ bpftool prog
  [...]
        metadata:
                commit_hash = "abcdef123456"

v6 changes:
* libbpf: drop FEAT_GLOBAL_DATA from probe_prog_bind_map (Andrii Nakryiko)
* bpftool: combine find_metadata_map_id & find_metadata;
  drops extra bpf_map_get_fd_by_id calls (Andrii Nakryiko)
* bpftool: use strncmp instead of strstr (Andrii Nakryiko)
* bpftool: memset(map_info) and extra empty line (Andrii Nakryiko)

v5 changes:
* selftest: verify that prog holds rodata (Andrii Nakryiko)
* selftest: use volatile for metadata (Andrii Nakryiko)
* bpftool: use sizeof in BPF_METADATA_PREFIX_LEN (Andrii Nakryiko)
* bpftool: new find_metadata that does map lookup (Andrii Nakryiko)
* libbpf: don't generalize probe_create_global_data (Andrii Nakryiko)
* libbpf: use OPTS_VALID in bpf_prog_bind_map (Andrii Nakryiko)
* libbpf: keep LIBBPF_0.2.0 sorted (Andrii Nakryiko)

v4 changes:
* Don't return EEXIST from syscall if already bound (Andrii Nakryiko)
* Removed --metadata argument (Andrii Nakryiko)
* Removed custom .metadata section (Alexei Starovoitov)
* Addressed Andrii's suggestions about btf helpers and vsi (Andrii Nakryiko)
* Moved bpf_prog_find_metadata into bpftool (Alexei Starovoitov)

v3 changes:
* API changes for bpf_prog_find_metadata (Toke Høiland-Jørgensen)

v2 changes:
* Made struct bpf_prog_bind_opts in libbpf so flags is optional.
* Deduped probe_kern_global_data and probe_prog_bind_map into a common
  helper.
* Added comment regarding why EEXIST is ignored in libbpf bind map.
* Froze all LIBBPF_MAP_METADATA internal maps.
* Moved bpf_prog_bind_map into new LIBBPF_0.1.1 in libbpf.map.
* Added p_err() calls on error cases in bpftool show_prog_metadata.
* Reverse christmas tree coding style in bpftool show_prog_metadata.
* Made bpftool gen skeleton recognize .metadata as an internal map and
  generate datasec definition in skeleton.
* Added C test using skeleton to assert that the metadata is what we
  expect and rebinding causes EEXIST.

v1 changes:
* Fixed a few missing unlocks, and missing close while iterating map fds.
* Move mutex initialization to right after prog aux allocation, and mutex
  destroy to right after prog aux free.
* s/ADD_MAP/BIND_MAP/
* Use mutex only instead of RCU to protect the used_map array & count.

Cc: YiFei Zhu <zhuyifei1999@gmail.com>
====================
Acked-by: default avatarAndrii Nakryiko <andriin@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents d317b0a8 d42d1cc4
......@@ -111,7 +111,9 @@ static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
int i, cnt, err;
int i, cnt, err = 0;
mutex_lock(&prog->aux->used_maps_mutex);
/* Quickly count the maps we will have to remember */
cnt = 0;
......@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
cnt++;
if (!cnt)
return 0;
goto out;
nfp_prog->map_records = kmalloc_array(cnt,
sizeof(nfp_prog->map_records[0]),
GFP_KERNEL);
if (!nfp_prog->map_records)
return -ENOMEM;
if (!nfp_prog->map_records) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < prog->aux->used_map_cnt; i++)
if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
......@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
prog->aux->used_maps[i]);
if (err) {
nfp_map_ptrs_forget(bpf, nfp_prog);
return err;
goto out;
}
}
WARN_ON(cnt != nfp_prog->map_records_cnt);
return 0;
out:
mutex_unlock(&prog->aux->used_maps_mutex);
return err;
}
static int
......
......@@ -751,6 +751,7 @@ struct bpf_prog_aux {
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
......
......@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
......@@ -658,6 +659,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
struct { /* struct used by BPF_PROG_BIND_MAP command */
__u32 prog_fd;
__u32 map_fd;
__u32 flags; /* extra flags */
} prog_bind_map;
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
......
......@@ -98,6 +98,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
mutex_init(&fp->aux->used_maps_mutex);
return fp;
}
......@@ -253,6 +254,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
void __bpf_prog_free(struct bpf_prog *fp)
{
if (fp->aux) {
mutex_destroy(&fp->aux->used_maps_mutex);
free_percpu(fp->aux->stats);
kfree(fp->aux->poke_tab);
kfree(fp->aux);
......@@ -1747,8 +1749,9 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
int i;
int i, ret = 0;
mutex_lock(&aux->used_maps_mutex);
for (i = 0; i < aux->used_map_cnt; i++) {
struct bpf_map *map = aux->used_maps[i];
struct bpf_array *array;
......@@ -1757,11 +1760,15 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
continue;
array = container_of(map, struct bpf_array, map);
if (!bpf_prog_array_compatible(array, fp))
return -EINVAL;
if (!bpf_prog_array_compatible(array, fp)) {
ret = -EINVAL;
goto out;
}
}
return 0;
out:
mutex_unlock(&aux->used_maps_mutex);
return ret;
}
static void bpf_prog_select_func(struct bpf_prog *fp)
......
......@@ -3162,21 +3162,25 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
const struct bpf_map *map;
int i;
mutex_lock(&prog->aux->used_maps_mutex);
for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
map = prog->aux->used_maps[i];
if (map == (void *)addr) {
*type = BPF_PSEUDO_MAP_FD;
return map;
goto out;
}
if (!map->ops->map_direct_value_meta)
continue;
if (!map->ops->map_direct_value_meta(map, addr, off)) {
*type = BPF_PSEUDO_MAP_VALUE;
return map;
goto out;
}
}
map = NULL;
return NULL;
out:
mutex_unlock(&prog->aux->used_maps_mutex);
return map;
}
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
......@@ -3294,6 +3298,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
memcpy(info.tag, prog->tag, sizeof(prog->tag));
memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
mutex_lock(&prog->aux->used_maps_mutex);
ulen = info.nr_map_ids;
info.nr_map_ids = prog->aux->used_map_cnt;
ulen = min_t(u32, info.nr_map_ids, ulen);
......@@ -3303,9 +3308,12 @@ static int bpf_prog_get_info_by_fd(struct file *file,
for (i = 0; i < ulen; i++)
if (put_user(prog->aux->used_maps[i]->id,
&user_map_ids[i]))
&user_map_ids[i])) {
mutex_unlock(&prog->aux->used_maps_mutex);
return -EFAULT;
}
}
mutex_unlock(&prog->aux->used_maps_mutex);
err = set_info_rec_size(&info);
if (err)
......@@ -4153,6 +4161,66 @@ static int bpf_iter_create(union bpf_attr *attr)
return err;
}
#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
/* BPF_PROG_BIND_MAP: add 'map' to 'prog's used_maps so the map stays
 * alive for the program's lifetime even though no instruction references
 * it (e.g. an .rodata map carrying metadata).  Binding an already-bound
 * map succeeds and is a no-op.  Returns 0 on success, negative errno on
 * failure.
 */
static int bpf_prog_bind_map(union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct bpf_map **used_maps_old, **used_maps_new;
	int i, ret = 0;

	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
		return -EINVAL;

	/* No flags are defined yet; reject anything non-zero so they can
	 * be given meaning later.
	 */
	if (attr->prog_bind_map.flags)
		return -EINVAL;

	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	map = bpf_map_get(attr->prog_bind_map.map_fd);
	if (IS_ERR(map)) {
		ret = PTR_ERR(map);
		goto out_prog_put;
	}

	mutex_lock(&prog->aux->used_maps_mutex);

	used_maps_old = prog->aux->used_maps;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (used_maps_old[i] == map) {
			/* Already bound: drop the reference taken by
			 * bpf_map_get() above, otherwise it leaks (the
			 * 'if (ret)' put below only runs on error).
			 */
			bpf_map_put(map);
			goto out_unlock;
		}

	/* Grow the used_maps array by one; on success the reference taken
	 * by bpf_map_get() is transferred to used_maps.
	 */
	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
				      sizeof(used_maps_new[0]),
				      GFP_KERNEL);
	if (!used_maps_new) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	memcpy(used_maps_new, used_maps_old,
	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
	used_maps_new[prog->aux->used_map_cnt] = map;

	prog->aux->used_map_cnt++;
	prog->aux->used_maps = used_maps_new;

	kfree(used_maps_old);

out_unlock:
	mutex_unlock(&prog->aux->used_maps_mutex);

	if (ret)
		bpf_map_put(map);
out_prog_put:
	bpf_prog_put(prog);
	return ret;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr;
......@@ -4286,6 +4354,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_LINK_DETACH:
err = link_detach(&attr);
break;
case BPF_PROG_BIND_MAP:
err = bpf_prog_bind_map(&attr);
break;
default:
err = -EINVAL;
break;
......
......@@ -5441,15 +5441,20 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
if (new) {
u32 i;
mutex_lock(&new->aux->used_maps_mutex);
/* generic XDP does not work with DEVMAPs that can
* have a bpf_prog installed on an entry
*/
for (i = 0; i < new->aux->used_map_cnt; i++) {
if (dev_map_can_have_prog(new->aux->used_maps[i]))
return -EINVAL;
if (cpu_map_prog_allowed(new->aux->used_maps[i]))
if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
cpu_map_prog_allowed(new->aux->used_maps[i])) {
mutex_unlock(&new->aux->used_maps_mutex);
return -EINVAL;
}
}
mutex_unlock(&new->aux->used_maps_mutex);
}
switch (xdp->command) {
......
......@@ -119,6 +119,12 @@ void jsonw_pretty(json_writer_t *self, bool on)
self->pretty = on;
}
/* Reset the writer's separator state so the same writer can emit a new,
 * independent JSON document.  Only valid once the previous document is
 * complete: all objects/arrays must have been closed (depth == 0).
 */
void jsonw_reset(json_writer_t *self)
{
	assert(self->depth == 0);
	self->sep = '\0';
}
/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{
......
......@@ -27,6 +27,9 @@ void jsonw_destroy(json_writer_t **self_p);
/* Cause output to have pretty whitespace */
void jsonw_pretty(json_writer_t *self, bool on);
/* Reset separator to create new JSON */
void jsonw_reset(json_writer_t *self);
/* Add property name */
void jsonw_name(json_writer_t *self, const char *name);
......
......@@ -29,6 +29,9 @@
#include "main.h"
#include "xlated_dumper.h"
#define BPF_METADATA_PREFIX "bpf_metadata_"
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
......@@ -151,6 +154,198 @@ static void show_prog_maps(int fd, __u32 num_maps)
}
}
/* Scan the maps used by prog_fd for a metadata candidate: a single-entry
 * ARRAY map with int keys, a BTF-described value, and ".rodata" in its
 * name (the shape libbpf gives its internal .rodata map).  On success,
 * returns a malloc'd copy of the map value (caller frees) and fills
 * *map_info for the matching map; returns NULL when no such map exists
 * or on any error.
 */
static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
{
	struct bpf_prog_info prog_info;
	__u32 prog_info_len;
	__u32 map_info_len;
	void *value = NULL;
	__u32 *map_ids;
	int nr_maps;
	int key = 0;
	int map_fd;
	int ret;
	__u32 i;

	/* First query: learn how many map ids the program has. */
	memset(&prog_info, 0, sizeof(prog_info));
	prog_info_len = sizeof(prog_info);
	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret)
		return NULL;

	if (!prog_info.nr_map_ids)
		return NULL;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
	if (!map_ids)
		return NULL;

	/* Second query: actually fetch the map id array. */
	nr_maps = prog_info.nr_map_ids;
	memset(&prog_info, 0, sizeof(prog_info));
	prog_info.nr_map_ids = nr_maps;
	prog_info.map_ids = ptr_to_u64(map_ids);
	prog_info_len = sizeof(prog_info);

	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret)
		goto free_map_ids;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (map_fd < 0)
			goto free_map_ids;

		memset(map_info, 0, sizeof(*map_info));
		map_info_len = sizeof(*map_info);
		ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
		if (ret < 0) {
			close(map_fd);
			goto free_map_ids;
		}

		/* Skip maps that don't look like libbpf's .rodata map. */
		if (map_info->type != BPF_MAP_TYPE_ARRAY ||
		    map_info->key_size != sizeof(int) ||
		    map_info->max_entries != 1 ||
		    !map_info->btf_value_type_id ||
		    !strstr(map_info->name, ".rodata")) {
			close(map_fd);
			continue;
		}

		/* Candidate found: copy out its (single) value. */
		value = malloc(map_info->value_size);
		if (!value) {
			close(map_fd);
			goto free_map_ids;
		}

		if (bpf_map_lookup_elem(map_fd, &key, value)) {
			close(map_fd);
			free(value);
			value = NULL;
			goto free_map_ids;
		}

		close(map_fd);
		break;
	}

free_map_ids:
	free(map_ids);
	return value;
}
/* True when variable name 's' starts with the "bpf_metadata_" magic prefix. */
static bool has_metadata_prefix(const char *s)
{
	return !strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN);
}
/* Print the program's metadata variables (those whose name starts with
 * the "bpf_metadata_" prefix) from its .rodata map, as JSON or plain
 * text depending on the global json_output flag.  Quietly does nothing
 * when the program has no maps or no metadata candidate map.
 */
static void show_prog_metadata(int fd, __u32 num_maps)
{
	const struct btf_type *t_datasec, *t_var;
	struct bpf_map_info map_info;
	struct btf_var_secinfo *vsi;
	bool printed_header = false;
	struct btf *btf = NULL;
	unsigned int i, vlen;
	void *value = NULL;
	const char *name;
	int err;

	if (!num_maps)
		return;

	memset(&map_info, 0, sizeof(map_info));

	value = find_metadata(fd, &map_info);
	if (!value)
		return;

	err = btf__get_from_id(map_info.btf_id, &btf);
	if (err || !btf)
		goto out_free;

	t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
	if (!btf_is_datasec(t_datasec))
		goto out_free;

	vlen = btf_vlen(t_datasec);
	vsi = btf_var_secinfos(t_datasec);

	/* We don't proceed to check the kinds of the elements of the DATASEC.
	 * The verifier enforces them to be BTF_KIND_VAR.
	 */

	if (json_output) {
		struct btf_dumper d = {
			.btf = btf,
			.jw = json_wtr,
			.is_plain_text = false,
		};

		for (i = 0; i < vlen; i++, vsi++) {
			t_var = btf__type_by_id(btf, vsi->type);
			name = btf__name_by_offset(btf, t_var->name_off);

			if (!has_metadata_prefix(name))
				continue;

			/* Open the "metadata" object lazily, only once the
			 * first metadata variable is actually found.
			 */
			if (!printed_header) {
				jsonw_name(json_wtr, "metadata");
				jsonw_start_object(json_wtr);
				printed_header = true;
			}

			/* Emit the variable name with the prefix stripped. */
			jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
			if (err) {
				p_err("btf dump failed: %d", err);
				break;
			}
		}
		if (printed_header)
			jsonw_end_object(json_wtr);
	} else {
		json_writer_t *btf_wtr;
		struct btf_dumper d = {
			.btf = btf,
			.is_plain_text = true,
		};

		for (i = 0; i < vlen; i++, vsi++) {
			t_var = btf__type_by_id(btf, vsi->type);
			name = btf__name_by_offset(btf, t_var->name_off);

			if (!has_metadata_prefix(name))
				continue;

			if (!printed_header) {
				/* Create the writer lazily: allocating it up
				 * front leaked it (and jsonw_destroy() would
				 * print a stray newline) when the program had
				 * maps but no metadata variables.
				 */
				btf_wtr = jsonw_new(stdout);
				if (!btf_wtr) {
					p_err("jsonw alloc failed");
					goto out_free;
				}
				d.jw = btf_wtr;

				printf("\tmetadata:");
				printed_header = true;
			}

			printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);

			/* Reset separator state so each value is dumped as an
			 * independent JSON fragment.
			 */
			jsonw_reset(btf_wtr);
			err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
			if (err) {
				p_err("btf dump failed: %d", err);
				break;
			}
		}
		if (printed_header)
			jsonw_destroy(&btf_wtr);
	}

out_free:
	btf__free(btf);
	free(value);
}
static void print_prog_header_json(struct bpf_prog_info *info)
{
jsonw_uint_field(json_wtr, "id", info->id);
......@@ -228,6 +423,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
emit_obj_refs_json(&refs_table, info->id, json_wtr);
show_prog_metadata(fd, info->nr_map_ids);
jsonw_end_object(json_wtr);
}
......@@ -297,6 +494,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
printf("\n");
show_prog_metadata(fd, info->nr_map_ids);
}
static int show_prog(int fd)
......
......@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
......@@ -658,6 +659,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
struct { /* struct used by BPF_PROG_BIND_MAP command */
__u32 prog_fd;
__u32 map_fd;
__u32 flags; /* extra flags */
} prog_bind_map;
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
......
......@@ -872,3 +872,19 @@ int bpf_enable_stats(enum bpf_stats_type type)
return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
}
/* Bind a map to a program via the BPF_PROG_BIND_MAP command so the kernel
 * keeps the map in the program's used_maps even when no instruction
 * references it.  'opts' is optional (may be NULL); only 'flags' is
 * currently carried through.  Returns the syscall result: 0 on success,
 * negative on failure.
 */
int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return -EINVAL;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}
......@@ -243,6 +243,14 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
struct bpf_prog_bind_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 flags;
};
#define bpf_prog_bind_opts__last_field flags
LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
const struct bpf_prog_bind_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
#endif
......
......@@ -174,6 +174,8 @@ enum kern_feature_id {
FEAT_EXP_ATTACH_TYPE,
/* bpf_probe_read_{kernel,user}[_str] helpers */
FEAT_PROBE_READ_KERN,
/* BPF_PROG_BIND_MAP is supported */
FEAT_PROG_BIND_MAP,
__FEAT_CNT,
};
......@@ -409,6 +411,7 @@ struct bpf_object {
struct extern_desc *externs;
int nr_extern;
int kconfig_map_idx;
int rodata_map_idx;
bool loaded;
bool has_subcalls;
......@@ -1070,6 +1073,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
obj->rodata_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
......@@ -1428,6 +1432,8 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
obj->efile.rodata->d_size);
if (err)
return err;
obj->rodata_map_idx = obj->nr_maps - 1;
}
if (obj->efile.bss_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
......@@ -3894,6 +3900,52 @@ static int probe_kern_probe_read_kernel(void)
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}
/* Feature probe for BPF_PROG_BIND_MAP: load a trivial always-return-0
 * socket filter and try to bind a one-entry array map to it.  Returns 1
 * when the kernel supports the command, 0 when it does not (or when the
 * probe program cannot be loaded), and a negative errno when even the
 * probe map cannot be created.
 */
static int probe_prog_bind_map(void)
{
	struct bpf_load_program_attr prg_attr;
	struct bpf_create_map_attr map_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	int ret, map, prog;

	memset(&map_attr, 0, sizeof(map_attr));
	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
	map_attr.key_size = sizeof(int);
	map_attr.value_size = 32;
	map_attr.max_entries = 1;

	map = bpf_create_map_xattr(&map_attr);
	if (map < 0) {
		/* Failing to create a plain array map is unexpected; warn
		 * loudly instead of silently reporting "unsupported".
		 */
		ret = -errno;
		cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
			__func__, cp, -ret);
		return ret;
	}

	memset(&prg_attr, 0, sizeof(prg_attr));
	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	prg_attr.insns = insns;
	prg_attr.insns_cnt = ARRAY_SIZE(insns);
	prg_attr.license = "GPL";

	prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
	if (prog < 0) {
		close(map);
		return 0;
	}

	ret = bpf_prog_bind_map(prog, map, NULL);

	close(map);
	close(prog);

	return ret >= 0;
}
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
......@@ -3934,6 +3986,9 @@ static struct kern_feature_desc {
},
[FEAT_PROBE_READ_KERN] = {
"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
},
[FEAT_PROG_BIND_MAP] = {
"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
}
};
......@@ -6468,6 +6523,20 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
if (ret >= 0) {
if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
if (prog->obj->rodata_map_idx >= 0 &&
kernel_supports(FEAT_PROG_BIND_MAP)) {
struct bpf_map *rodata_map =
&prog->obj->maps[prog->obj->rodata_map_idx];
if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': failed to bind .rodata map: %s\n",
prog->name, cp);
/* Don't fail hard if can't bind rodata. */
}
}
*pfd = ret;
ret = 0;
goto out;
......
......@@ -302,6 +302,7 @@ LIBBPF_0.1.0 {
LIBBPF_0.2.0 {
global:
bpf_prog_bind_map;
bpf_program__section_name;
perf_buffer__buffer_cnt;
perf_buffer__buffer_fd;
......
......@@ -68,7 +68,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
test_bpftool.sh
test_bpftool.sh \
test_bpftool_metadata.sh \
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Google LLC.
*/
#include <test_progs.h>
#include <cgroup_helpers.h>
#include <network_helpers.h>
#include "metadata_unused.skel.h"
#include "metadata_used.skel.h"
static int duration;
/* Check whether the map behind map_fd appears in prog_fd's used_maps.
 * Returns 0 when found, -ENOENT when not found, or another negative
 * errno on query/allocation failure.
 */
static int prog_holds_map(int prog_fd, int map_fd)
{
	struct bpf_prog_info prog_info = {};
	/* Was declared as struct bpf_prog_info, which only worked by
	 * coincidence ('id' shares the same offset in both structs); a map
	 * fd must be queried with struct bpf_map_info.
	 */
	struct bpf_map_info map_info = {};
	__u32 prog_info_len;
	__u32 map_info_len;
	__u32 *map_ids;
	int nr_maps;
	int ret;
	int i;

	map_info_len = sizeof(map_info);
	ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
	if (ret)
		return -errno;

	/* First program query: learn how many map ids to allocate for. */
	prog_info_len = sizeof(prog_info);
	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret)
		return -errno;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
	if (!map_ids)
		return -ENOMEM;

	/* Second query: fetch the map id array itself. */
	nr_maps = prog_info.nr_map_ids;
	memset(&prog_info, 0, sizeof(prog_info));
	prog_info.nr_map_ids = nr_maps;
	prog_info.map_ids = ptr_to_u64(map_ids);
	prog_info_len = sizeof(prog_info);

	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
	if (ret) {
		ret = -errno;
		goto free_map_ids;
	}

	ret = -ENOENT;
	for (i = 0; i < prog_info.nr_map_ids; i++) {
		if (map_ids[i] == map_info.id) {
			ret = 0;
			break;
		}
	}

free_map_ids:
	free(map_ids);
	return ret;
}
/* Load the skeleton whose program never references its metadata, and
 * verify the .rodata map is still bound, its values are readable, and
 * re-binding succeeds.
 */
static void test_metadata_unused(void)
{
	struct metadata_unused *obj;
	int err;

	obj = metadata_unused__open_and_load();
	if (CHECK(!obj, "skel-load", "errno %d", errno))
		return;

	err = prog_holds_map(bpf_program__fd(obj->progs.prog),
			     bpf_map__fd(obj->maps.rodata));
	/* Was 'return', which leaked the loaded skeleton on failure. */
	if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
		goto close_bpf_object;

	/* Assert that we can access the metadata in skel and the values are
	 * what we expect.
	 */
	if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo",
			  sizeof(obj->rodata->bpf_metadata_a)),
		  "bpf_metadata_a", "expected \"foo\", value differ"))
		goto close_bpf_object;
	if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b",
		  "expected 1, got %d", obj->rodata->bpf_metadata_b))
		goto close_bpf_object;

	/* Assert that binding metadata map to prog again succeeds. */
	err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
				bpf_map__fd(obj->maps.rodata), NULL);
	CHECK(err, "rebind_map", "errno %d, expected 0", errno);

close_bpf_object:
	metadata_unused__destroy(obj);
}
/* Load the skeleton whose program does reference its metadata, and
 * verify the .rodata map is bound, its values are readable, and
 * re-binding succeeds.
 */
static void test_metadata_used(void)
{
	struct metadata_used *obj;
	int err;

	obj = metadata_used__open_and_load();
	if (CHECK(!obj, "skel-load", "errno %d", errno))
		return;

	err = prog_holds_map(bpf_program__fd(obj->progs.prog),
			     bpf_map__fd(obj->maps.rodata));
	/* Was 'return', which leaked the loaded skeleton on failure. */
	if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
		goto close_bpf_object;

	/* Assert that we can access the metadata in skel and the values are
	 * what we expect.
	 */
	if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar",
			  sizeof(obj->rodata->bpf_metadata_a)),
		  "metadata_a", "expected \"bar\", value differ"))
		goto close_bpf_object;
	if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b",
		  "expected 2, got %d", obj->rodata->bpf_metadata_b))
		goto close_bpf_object;

	/* Assert that binding metadata map to prog again succeeds. */
	err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
				bpf_map__fd(obj->maps.rodata), NULL);
	CHECK(err, "rebind_map", "errno %d, expected 0", errno);

close_bpf_object:
	metadata_used__destroy(obj);
}
/* Entry point for the "metadata" selftest: run each case as a subtest so
 * they can be selected and reported individually.
 */
void test_metadata(void)
{
	if (test__start_subtest("unused"))
		test_metadata_unused();
	if (test__start_subtest("used"))
		test_metadata_used();
}
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
volatile const char bpf_metadata_a[] SEC(".rodata") = "foo";
volatile const int bpf_metadata_b SEC(".rodata") = 1;
/* Minimal program; exists only so the object has something for the
 * metadata in .rodata to be bound to.  cgroup_skb programs receive a
 * struct __sk_buff context (the original 'struct xdp_md *' only worked
 * because ctx is never dereferenced).
 */
SEC("cgroup_skb/egress")
int prog(struct __sk_buff *ctx)
{
	return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
volatile const char bpf_metadata_a[] SEC(".rodata") = "bar";
volatile const int bpf_metadata_b SEC(".rodata") = 2;
/* Program that actually reads a metadata variable, so .rodata is
 * referenced by the instructions.  cgroup_skb programs receive a
 * struct __sk_buff context (the original 'struct xdp_md *' only worked
 * because ctx is never dereferenced).
 */
SEC("cgroup_skb/egress")
int prog(struct __sk_buff *ctx)
{
	return bpf_metadata_b ? 1 : 0;
}
char _license[] SEC("license") = "GPL";
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
TESTNAME=bpftool_metadata
BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
BPF_DIR=$BPF_FS/test_$TESTNAME
# Remove the per-test pin directory; errors are tolerated so cleanup can
# run even when setup only partially completed.
_cleanup()
{
	set +e
	rm -rf $BPF_DIR 2> /dev/null
}

# EXIT trap during setup: anything failing before the test proper starts
# is reported as SKIP rather than FAIL.
cleanup_skip()
{
	echo "selftests: $TESTNAME [SKIP]"
	_cleanup

	exit $ksft_skip
}

# EXIT trap for the test body: report PASS/FAIL from the script's exit
# status (the body runs under 'set -e', so any failing command ends it).
cleanup()
{
	if [ "$?" = 0 ]; then
		echo "selftests: $TESTNAME [PASS]"
	else
		echo "selftests: $TESTNAME [FAILED]"
	fi
	_cleanup
}
if [ $(id -u) -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] Need root privileges"
exit $ksft_skip
fi
if [ -z "$BPF_FS" ]; then
echo "selftests: $TESTNAME [SKIP] Could not run test without bpffs mounted"
exit $ksft_skip
fi
if ! bpftool version > /dev/null 2>&1; then
echo "selftests: $TESTNAME [SKIP] Could not run test without bpftool"
exit $ksft_skip
fi
set -e
trap cleanup_skip EXIT
mkdir $BPF_DIR
trap cleanup EXIT
bpftool prog load metadata_unused.o $BPF_DIR/unused
METADATA_PLAIN="$(bpftool prog)"
echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
echo "$METADATA_PLAIN" | grep 'b = 1' > /dev/null
bpftool prog --json | grep '"metadata":{"a":"foo","b":1}' > /dev/null
bpftool map | grep 'metadata.rodata' > /dev/null
rm $BPF_DIR/unused
bpftool prog load metadata_used.o $BPF_DIR/used
METADATA_PLAIN="$(bpftool prog)"
echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
echo "$METADATA_PLAIN" | grep 'b = 2' > /dev/null
bpftool prog --json | grep '"metadata":{"a":"bar","b":2}' > /dev/null
bpftool map | grep 'metadata.rodata' > /dev/null
rm $BPF_DIR/used
exit 0
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment