Commit 32b88d37 authored by Daniel Borkmann's avatar Daniel Borkmann

Merge branch 'bpf-libbpf-btf-defined-maps'

Andrii Nakryiko says:

====================
This patch set implements initial version (as discussed at LSF/MM2019
conference) of a new way to specify BPF maps, relying on BTF type information,
which allows for easy extensibility, preserving forward and backward
compatibility. See details and examples in description for patch #6.

[0] contains an outline of follow up extensions to be added after this basic
set of features lands. They are useful by themselves, but also allow bringing
libbpf to feature-parity with iproute2 BPF loader. That should open a path
forward for BPF loaders unification.

Patch #1 centralizes commonly used min/max macro in libbpf_internal.h.
Patch #2 extracts .BTF and .BTF.ext loading logic from elf_collect().
Patch #3 simplifies elf_collect() error-handling logic.
Patch #4 refactors map initialization logic into user-provided maps and global
data maps, in preparation to adding another way (BTF-defined maps).
Patch #5 adds support for map definitions in multiple ELF sections and
deprecates bpf_object__find_map_by_offset() API which doesn't appear to be
used anymore and makes assumption that all map definitions reside in single
ELF section.
Patch #6 splits BTF initialization from sanitization/loading into kernel to
preserve original BTF at the time of map initialization.
Patch #7 adds support for BTF-defined maps.
Patch #8 adds new test for BTF-defined map definition.
Patches #9-11 convert test BPF map definitions to use BTF way.

[0] https://lore.kernel.org/bpf/CAEf4BzbfdG2ub7gCi0OYqBrUoChVHWsmOntWAkJt47=FE+km+A@mail.gmail.com/

v1->v2:
- more BTF-sanity checks in parsing map definitions (Song);
- removed confusing usage of "attribute", switched to "field";
- split off elf_collect() refactor from btf loading refactor (Song);
- split selftests conversion into 3 patches (Stanislav):
  1. tests already relying on BTF;
  2. tests w/ custom types as key/value (so benefiting from BTF);
  3. all the rest tests (integers as key/value, special maps w/o BTF support).
- smaller code improvements (Song);

rfc->v1:
- error out on unknown field by default (Stanislav, Jakub, Lorenz);
====================
Signed-off-by: default avatarDaniel Borkmann <daniel@iogearbox.net>
parents 7f94208c df0b7792
...@@ -26,10 +26,11 @@ ...@@ -26,10 +26,11 @@
#include <memory.h> #include <memory.h>
#include <unistd.h> #include <unistd.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include "bpf.h" #include "bpf.h"
#include "libbpf.h" #include "libbpf.h"
#include <errno.h> #include "libbpf_internal.h"
/* /*
* When building perf, unistd.h is overridden. __NR_bpf is * When building perf, unistd.h is overridden. __NR_bpf is
...@@ -53,10 +54,6 @@ ...@@ -53,10 +54,6 @@
# endif # endif
#endif #endif
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
static inline __u64 ptr_to_u64(const void *ptr) static inline __u64 ptr_to_u64(const void *ptr)
{ {
return (__u64) (unsigned long) ptr; return (__u64) (unsigned long) ptr;
......
...@@ -6,10 +6,7 @@ ...@@ -6,10 +6,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include "libbpf.h" #include "libbpf.h"
#include "libbpf_internal.h"
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
struct bpf_prog_linfo { struct bpf_prog_linfo {
void *raw_linfo; void *raw_linfo;
......
...@@ -16,9 +16,6 @@ ...@@ -16,9 +16,6 @@
#include "libbpf_internal.h" #include "libbpf_internal.h"
#include "hashmap.h" #include "hashmap.h"
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#define BTF_MAX_NR_TYPES 0x7fffffff #define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff #define BTF_MAX_STR_OFFSET 0x7fffffff
......
...@@ -17,6 +17,7 @@ extern "C" { ...@@ -17,6 +17,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF" #define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext" #define BTF_EXT_ELF_SEC ".BTF.ext"
#define MAPS_ELF_SEC ".maps"
struct btf; struct btf;
struct btf_ext; struct btf_ext;
......
...@@ -18,9 +18,6 @@ ...@@ -18,9 +18,6 @@
#include "libbpf.h" #include "libbpf.h"
#include "libbpf_internal.h" #include "libbpf_internal.h"
#define min(x, y) ((x) < (y) ? (x) : (y))
#define max(x, y) ((x) < (y) ? (y) : (x))
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t"; static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1; static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
......
...@@ -207,7 +207,8 @@ static const char * const libbpf_type_to_btf_name[] = { ...@@ -207,7 +207,8 @@ static const char * const libbpf_type_to_btf_name[] = {
struct bpf_map { struct bpf_map {
int fd; int fd;
char *name; char *name;
size_t offset; int sec_idx;
size_t sec_offset;
int map_ifindex; int map_ifindex;
int inner_map_fd; int inner_map_fd;
struct bpf_map_def def; struct bpf_map_def def;
...@@ -234,6 +235,7 @@ struct bpf_object { ...@@ -234,6 +235,7 @@ struct bpf_object {
size_t nr_programs; size_t nr_programs;
struct bpf_map *maps; struct bpf_map *maps;
size_t nr_maps; size_t nr_maps;
size_t maps_cap;
struct bpf_secdata sections; struct bpf_secdata sections;
bool loaded; bool loaded;
...@@ -260,6 +262,7 @@ struct bpf_object { ...@@ -260,6 +262,7 @@ struct bpf_object {
} *reloc; } *reloc;
int nr_reloc; int nr_reloc;
int maps_shndx; int maps_shndx;
int btf_maps_shndx;
int text_shndx; int text_shndx;
int data_shndx; int data_shndx;
int rodata_shndx; int rodata_shndx;
...@@ -512,6 +515,7 @@ static struct bpf_object *bpf_object__new(const char *path, ...@@ -512,6 +515,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf; obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz; obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.maps_shndx = -1; obj->efile.maps_shndx = -1;
obj->efile.btf_maps_shndx = -1;
obj->efile.data_shndx = -1; obj->efile.data_shndx = -1;
obj->efile.rodata_shndx = -1; obj->efile.rodata_shndx = -1;
obj->efile.bss_shndx = -1; obj->efile.bss_shndx = -1;
...@@ -646,7 +650,9 @@ static int compare_bpf_map(const void *_a, const void *_b) ...@@ -646,7 +650,9 @@ static int compare_bpf_map(const void *_a, const void *_b)
const struct bpf_map *a = _a; const struct bpf_map *a = _a;
const struct bpf_map *b = _b; const struct bpf_map *b = _b;
return a->offset - b->offset; if (a->sec_idx != b->sec_idx)
return a->sec_idx - b->sec_idx;
return a->sec_offset - b->sec_offset;
} }
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
...@@ -763,24 +769,55 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, ...@@ -763,24 +769,55 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
return -ENOENT; return -ENOENT;
} }
static bool bpf_object__has_maps(const struct bpf_object *obj) static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{ {
return obj->efile.maps_shndx >= 0 || struct bpf_map *new_maps;
obj->efile.data_shndx >= 0 || size_t new_cap;
obj->efile.rodata_shndx >= 0 || int i;
obj->efile.bss_shndx >= 0;
if (obj->nr_maps < obj->maps_cap)
return &obj->maps[obj->nr_maps++];
new_cap = max(4ul, obj->maps_cap * 3 / 2);
new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
if (!new_maps) {
pr_warning("alloc maps for object failed\n");
return ERR_PTR(-ENOMEM);
}
obj->maps_cap = new_cap;
obj->maps = new_maps;
/* zero out new maps */
memset(obj->maps + obj->nr_maps, 0,
(obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
/*
* fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
* when failure (zclose won't close negative fd)).
*/
for (i = obj->nr_maps; i < obj->maps_cap; i++) {
obj->maps[i].fd = -1;
obj->maps[i].inner_map_fd = -1;
}
return &obj->maps[obj->nr_maps++];
} }
static int static int
bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
enum libbpf_map_type type, Elf_Data *data, int sec_idx, Elf_Data *data, void **data_buff)
void **data_buff)
{ {
struct bpf_map_def *def = &map->def;
char map_name[BPF_OBJ_NAME_LEN]; char map_name[BPF_OBJ_NAME_LEN];
struct bpf_map_def *def;
struct bpf_map *map;
map = bpf_object__add_map(obj);
if (IS_ERR(map))
return PTR_ERR(map);
map->libbpf_type = type; map->libbpf_type = type;
map->offset = ~(typeof(map->offset))0; map->sec_idx = sec_idx;
map->sec_offset = 0;
snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name, snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
libbpf_type_to_btf_name[type]); libbpf_type_to_btf_name[type]);
map->name = strdup(map_name); map->name = strdup(map_name);
...@@ -788,7 +825,10 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, ...@@ -788,7 +825,10 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
pr_warning("failed to alloc map name\n"); pr_warning("failed to alloc map name\n");
return -ENOMEM; return -ENOMEM;
} }
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
def = &map->def;
def->type = BPF_MAP_TYPE_ARRAY; def->type = BPF_MAP_TYPE_ARRAY;
def->key_size = sizeof(int); def->key_size = sizeof(int);
def->value_size = data->d_size; def->value_size = data->d_size;
...@@ -808,22 +848,55 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map, ...@@ -808,22 +848,55 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
return 0; return 0;
} }
static int bpf_object__init_maps(struct bpf_object *obj, int flags) static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
int err;
if (!obj->caps.global_data)
return 0;
/*
* Populate obj->maps with libbpf internal maps.
*/
if (obj->efile.data_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
obj->efile.data_shndx,
obj->efile.data,
&obj->sections.data);
if (err)
return err;
}
if (obj->efile.rodata_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
obj->efile.rodata_shndx,
obj->efile.rodata,
&obj->sections.rodata);
if (err)
return err;
}
if (obj->efile.bss_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
obj->efile.bss_shndx,
obj->efile.bss, NULL);
if (err)
return err;
}
return 0;
}
static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{ {
int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
bool strict = !(flags & MAPS_RELAX_COMPAT);
Elf_Data *symbols = obj->efile.symbols; Elf_Data *symbols = obj->efile.symbols;
int i, map_def_sz = 0, nr_maps = 0, nr_syms;
Elf_Data *data = NULL; Elf_Data *data = NULL;
int ret = 0; Elf_Scn *scn;
if (obj->efile.maps_shndx < 0)
return 0;
if (!symbols) if (!symbols)
return -EINVAL; return -EINVAL;
nr_syms = symbols->d_size / sizeof(GElf_Sym);
if (obj->efile.maps_shndx >= 0) {
Elf_Scn *scn = elf_getscn(obj->efile.elf,
obj->efile.maps_shndx);
scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
if (scn) if (scn)
data = elf_getdata(scn, NULL); data = elf_getdata(scn, NULL);
if (!scn || !data) { if (!scn || !data) {
...@@ -831,7 +904,6 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -831,7 +904,6 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
obj->efile.maps_shndx); obj->efile.maps_shndx);
return -EINVAL; return -EINVAL;
} }
}
/* /*
* Count number of maps. Each map has a name. * Count number of maps. Each map has a name.
...@@ -840,16 +912,8 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -840,16 +912,8 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
* *
* TODO: Detect array of map and report error. * TODO: Detect array of map and report error.
*/ */
if (obj->caps.global_data) { nr_syms = symbols->d_size / sizeof(GElf_Sym);
if (obj->efile.data_shndx >= 0) for (i = 0; i < nr_syms; i++) {
nr_maps_glob++;
if (obj->efile.rodata_shndx >= 0)
nr_maps_glob++;
if (obj->efile.bss_shndx >= 0)
nr_maps_glob++;
}
for (i = 0; data && i < nr_syms; i++) {
GElf_Sym sym; GElf_Sym sym;
if (!gelf_getsym(symbols, i, &sym)) if (!gelf_getsym(symbols, i, &sym))
...@@ -858,14 +922,9 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -858,14 +922,9 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
continue; continue;
nr_maps++; nr_maps++;
} }
if (!nr_maps && !nr_maps_glob)
return 0;
/* Assume equally sized map definitions */ /* Assume equally sized map definitions */
if (data) { pr_debug("maps in %s: %d maps in %zd bytes\n",
pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path, obj->path, nr_maps, data->d_size);
nr_maps, data->d_size);
map_def_sz = data->d_size / nr_maps; map_def_sz = data->d_size / nr_maps;
if (!data->d_size || (data->d_size % nr_maps) != 0) { if (!data->d_size || (data->d_size % nr_maps) != 0) {
...@@ -874,63 +933,48 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -874,63 +933,48 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
obj->path, nr_maps, data->d_size); obj->path, nr_maps, data->d_size);
return -EINVAL; return -EINVAL;
} }
}
nr_maps += nr_maps_glob;
obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
if (!obj->maps) {
pr_warning("alloc maps for object failed\n");
return -ENOMEM;
}
obj->nr_maps = nr_maps;
for (i = 0; i < nr_maps; i++) { /* Fill obj->maps using data in "maps" section. */
/* for (i = 0; i < nr_syms; i++) {
* fill all fd with -1 so won't close incorrect
* fd (fd=0 is stdin) when failure (zclose won't close
* negative fd)).
*/
obj->maps[i].fd = -1;
obj->maps[i].inner_map_fd = -1;
}
/*
* Fill obj->maps using data in "maps" section.
*/
for (i = 0, map_idx = 0; data && i < nr_syms; i++) {
GElf_Sym sym; GElf_Sym sym;
const char *map_name; const char *map_name;
struct bpf_map_def *def; struct bpf_map_def *def;
struct bpf_map *map;
if (!gelf_getsym(symbols, i, &sym)) if (!gelf_getsym(symbols, i, &sym))
continue; continue;
if (sym.st_shndx != obj->efile.maps_shndx) if (sym.st_shndx != obj->efile.maps_shndx)
continue; continue;
map_name = elf_strptr(obj->efile.elf, map = bpf_object__add_map(obj);
obj->efile.strtabidx, if (IS_ERR(map))
return PTR_ERR(map);
map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
sym.st_name); sym.st_name);
if (!map_name) { if (!map_name) {
pr_warning("failed to get map #%d name sym string for obj %s\n", pr_warning("failed to get map #%d name sym string for obj %s\n",
map_idx, obj->path); i, obj->path);
return -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
} }
obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC; map->libbpf_type = LIBBPF_MAP_UNSPEC;
obj->maps[map_idx].offset = sym.st_value; map->sec_idx = sym.st_shndx;
map->sec_offset = sym.st_value;
pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
if (sym.st_value + map_def_sz > data->d_size) { if (sym.st_value + map_def_sz > data->d_size) {
pr_warning("corrupted maps section in %s: last map \"%s\" too small\n", pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
obj->path, map_name); obj->path, map_name);
return -EINVAL; return -EINVAL;
} }
obj->maps[map_idx].name = strdup(map_name); map->name = strdup(map_name);
if (!obj->maps[map_idx].name) { if (!map->name) {
pr_warning("failed to alloc map name\n"); pr_warning("failed to alloc map name\n");
return -ENOMEM; return -ENOMEM;
} }
pr_debug("map %d is \"%s\"\n", map_idx, pr_debug("map %d is \"%s\"\n", i, map->name);
obj->maps[map_idx].name);
def = (struct bpf_map_def *)(data->d_buf + sym.st_value); def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
/* /*
* If the definition of the map in the object file fits in * If the definition of the map in the object file fits in
...@@ -939,7 +983,7 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -939,7 +983,7 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
* calloc above. * calloc above.
*/ */
if (map_def_sz <= sizeof(struct bpf_map_def)) { if (map_def_sz <= sizeof(struct bpf_map_def)) {
memcpy(&obj->maps[map_idx].def, def, map_def_sz); memcpy(&map->def, def, map_def_sz);
} else { } else {
/* /*
* Here the map structure being read is bigger than what * Here the map structure being read is bigger than what
...@@ -959,37 +1003,340 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags) ...@@ -959,37 +1003,340 @@ static int bpf_object__init_maps(struct bpf_object *obj, int flags)
return -EINVAL; return -EINVAL;
} }
} }
memcpy(&obj->maps[map_idx].def, def, memcpy(&map->def, def, sizeof(struct bpf_map_def));
sizeof(struct bpf_map_def));
} }
map_idx++;
} }
return 0;
}
if (!obj->caps.global_data) static const struct btf_type *skip_mods_and_typedefs(const struct btf *btf,
goto finalize; __u32 id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
/* while (true) {
* Populate rest of obj->maps with libbpf internal maps. switch (BTF_INFO_KIND(t->info)) {
*/ case BTF_KIND_VOLATILE:
if (obj->efile.data_shndx >= 0) case BTF_KIND_CONST:
ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], case BTF_KIND_RESTRICT:
LIBBPF_MAP_DATA, case BTF_KIND_TYPEDEF:
obj->efile.data, t = btf__type_by_id(btf, t->type);
&obj->sections.data); break;
if (!ret && obj->efile.rodata_shndx >= 0) default:
ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], return t;
LIBBPF_MAP_RODATA, }
obj->efile.rodata, }
&obj->sections.rodata); }
if (!ret && obj->efile.bss_shndx >= 0)
ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++], static bool get_map_field_int(const char *map_name,
LIBBPF_MAP_BSS, const struct btf *btf,
obj->efile.bss, NULL); const struct btf_type *def,
finalize: const struct btf_member *m,
if (!ret) const void *data, __u32 *res) {
const struct btf_type *t = skip_mods_and_typedefs(btf, m->type);
const char *name = btf__name_by_offset(btf, m->name_off);
__u32 int_info = *(const __u32 *)(const void *)(t + 1);
if (BTF_INFO_KIND(t->info) != BTF_KIND_INT) {
pr_warning("map '%s': attr '%s': expected INT, got %u.\n",
map_name, name, BTF_INFO_KIND(t->info));
return false;
}
if (t->size != 4 || BTF_INT_BITS(int_info) != 32 ||
BTF_INT_OFFSET(int_info)) {
pr_warning("map '%s': attr '%s': expected 32-bit non-bitfield integer, "
"got %u-byte (%d-bit) one with bit offset %d.\n",
map_name, name, t->size, BTF_INT_BITS(int_info),
BTF_INT_OFFSET(int_info));
return false;
}
if (BTF_INFO_KFLAG(def->info) && BTF_MEMBER_BITFIELD_SIZE(m->offset)) {
pr_warning("map '%s': attr '%s': bitfield is not supported.\n",
map_name, name);
return false;
}
if (m->offset % 32) {
pr_warning("map '%s': attr '%s': unaligned fields are not supported.\n",
map_name, name);
return false;
}
*res = *(const __u32 *)(data + m->offset / 8);
return true;
}
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
const Elf_Data *data, bool strict)
{
const struct btf_type *var, *def, *t;
const struct btf_var_secinfo *vi;
const struct btf_var *var_extra;
const struct btf_member *m;
const void *def_data;
const char *map_name;
struct bpf_map *map;
int vlen, i;
vi = (const struct btf_var_secinfo *)(const void *)(sec + 1) + var_idx;
var = btf__type_by_id(obj->btf, vi->type);
var_extra = (const void *)(var + 1);
map_name = btf__name_by_offset(obj->btf, var->name_off);
vlen = BTF_INFO_VLEN(var->info);
if (map_name == NULL || map_name[0] == '\0') {
pr_warning("map #%d: empty name.\n", var_idx);
return -EINVAL;
}
if ((__u64)vi->offset + vi->size > data->d_size) {
pr_warning("map '%s' BTF data is corrupted.\n", map_name);
return -EINVAL;
}
if (BTF_INFO_KIND(var->info) != BTF_KIND_VAR) {
pr_warning("map '%s': unexpected var kind %u.\n",
map_name, BTF_INFO_KIND(var->info));
return -EINVAL;
}
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
var_extra->linkage != BTF_VAR_STATIC) {
pr_warning("map '%s': unsupported var linkage %u.\n",
map_name, var_extra->linkage);
return -EOPNOTSUPP;
}
def = skip_mods_and_typedefs(obj->btf, var->type);
if (BTF_INFO_KIND(def->info) != BTF_KIND_STRUCT) {
pr_warning("map '%s': unexpected def kind %u.\n",
map_name, BTF_INFO_KIND(var->info));
return -EINVAL;
}
if (def->size > vi->size) {
pr_warning("map '%s': invalid def size.\n", map_name);
return -EINVAL;
}
map = bpf_object__add_map(obj);
if (IS_ERR(map))
return PTR_ERR(map);
map->name = strdup(map_name);
if (!map->name) {
pr_warning("map '%s': failed to alloc map name.\n", map_name);
return -ENOMEM;
}
map->libbpf_type = LIBBPF_MAP_UNSPEC;
map->def.type = BPF_MAP_TYPE_UNSPEC;
map->sec_idx = sec_idx;
map->sec_offset = vi->offset;
pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
def_data = data->d_buf + vi->offset;
vlen = BTF_INFO_VLEN(def->info);
m = (const void *)(def + 1);
for (i = 0; i < vlen; i++, m++) {
const char *name = btf__name_by_offset(obj->btf, m->name_off);
if (!name) {
pr_warning("map '%s': invalid field #%d.\n",
map_name, i);
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
if (!get_map_field_int(map_name, obj->btf, def, m,
def_data, &map->def.type))
return -EINVAL;
pr_debug("map '%s': found type = %u.\n",
map_name, map->def.type);
} else if (strcmp(name, "max_entries") == 0) {
if (!get_map_field_int(map_name, obj->btf, def, m,
def_data, &map->def.max_entries))
return -EINVAL;
pr_debug("map '%s': found max_entries = %u.\n",
map_name, map->def.max_entries);
} else if (strcmp(name, "map_flags") == 0) {
if (!get_map_field_int(map_name, obj->btf, def, m,
def_data, &map->def.map_flags))
return -EINVAL;
pr_debug("map '%s': found map_flags = %u.\n",
map_name, map->def.map_flags);
} else if (strcmp(name, "key_size") == 0) {
__u32 sz;
if (!get_map_field_int(map_name, obj->btf, def, m,
def_data, &sz))
return -EINVAL;
pr_debug("map '%s': found key_size = %u.\n",
map_name, sz);
if (map->def.key_size && map->def.key_size != sz) {
pr_warning("map '%s': conflictling key size %u != %u.\n",
map_name, map->def.key_size, sz);
return -EINVAL;
}
map->def.key_size = sz;
} else if (strcmp(name, "key") == 0) {
__s64 sz;
t = btf__type_by_id(obj->btf, m->type);
if (!t) {
pr_warning("map '%s': key type [%d] not found.\n",
map_name, m->type);
return -EINVAL;
}
if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
pr_warning("map '%s': key spec is not PTR: %u.\n",
map_name, BTF_INFO_KIND(t->info));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
if (sz < 0) {
pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
map_name, t->type, sz);
return sz;
}
pr_debug("map '%s': found key [%u], sz = %lld.\n",
map_name, t->type, sz);
if (map->def.key_size && map->def.key_size != sz) {
pr_warning("map '%s': conflictling key size %u != %lld.\n",
map_name, map->def.key_size, sz);
return -EINVAL;
}
map->def.key_size = sz;
map->btf_key_type_id = t->type;
} else if (strcmp(name, "value_size") == 0) {
__u32 sz;
if (!get_map_field_int(map_name, obj->btf, def, m,
def_data, &sz))
return -EINVAL;
pr_debug("map '%s': found value_size = %u.\n",
map_name, sz);
if (map->def.value_size && map->def.value_size != sz) {
pr_warning("map '%s': conflictling value size %u != %u.\n",
map_name, map->def.value_size, sz);
return -EINVAL;
}
map->def.value_size = sz;
} else if (strcmp(name, "value") == 0) {
__s64 sz;
t = btf__type_by_id(obj->btf, m->type);
if (!t) {
pr_warning("map '%s': value type [%d] not found.\n",
map_name, m->type);
return -EINVAL;
}
if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
pr_warning("map '%s': value spec is not PTR: %u.\n",
map_name, BTF_INFO_KIND(t->info));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
if (sz < 0) {
pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
map_name, t->type, sz);
return sz;
}
pr_debug("map '%s': found value [%u], sz = %lld.\n",
map_name, t->type, sz);
if (map->def.value_size && map->def.value_size != sz) {
pr_warning("map '%s': conflictling value size %u != %lld.\n",
map_name, map->def.value_size, sz);
return -EINVAL;
}
map->def.value_size = sz;
map->btf_value_type_id = t->type;
} else {
if (strict) {
pr_warning("map '%s': unknown field '%s'.\n",
map_name, name);
return -ENOTSUP;
}
pr_debug("map '%s': ignoring unknown field '%s'.\n",
map_name, name);
}
}
if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
pr_warning("map '%s': map type isn't specified.\n", map_name);
return -EINVAL;
}
return 0;
}
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
{
const struct btf_type *sec = NULL;
int nr_types, i, vlen, err;
const struct btf_type *t;
const char *name;
Elf_Data *data;
Elf_Scn *scn;
if (obj->efile.btf_maps_shndx < 0)
return 0;
scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
if (scn)
data = elf_getdata(scn, NULL);
if (!scn || !data) {
pr_warning("failed to get Elf_Data from map section %d (%s)\n",
obj->efile.maps_shndx, MAPS_ELF_SEC);
return -EINVAL;
}
nr_types = btf__get_nr_types(obj->btf);
for (i = 1; i <= nr_types; i++) {
t = btf__type_by_id(obj->btf, i);
if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
continue;
name = btf__name_by_offset(obj->btf, t->name_off);
if (strcmp(name, MAPS_ELF_SEC) == 0) {
sec = t;
break;
}
}
if (!sec) {
pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
return -ENOENT;
}
vlen = BTF_INFO_VLEN(sec->info);
for (i = 0; i < vlen; i++) {
err = bpf_object__init_user_btf_map(obj, sec, i,
obj->efile.btf_maps_shndx,
data, strict);
if (err)
return err;
}
return 0;
}
static int bpf_object__init_maps(struct bpf_object *obj, int flags)
{
bool strict = !(flags & MAPS_RELAX_COMPAT);
int err;
err = bpf_object__init_user_maps(obj, strict);
if (err)
return err;
err = bpf_object__init_user_btf_maps(obj, strict);
if (err)
return err;
err = bpf_object__init_global_data_maps(obj);
if (err)
return err;
if (obj->nr_maps) {
qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
compare_bpf_map); compare_bpf_map);
return ret; }
return 0;
} }
static bool section_have_execinstr(struct bpf_object *obj, int idx) static bool section_have_execinstr(struct bpf_object *obj, int idx)
...@@ -1078,6 +1425,86 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) ...@@ -1078,6 +1425,86 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
} }
} }
static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
Elf_Data *btf_data,
Elf_Data *btf_ext_data)
{
bool btf_required = bpf_object__is_btf_mandatory(obj);
int err = 0;
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
if (IS_ERR(obj->btf)) {
pr_warning("Error loading ELF section %s: %d.\n",
BTF_ELF_SEC, err);
goto out;
}
err = btf__finalize_data(obj, obj->btf);
if (err) {
pr_warning("Error finalizing %s: %d.\n",
BTF_ELF_SEC, err);
goto out;
}
}
if (btf_ext_data) {
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
goto out;
}
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
goto out;
}
}
out:
if (err || IS_ERR(obj->btf)) {
if (btf_required)
err = err ? : PTR_ERR(obj->btf);
else
err = 0;
if (!IS_ERR_OR_NULL(obj->btf))
btf__free(obj->btf);
obj->btf = NULL;
}
if (btf_required && !obj->btf) {
pr_warning("BTF is required, but is missing or corrupted.\n");
return err == 0 ? -ENOENT : err;
}
return 0;
}
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
int err = 0;
if (!obj->btf)
return 0;
bpf_object__sanitize_btf(obj);
bpf_object__sanitize_btf_ext(obj);
err = btf__load(obj->btf);
if (err) {
pr_warning("Error loading %s into kernel: %d.\n",
BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
if (bpf_object__is_btf_mandatory(obj))
return err;
}
return 0;
}
static int bpf_object__elf_collect(struct bpf_object *obj, int flags) static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{ {
Elf *elf = obj->efile.elf; Elf *elf = obj->efile.elf;
...@@ -1102,24 +1529,21 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1102,24 +1529,21 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (gelf_getshdr(scn, &sh) != &sh) { if (gelf_getshdr(scn, &sh) != &sh) {
pr_warning("failed to get section(%d) header from %s\n", pr_warning("failed to get section(%d) header from %s\n",
idx, obj->path); idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
goto out;
} }
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) { if (!name) {
pr_warning("failed to get section(%d) name from %s\n", pr_warning("failed to get section(%d) name from %s\n",
idx, obj->path); idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
goto out;
} }
data = elf_getdata(scn, 0); data = elf_getdata(scn, 0);
if (!data) { if (!data) {
pr_warning("failed to get section(%d) data from %s(%s)\n", pr_warning("failed to get section(%d) data from %s(%s)\n",
idx, name, obj->path); idx, name, obj->path);
err = -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
goto out;
} }
pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size, idx, name, (unsigned long)data->d_size,
...@@ -1130,12 +1554,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1130,12 +1554,18 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
err = bpf_object__init_license(obj, err = bpf_object__init_license(obj,
data->d_buf, data->d_buf,
data->d_size); data->d_size);
if (err)
return err;
} else if (strcmp(name, "version") == 0) { } else if (strcmp(name, "version") == 0) {
err = bpf_object__init_kversion(obj, err = bpf_object__init_kversion(obj,
data->d_buf, data->d_buf,
data->d_size); data->d_size);
if (err)
return err;
} else if (strcmp(name, "maps") == 0) { } else if (strcmp(name, "maps") == 0) {
obj->efile.maps_shndx = idx; obj->efile.maps_shndx = idx;
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) { } else if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = data; btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
...@@ -1144,11 +1574,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1144,11 +1574,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (obj->efile.symbols) { if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n", pr_warning("bpf: multiple SYMTAB in %s\n",
obj->path); obj->path);
err = -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
} else { }
obj->efile.symbols = data; obj->efile.symbols = data;
obj->efile.strtabidx = sh.sh_link; obj->efile.strtabidx = sh.sh_link;
}
} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
if (sh.sh_flags & SHF_EXECINSTR) { if (sh.sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0) if (strcmp(name, ".text") == 0)
...@@ -1162,6 +1591,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1162,6 +1591,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_warning("failed to alloc program %s (%s): %s", pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, cp); name, obj->path, cp);
return err;
} }
} else if (strcmp(name, ".data") == 0) { } else if (strcmp(name, ".data") == 0) {
obj->efile.data = data; obj->efile.data = data;
...@@ -1173,8 +1603,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1173,8 +1603,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_debug("skip section(%d) %s\n", idx, name); pr_debug("skip section(%d) %s\n", idx, name);
} }
} else if (sh.sh_type == SHT_REL) { } else if (sh.sh_type == SHT_REL) {
int nr_reloc = obj->efile.nr_reloc;
void *reloc = obj->efile.reloc; void *reloc = obj->efile.reloc;
int nr_reloc = obj->efile.nr_reloc + 1;
int sec = sh.sh_info; /* points to other section */ int sec = sh.sh_info; /* points to other section */
/* Only do relo for section with exec instructions */ /* Only do relo for section with exec instructions */
...@@ -1184,79 +1614,37 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -1184,79 +1614,37 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
continue; continue;
} }
reloc = reallocarray(reloc, nr_reloc, reloc = reallocarray(reloc, nr_reloc + 1,
sizeof(*obj->efile.reloc)); sizeof(*obj->efile.reloc));
if (!reloc) { if (!reloc) {
pr_warning("realloc failed\n"); pr_warning("realloc failed\n");
err = -ENOMEM; return -ENOMEM;
} else { }
int n = nr_reloc - 1;
obj->efile.reloc = reloc; obj->efile.reloc = reloc;
obj->efile.nr_reloc = nr_reloc; obj->efile.nr_reloc++;
obj->efile.reloc[n].shdr = sh; obj->efile.reloc[nr_reloc].shdr = sh;
obj->efile.reloc[n].data = data; obj->efile.reloc[nr_reloc].data = data;
}
} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) { } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
obj->efile.bss = data; obj->efile.bss = data;
obj->efile.bss_shndx = idx; obj->efile.bss_shndx = idx;
} else { } else {
pr_debug("skip section(%d) %s\n", idx, name); pr_debug("skip section(%d) %s\n", idx, name);
} }
if (err)
goto out;
} }
if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
pr_warning("Corrupted ELF file: index of strtab invalid\n"); pr_warning("Corrupted ELF file: index of strtab invalid\n");
return -LIBBPF_ERRNO__FORMAT; return -LIBBPF_ERRNO__FORMAT;
} }
if (btf_data) { err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); if (!err)
if (IS_ERR(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
} else {
err = btf__finalize_data(obj, obj->btf);
if (!err) {
bpf_object__sanitize_btf(obj);
err = btf__load(obj->btf);
}
if (err) {
pr_warning("Error finalizing and loading %s into kernel: %d. Ignored and continue.\n",
BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
err = 0;
}
}
}
if (btf_ext_data) {
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
} else {
obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
btf_ext_data->d_size);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
} else {
bpf_object__sanitize_btf_ext(obj);
}
}
}
if (bpf_object__has_maps(obj)) {
err = bpf_object__init_maps(obj, flags); err = bpf_object__init_maps(obj, flags);
if (err) if (!err)
goto out; err = bpf_object__sanitize_and_load_btf(obj);
} if (!err)
err = bpf_object__init_prog_names(obj); err = bpf_object__init_prog_names(obj);
out:
return err; return err;
} }
...@@ -1297,7 +1685,8 @@ static bool bpf_object__shndx_is_data(const struct bpf_object *obj, ...@@ -1297,7 +1685,8 @@ static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
int shndx) int shndx)
{ {
return shndx == obj->efile.maps_shndx; return shndx == obj->efile.maps_shndx ||
shndx == obj->efile.btf_maps_shndx;
} }
static bool bpf_object__relo_in_known_section(const struct bpf_object *obj, static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
...@@ -1341,14 +1730,14 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, ...@@ -1341,14 +1730,14 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
prog->nr_reloc = nrels; prog->nr_reloc = nrels;
for (i = 0; i < nrels; i++) { for (i = 0; i < nrels; i++) {
GElf_Sym sym;
GElf_Rel rel;
unsigned int insn_idx;
unsigned int shdr_idx;
struct bpf_insn *insns = prog->insns; struct bpf_insn *insns = prog->insns;
enum libbpf_map_type type; enum libbpf_map_type type;
unsigned int insn_idx;
unsigned int shdr_idx;
const char *name; const char *name;
size_t map_idx; size_t map_idx;
GElf_Sym sym;
GElf_Rel rel;
if (!gelf_getrel(data, i, &rel)) { if (!gelf_getrel(data, i, &rel)) {
pr_warning("relocation: failed to get %d reloc\n", i); pr_warning("relocation: failed to get %d reloc\n", i);
...@@ -1416,9 +1805,13 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, ...@@ -1416,9 +1805,13 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
if (maps[map_idx].libbpf_type != type) if (maps[map_idx].libbpf_type != type)
continue; continue;
if (type != LIBBPF_MAP_UNSPEC || if (type != LIBBPF_MAP_UNSPEC ||
maps[map_idx].offset == sym.st_value) { (maps[map_idx].sec_idx == sym.st_shndx &&
pr_debug("relocation: find map %zd (%s) for insn %u\n", maps[map_idx].sec_offset == sym.st_value)) {
map_idx, maps[map_idx].name, insn_idx); pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
map_idx, maps[map_idx].name,
maps[map_idx].sec_idx,
maps[map_idx].sec_offset,
insn_idx);
break; break;
} }
} }
...@@ -1438,14 +1831,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, ...@@ -1438,14 +1831,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
return 0; return 0;
} }
static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{ {
struct bpf_map_def *def = &map->def; struct bpf_map_def *def = &map->def;
__u32 key_type_id = 0, value_type_id = 0; __u32 key_type_id = 0, value_type_id = 0;
int ret; int ret;
/* if it's BTF-defined map, we don't need to search for type IDs */
if (map->sec_idx == obj->efile.btf_maps_shndx)
return 0;
if (!bpf_map__is_internal(map)) { if (!bpf_map__is_internal(map)) {
ret = btf__get_map_kv_tids(btf, map->name, def->key_size, ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
def->value_size, &key_type_id, def->value_size, &key_type_id,
&value_type_id); &value_type_id);
} else { } else {
...@@ -1453,7 +1850,7 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) ...@@ -1453,7 +1850,7 @@ static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
* LLVM annotates global data differently in BTF, that is, * LLVM annotates global data differently in BTF, that is,
* only as '.data', '.bss' or '.rodata'. * only as '.data', '.bss' or '.rodata'.
*/ */
ret = btf__find_by_name(btf, ret = btf__find_by_name(obj->btf,
libbpf_type_to_btf_name[map->libbpf_type]); libbpf_type_to_btf_name[map->libbpf_type]);
} }
if (ret < 0) if (ret < 0)
...@@ -1743,7 +2140,7 @@ bpf_object__create_maps(struct bpf_object *obj) ...@@ -1743,7 +2140,7 @@ bpf_object__create_maps(struct bpf_object *obj)
map->inner_map_fd >= 0) map->inner_map_fd >= 0)
create_attr.inner_map_fd = map->inner_map_fd; create_attr.inner_map_fd = map->inner_map_fd;
if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) { if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_fd = btf__fd(obj->btf);
create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_key_type_id = map->btf_key_type_id;
create_attr.btf_value_type_id = map->btf_value_type_id; create_attr.btf_value_type_id = map->btf_value_type_id;
...@@ -3436,13 +3833,7 @@ bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name) ...@@ -3436,13 +3833,7 @@ bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
struct bpf_map * struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset) bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{ {
int i; return ERR_PTR(-ENOTSUP);
for (i = 0; i < obj->nr_maps; i++) {
if (obj->maps[i].offset == offset)
return &obj->maps[i];
}
return ERR_PTR(-ENOENT);
} }
long libbpf_get_error(const void *ptr) long libbpf_get_error(const void *ptr)
......
...@@ -23,6 +23,13 @@ ...@@ -23,6 +23,13 @@
#define BTF_PARAM_ENC(name, type) (name), (type) #define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
extern void libbpf_print(enum libbpf_print_level level, extern void libbpf_print(enum libbpf_print_level level,
const char *format, ...) const char *format, ...)
__attribute__((format(printf, 2, 3))); __attribute__((format(printf, 2, 3)));
......
...@@ -57,17 +57,25 @@ struct frag_hdr { ...@@ -57,17 +57,25 @@ struct frag_hdr {
__be32 identification; __be32 identification;
}; };
struct bpf_map_def SEC("maps") jmp_table = { struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} jmp_table SEC(".maps") = {
.type = BPF_MAP_TYPE_PROG_ARRAY, .type = BPF_MAP_TYPE_PROG_ARRAY,
.max_entries = 8,
.key_size = sizeof(__u32), .key_size = sizeof(__u32),
.value_size = sizeof(__u32), .value_size = sizeof(__u32),
.max_entries = 8
}; };
struct bpf_map_def SEC("maps") last_dissection = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_flow_keys *value;
} last_dissection SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_flow_keys),
.max_entries = 1, .max_entries = 1,
}; };
......
...@@ -10,24 +10,22 @@ ...@@ -10,24 +10,22 @@
#define REFRESH_TIME_NS 100000000 #define REFRESH_TIME_NS 100000000
#define NS_PER_SEC 1000000000 #define NS_PER_SEC 1000000000
struct bpf_map_def SEC("maps") percpu_netcnt = { struct {
__u32 type;
struct bpf_cgroup_storage_key *key;
struct percpu_net_cnt *value;
} percpu_netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, .type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct percpu_net_cnt),
}; };
BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key, struct {
struct percpu_net_cnt); __u32 type;
struct bpf_cgroup_storage_key *key;
struct bpf_map_def SEC("maps") netcnt = { struct net_cnt *value;
} netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE, .type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct net_cnt),
}; };
BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
struct net_cnt);
SEC("cgroup/skb") SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb) int bpf_nextcnt(struct __sk_buff *skb)
{ {
......
...@@ -12,15 +12,16 @@ struct socket_cookie { ...@@ -12,15 +12,16 @@ struct socket_cookie {
__u32 cookie_value; __u32 cookie_value;
}; };
struct bpf_map_def SEC("maps") socket_cookies = { struct {
__u32 type;
__u32 map_flags;
int *key;
struct socket_cookie *value;
} socket_cookies SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE, .type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct socket_cookie),
.map_flags = BPF_F_NO_PREALLOC, .map_flags = BPF_F_NO_PREALLOC,
}; };
BPF_ANNOTATE_KV_PAIR(socket_cookies, int, struct socket_cookie);
SEC("cgroup/connect6") SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx) int set_cookie(struct bpf_sock_addr *ctx)
{ {
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
/* Kernel version stamp; placed in the "version" ELF section — some loaders
 * require it for program types that enforce version matching.
 */
int _version SEC("version") = 1;
/* Per-protocol hit counters; used as the value type of both maps below
 * (8 bytes total, matching btf_map_legacy's sizeof(long long) value_size).
 */
struct ipv_counts {
unsigned int v4; /* IPv4 lookups observed */
unsigned int v6; /* IPv6 lookups observed */
};
/* just to validate we can handle maps in multiple sections */
/* Legacy-style map definition in the classic "maps" ELF section; the test
 * keeps one of these alongside a BTF-defined map to exercise libbpf's
 * support for map definitions spread across multiple ELF sections.
 */
struct bpf_map_def SEC("maps") btf_map_legacy = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(long long),
.max_entries = 4,
};
/* Attach BTF key/value type info to the legacy map; struct ipv_counts is
 * 8 bytes, consistent with the long long value_size above.
 */
BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
/* BTF-defined map in the ".maps" section: key/value POINTER member types
 * encode the map's key/value types (and thus sizes), so no explicit
 * key_size/value_size initializers are needed; type and max_entries are
 * given as plain integer values.
 */
struct {
int *key;                  /* key type: int */
struct ipv_counts *value;  /* value type: struct ipv_counts */
unsigned int type;
unsigned int max_entries;
} btf_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.max_entries = 4,
};
/* Context layout for the "dummy_tracepoint" program below.
 * NOTE(review): pad presumably mirrors the common tracepoint header
 * (8 bytes) preceding the first argument — confirm against the test
 * harness that crafts this context.
 */
struct dummy_tracepoint_args {
unsigned long long pad;
struct sock *sock;
};
/* Bump the v6 counter in the BTF-defined map, then perform a lookup in
 * the legacy map purely to prove both map flavors are referenceable from
 * one program. Returns 0 on every path.
 */
__attribute__((noinline))
static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
	struct ipv_counts *cnt;
	int zero = 0;

	if (!arg->sock)
		return 0;

	cnt = bpf_map_lookup_elem(&btf_map, &zero);
	if (!cnt)
		return 0;

	cnt->v6++;

	/* just verify we can reference both maps */
	cnt = bpf_map_lookup_elem(&btf_map_legacy, &zero);
	if (!cnt)
		return 0;

	return 0;
}
/* Extra non-inlined call level; exists to exercise long static function
 * names in the program's BTF/func_info.
 */
__attribute__((noinline))
static int test_long_fname_1(struct dummy_tracepoint_args *ctx)
{
	return test_long_fname_2(ctx);
}
/* Program entry point; delegates all work to the static helpers above. */
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *args)
{
	return test_long_fname_1(args);
}
/* License declaration; "GPL" permits use of GPL-only BPF helpers. */
char _license[] SEC("license") = "GPL";
...@@ -15,17 +15,25 @@ struct stack_trace_t { ...@@ -15,17 +15,25 @@ struct stack_trace_t {
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP]; struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
}; };
struct bpf_map_def SEC("maps") perfmap = { struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} perfmap SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.max_entries = 2,
.key_size = sizeof(int), .key_size = sizeof(int),
.value_size = sizeof(__u32), .value_size = sizeof(__u32),
.max_entries = 2,
}; };
struct bpf_map_def SEC("maps") stackdata_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct stack_trace_t *value;
} stackdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct stack_trace_t),
.max_entries = 1, .max_entries = 1,
}; };
...@@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = { ...@@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = {
* issue and avoid complicated C programming massaging. * issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here. * This is an acceptable workaround since there is one entry here.
*/ */
struct bpf_map_def SEC("maps") rawdata_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 (*value)[2 * MAX_STACK_RAWTP];
} rawdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
.max_entries = 1, .max_entries = 1,
}; };
......
...@@ -7,17 +7,23 @@ ...@@ -7,17 +7,23 @@
#include "bpf_helpers.h" #include "bpf_helpers.h"
struct bpf_map_def SEC("maps") result_number = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} result_number SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 11, .max_entries = 11,
}; };
struct bpf_map_def SEC("maps") result_string = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
const char (*value)[32];
} result_string SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = 32,
.max_entries = 5, .max_entries = 5,
}; };
...@@ -27,10 +33,13 @@ struct foo { ...@@ -27,10 +33,13 @@ struct foo {
__u64 c; __u64 c;
}; };
struct bpf_map_def SEC("maps") result_struct = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct foo *value;
} result_struct SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct foo),
.max_entries = 5, .max_entries = 5,
}; };
......
...@@ -169,38 +169,53 @@ struct eth_hdr { ...@@ -169,38 +169,53 @@ struct eth_hdr {
unsigned short eth_proto; unsigned short eth_proto;
}; };
struct bpf_map_def SEC("maps") vip_map = { struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS, .max_entries = MAX_VIPS,
}; };
struct bpf_map_def SEC("maps") ch_rings = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE, .max_entries = CH_RINGS_SIZE,
}; };
struct bpf_map_def SEC("maps") reals = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS, .max_entries = MAX_REALS,
}; };
struct bpf_map_def SEC("maps") stats = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct vip_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS, .max_entries = MAX_VIPS,
}; };
struct bpf_map_def SEC("maps") ctl_array = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE, .max_entries = CTL_MAP_SIZE,
}; };
......
...@@ -165,38 +165,53 @@ struct eth_hdr { ...@@ -165,38 +165,53 @@ struct eth_hdr {
unsigned short eth_proto; unsigned short eth_proto;
}; };
struct bpf_map_def SEC("maps") vip_map = { struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS, .max_entries = MAX_VIPS,
}; };
struct bpf_map_def SEC("maps") ch_rings = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE, .max_entries = CH_RINGS_SIZE,
}; };
struct bpf_map_def SEC("maps") reals = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS, .max_entries = MAX_REALS,
}; };
struct bpf_map_def SEC("maps") stats = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct vip_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS, .max_entries = MAX_VIPS,
}; };
struct bpf_map_def SEC("maps") ctl_array = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE, .max_entries = CTL_MAP_SIZE,
}; };
......
...@@ -11,29 +11,31 @@ struct hmap_elem { ...@@ -11,29 +11,31 @@ struct hmap_elem {
int var[VAR_NUM]; int var[VAR_NUM];
}; };
struct bpf_map_def SEC("maps") hash_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct hmap_elem *value;
} hash_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(int),
.value_size = sizeof(struct hmap_elem),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
struct array_elem { struct array_elem {
struct bpf_spin_lock lock; struct bpf_spin_lock lock;
int var[VAR_NUM]; int var[VAR_NUM];
}; };
struct bpf_map_def SEC("maps") array_map = { struct {
__u32 type;
__u32 max_entries;
int *key;
struct array_elem *value;
} array_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(struct array_elem),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
SEC("map_lock_demo") SEC("map_lock_demo")
int bpf_map_lock_test(struct __sk_buff *skb) int bpf_map_lock_test(struct __sk_buff *skb)
{ {
......
...@@ -21,38 +21,55 @@ int _version SEC("version") = 1; ...@@ -21,38 +21,55 @@ int _version SEC("version") = 1;
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif #endif
struct bpf_map_def SEC("maps") outer_map = { struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} outer_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS, .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.max_entries = 1,
.key_size = sizeof(__u32), .key_size = sizeof(__u32),
.value_size = sizeof(__u32), .value_size = sizeof(__u32),
.max_entries = 1,
}; };
struct bpf_map_def SEC("maps") result_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = NR_RESULTS, .max_entries = NR_RESULTS,
}; };
struct bpf_map_def SEC("maps") tmp_index_ovr_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
int *value;
} tmp_index_ovr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(int),
.max_entries = 1, .max_entries = 1,
}; };
struct bpf_map_def SEC("maps") linum_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1, .max_entries = 1,
}; };
struct bpf_map_def SEC("maps") data_check_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct data_check *value;
} data_check_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct data_check),
.max_entries = 1, .max_entries = 1,
}; };
......
...@@ -4,24 +4,26 @@ ...@@ -4,24 +4,26 @@
#include <linux/version.h> #include <linux/version.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
struct bpf_map_def SEC("maps") info_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} info_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(info_map, __u32, __u64); struct {
__u32 type;
struct bpf_map_def SEC("maps") status_map = { __u32 max_entries;
__u32 *key;
__u64 *value;
} status_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(status_map, __u32, __u64);
SEC("send_signal_demo") SEC("send_signal_demo")
int bpf_send_signal_test(void *ctx) int bpf_send_signal_test(void *ctx)
{ {
......
...@@ -27,31 +27,43 @@ enum bpf_linum_array_idx { ...@@ -27,31 +27,43 @@ enum bpf_linum_array_idx {
__NR_BPF_LINUM_ARRAY_IDX, __NR_BPF_LINUM_ARRAY_IDX,
}; };
struct bpf_map_def SEC("maps") addr_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct sockaddr_in6 *value;
} addr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct sockaddr_in6),
.max_entries = __NR_BPF_ADDR_ARRAY_IDX, .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
}; };
struct bpf_map_def SEC("maps") sock_result_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_sock *value;
} sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX, .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
}; };
struct bpf_map_def SEC("maps") tcp_sock_result_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_tcp_sock *value;
} tcp_sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_tcp_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX, .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
}; };
struct bpf_map_def SEC("maps") linum_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = __NR_BPF_LINUM_ARRAY_IDX, .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
}; };
...@@ -60,26 +72,26 @@ struct bpf_spinlock_cnt { ...@@ -60,26 +72,26 @@ struct bpf_spinlock_cnt {
__u32 cnt; __u32 cnt;
}; };
struct bpf_map_def SEC("maps") sk_pkt_out_cnt = { struct {
__u32 type;
__u32 map_flags;
int *key;
struct bpf_spinlock_cnt *value;
} sk_pkt_out_cnt SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE, .type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_spinlock_cnt),
.max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC, .map_flags = BPF_F_NO_PREALLOC,
}; };
BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt); struct {
__u32 type;
struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = { __u32 map_flags;
int *key;
struct bpf_spinlock_cnt *value;
} sk_pkt_out_cnt10 SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE, .type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_spinlock_cnt),
.max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC, .map_flags = BPF_F_NO_PREALLOC,
}; };
BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt);
static bool is_loopback6(__u32 *a6) static bool is_loopback6(__u32 *a6)
{ {
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1); return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
......
...@@ -10,30 +10,29 @@ struct hmap_elem { ...@@ -10,30 +10,29 @@ struct hmap_elem {
int test_padding; int test_padding;
}; };
struct bpf_map_def SEC("maps") hmap = { struct {
__u32 type;
__u32 max_entries;
int *key;
struct hmap_elem *value;
} hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(int),
.value_size = sizeof(struct hmap_elem),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
struct cls_elem { struct cls_elem {
struct bpf_spin_lock lock; struct bpf_spin_lock lock;
volatile int cnt; volatile int cnt;
}; };
struct bpf_map_def SEC("maps") cls_map = { struct {
__u32 type;
struct bpf_cgroup_storage_key *key;
struct cls_elem *value;
} cls_map SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE, .type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct cls_elem),
}; };
BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
struct cls_elem);
struct bpf_vqueue { struct bpf_vqueue {
struct bpf_spin_lock lock; struct bpf_spin_lock lock;
/* 4 byte hole */ /* 4 byte hole */
...@@ -42,14 +41,16 @@ struct bpf_vqueue { ...@@ -42,14 +41,16 @@ struct bpf_vqueue {
unsigned int rate; unsigned int rate;
}; };
struct bpf_map_def SEC("maps") vqueue = { struct {
__u32 type;
__u32 max_entries;
int *key;
struct bpf_vqueue *value;
} vqueue SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_vqueue),
.max_entries = 1, .max_entries = 1,
}; };
BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20) #define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
SEC("spin_lock_demo") SEC("spin_lock_demo")
......
...@@ -8,34 +8,50 @@ ...@@ -8,34 +8,50 @@
#define PERF_MAX_STACK_DEPTH 127 #define PERF_MAX_STACK_DEPTH 127
#endif #endif
struct bpf_map_def SEC("maps") control_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1, .max_entries = 1,
}; };
struct bpf_map_def SEC("maps") stackid_hmap = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 16384, .max_entries = 16384,
}; };
struct bpf_map_def SEC("maps") stackmap = { typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__u32 type;
__u32 max_entries;
__u32 map_flags;
__u32 key_size;
__u32 value_size;
} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE, .type = BPF_MAP_TYPE_STACK_TRACE,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH,
.max_entries = 128, .max_entries = 128,
.map_flags = BPF_F_STACK_BUILD_ID, .map_flags = BPF_F_STACK_BUILD_ID,
.key_size = sizeof(__u32),
.value_size = sizeof(stack_trace_t),
}; };
struct bpf_map_def SEC("maps") stack_amap = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
/* there seems to be a bug in kernel not handling typedef properly */
struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH,
.max_entries = 128, .max_entries = 128,
}; };
......
...@@ -8,31 +8,47 @@ ...@@ -8,31 +8,47 @@
#define PERF_MAX_STACK_DEPTH 127 #define PERF_MAX_STACK_DEPTH 127
#endif #endif
struct bpf_map_def SEC("maps") control_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1, .max_entries = 1,
}; };
struct bpf_map_def SEC("maps") stackid_hmap = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 16384, .max_entries = 16384,
}; };
struct bpf_map_def SEC("maps") stackmap = { typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE, .type = BPF_MAP_TYPE_STACK_TRACE,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384, .max_entries = 16384,
.key_size = sizeof(__u32),
.value_size = sizeof(stack_trace_t),
}; };
struct bpf_map_def SEC("maps") stack_amap = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 (*value)[PERF_MAX_STACK_DEPTH];
} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384, .max_entries = 16384,
}; };
......
...@@ -148,10 +148,13 @@ struct tcp_estats_basic_event { ...@@ -148,10 +148,13 @@ struct tcp_estats_basic_event {
struct tcp_estats_conn_id conn_id; struct tcp_estats_conn_id conn_id;
}; };
struct bpf_map_def SEC("maps") ev_record_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcp_estats_basic_event *value;
} ev_record_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcp_estats_basic_event),
.max_entries = 1024, .max_entries = 1024,
}; };
......
...@@ -14,17 +14,23 @@ ...@@ -14,17 +14,23 @@
#include "bpf_endian.h" #include "bpf_endian.h"
#include "test_tcpbpf.h" #include "test_tcpbpf.h"
struct bpf_map_def SEC("maps") global_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcpbpf_globals *value;
} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcpbpf_globals),
.max_entries = 4, .max_entries = 4,
}; };
struct bpf_map_def SEC("maps") sockopt_results = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
int *value;
} sockopt_results SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(int),
.max_entries = 2, .max_entries = 2,
}; };
......
...@@ -14,18 +14,26 @@ ...@@ -14,18 +14,26 @@
#include "bpf_endian.h" #include "bpf_endian.h"
#include "test_tcpnotify.h" #include "test_tcpnotify.h"
struct bpf_map_def SEC("maps") global_map = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcpnotify_globals *value;
} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcpnotify_globals),
.max_entries = 4, .max_entries = 4,
}; };
struct bpf_map_def SEC("maps") perf_event_map = { struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} perf_event_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.max_entries = 2,
.key_size = sizeof(int), .key_size = sizeof(int),
.value_size = sizeof(__u32), .value_size = sizeof(__u32),
.max_entries = 2,
}; };
int _version SEC("version") = 1; int _version SEC("version") = 1;
......
...@@ -22,17 +22,23 @@ ...@@ -22,17 +22,23 @@
int _version SEC("version") = 1; int _version SEC("version") = 1;
struct bpf_map_def SEC("maps") rxcnt = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} rxcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 256, .max_entries = 256,
}; };
struct bpf_map_def SEC("maps") vip2tnl = { struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct iptnl_info *value;
} vip2tnl SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct iptnl_info),
.max_entries = MAX_IPTNL_ENTRIES, .max_entries = MAX_IPTNL_ENTRIES,
}; };
......
...@@ -163,52 +163,66 @@ struct lb_stats { ...@@ -163,52 +163,66 @@ struct lb_stats {
__u64 v1; __u64 v1;
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = { struct {
__u32 type;
__u32 max_entries;
struct vip_definition *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH, .type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip_definition),
.value_size = sizeof(struct vip_meta),
.max_entries = 512, .max_entries = 512,
.map_flags = 0,
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = { struct {
__u32 type;
__u32 max_entries;
__u32 map_flags;
struct flow_key *key;
struct real_pos_lru *value;
} lru_cache SEC(".maps") = {
.type = BPF_MAP_TYPE_LRU_HASH, .type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(struct flow_key),
.value_size = sizeof(struct real_pos_lru),
.max_entries = 300, .max_entries = 300,
.map_flags = 1U << 1, .map_flags = 1U << 1,
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 12 * 655, .max_entries = 12 * 655,
.map_flags = 0,
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) reals = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = 40, .max_entries = 40,
.map_flags = 0,
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) stats = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct lb_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY, .type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct lb_stats),
.max_entries = 515, .max_entries = 515,
.map_flags = 0,
}; };
struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = { struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY, .type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = 16, .max_entries = 16,
.map_flags = 0,
}; };
struct eth_hdr { struct eth_hdr {
......
...@@ -4016,13 +4016,9 @@ struct btf_file_test { ...@@ -4016,13 +4016,9 @@ struct btf_file_test {
}; };
static struct btf_file_test file_tests[] = { static struct btf_file_test file_tests[] = {
{ { .file = "test_btf_haskv.o", },
.file = "test_btf_haskv.o", { .file = "test_btf_newkv.o", },
}, { .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
{
.file = "test_btf_nokv.o",
.btf_kv_notfound = true,
},
}; };
static int do_test_file(unsigned int test_num) static int do_test_file(unsigned int test_num)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment