Commit 5420f320 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'btf2c-converter'

Andrii Nakryiko says:

====================
This patch set adds BTF-to-C dumping APIs to libbpf, allowing to output
a subset of BTF types as a compilable C type definitions. This is useful by
itself, as raw BTF output is not easy to inspect and comprehend. But it's also
a big part of BPF CO-RE (compile once - run everywhere) initiative aimed at
allowing to write relocatable BPF programs, that won't require on-the-host
kernel headers (and would be able to inspect internal kernel structures, not
exposed through kernel headers).

This patch set consists of three groups of patches and one pre-patch, with the
BTF-to-C dumper API depending on the first two groups.

Pre-patch #1 fixes issue with libbpf_internal.h.

btf__parse_elf() API patches:
- patch #2 adds btf__parse_elf() API to libbpf, allowing to load BTF and/or
  BTF.ext from ELF file;
- patch #3 utilizes btf__parse_elf() from bpftool for `btf dump file` command;
- patch #4 switches test_btf.c to use btf__parse_elf() to check for presence
  of BTF data in object file.

libbpf's internal hashmap patches:
- patch #5 adds resizeable non-thread safe generic hashmap to libbpf;
- patch #6 adds tests for that hashmap;
- patch #7 migrates btf_dedup()'s dedup_table to use hashmap w/ APPEND.

BTF-to-C dumper API patches:
- patch #8 adds btf_dump APIs with all the logic for laying out type
  definitions in correct order and emitting C syntax for them;
- patch #9 adds lots of tests for common and quirky parts of C type system;
- patch #10 adds support for C-syntax btf dumping to bpftool;
- patch #11 updates bpftool documentation to mention C-syntax dump option;
- patch #12 updates bash-completion for btf dump sub-command.

v2->v3:
- fix bpftool-btf.rst formatting (Quentin);
- simplify bash autocompletion script (Quentin);
- better error message in btf dump (Quentin);

v1->v2:
- removed useless file header (Jakub);
- removed inlines in .c (Jakub);
- added 'format {c|raw}' keyword/option (Jakub);
- re-use i var for iteration in btf_dump_c() (Jakub);
- bumped libbpf version to 0.0.4;

v0->v1:
- fix bug in hashmap__for_each_bucket_entry() not handling empty hashmap;
- removed `btf dump`-specific libbpf logging hook up (Quentin has more generic
  patchset);
- change btf__parse_elf() to always load .BTF and return it as a result, with
  .BTF.ext being optional and returned through struct btf_ext** arg (Alexei);
- endianness check to use __BYTE_ORDER__ (Alexei);
- bool:1 to __u8:1 in type_aux_state (Alexei);
- added HASHMAP_APPEND strategy to hashmap, changed
  hashmap__for_each_key_entry() to also check for key equality during
  iteration (multimap iteration for key);
- added new tests for empty hashmap and hashmap as a multimap;
- tried to clarify weak/strong dependency ordering comments (Alexei)
- btf dump test's expected output - support better commenting approach (Alexei);
- added bash-completion for a new "c" option (Alexei).
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents c87f60a7 90eea408
......@@ -19,10 +19,11 @@ SYNOPSIS
BTF COMMANDS
=============
| **bpftool** **btf dump** *BTF_SRC*
| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*]
| **bpftool** **btf help**
|
| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
| *FORMAT* := { **raw** | **c** }
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
......@@ -49,6 +50,10 @@ DESCRIPTION
.BTF section with well-defined BTF binary format data,
typically produced by clang or pahole.
**format** option can be used to override default (raw)
output format. Raw (**raw**) or C-syntax (**c**) output
formats are supported.
**bpftool btf help**
Print short help message.
......
......@@ -638,11 +638,24 @@ _bpftool()
esac
return 0
;;
format)
COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
;;
*)
if [[ $cword == 6 ]] && [[ ${words[3]} == "map" ]]; then
COMPREPLY+=( $( compgen -W 'key value kv all' -- \
"$cur" ) )
# emit extra options
case ${words[3]} in
id|file)
_bpftool_once_attr 'format'
;;
map|prog)
if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then
COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) )
fi
_bpftool_once_attr 'format'
;;
*)
;;
esac
return 0
;;
esac
......
......@@ -8,8 +8,8 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <gelf.h>
#include <bpf.h>
#include <libbpf.h>
#include <linux/btf.h>
#include "btf.h"
......@@ -340,109 +340,40 @@ static int dump_btf_raw(const struct btf *btf,
return 0;
}
static bool check_btf_endianness(GElf_Ehdr *ehdr)
static void __printf(2, 0) btf_dump_printf(void *ctx,
const char *fmt, va_list args)
{
static unsigned int const endian = 1;
switch (ehdr->e_ident[EI_DATA]) {
case ELFDATA2LSB:
return *(unsigned char const *)&endian == 1;
case ELFDATA2MSB:
return *(unsigned char const *)&endian == 0;
default:
return 0;
}
vfprintf(stdout, fmt, args);
}
static int btf_load_from_elf(const char *path, struct btf **btf)
static int dump_btf_c(const struct btf *btf,
__u32 *root_type_ids, int root_type_cnt)
{
int err = -1, fd = -1, idx = 0;
Elf_Data *btf_data = NULL;
Elf_Scn *scn = NULL;
Elf *elf = NULL;
GElf_Ehdr ehdr;
if (elf_version(EV_CURRENT) == EV_NONE) {
p_err("failed to init libelf for %s", path);
return -1;
}
struct btf_dump *d;
int err = 0, i;
fd = open(path, O_RDONLY);
if (fd < 0) {
p_err("failed to open %s: %s", path, strerror(errno));
return -1;
}
d = btf_dump__new(btf, NULL, NULL, btf_dump_printf);
if (IS_ERR(d))
return PTR_ERR(d);
elf = elf_begin(fd, ELF_C_READ, NULL);
if (!elf) {
p_err("failed to open %s as ELF file", path);
goto done;
}
if (!gelf_getehdr(elf, &ehdr)) {
p_err("failed to get EHDR from %s", path);
goto done;
}
if (!check_btf_endianness(&ehdr)) {
p_err("non-native ELF endianness is not supported");
goto done;
}
if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
p_err("failed to get e_shstrndx from %s\n", path);
goto done;
}
while ((scn = elf_nextscn(elf, scn)) != NULL) {
GElf_Shdr sh;
char *name;
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
p_err("failed to get section(%d) header from %s",
idx, path);
goto done;
}
name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
if (!name) {
p_err("failed to get section(%d) name from %s",
idx, path);
goto done;
}
if (strcmp(name, BTF_ELF_SEC) == 0) {
btf_data = elf_getdata(scn, 0);
if (!btf_data) {
p_err("failed to get section(%d, %s) data from %s",
idx, name, path);
if (root_type_cnt) {
for (i = 0; i < root_type_cnt; i++) {
err = btf_dump__dump_type(d, root_type_ids[i]);
if (err)
goto done;
}
break;
}
}
} else {
int cnt = btf__get_nr_types(btf);
if (!btf_data) {
p_err("%s ELF section not found in %s", BTF_ELF_SEC, path);
for (i = 1; i <= cnt; i++) {
err = btf_dump__dump_type(d, i);
if (err)
goto done;
}
*btf = btf__new(btf_data->d_buf, btf_data->d_size);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
p_err("failed to load BTF data from %s: %s",
path, strerror(err));
goto done;
}
err = 0;
done:
if (err) {
if (*btf) {
btf__free(*btf);
*btf = NULL;
}
}
if (elf)
elf_end(elf);
close(fd);
btf_dump__free(d);
return err;
}
......@@ -451,6 +382,7 @@ static int do_dump(int argc, char **argv)
struct btf *btf = NULL;
__u32 root_type_ids[2];
int root_type_cnt = 0;
bool dump_c = false;
__u32 btf_id = -1;
const char *src;
int fd = -1;
......@@ -522,9 +454,14 @@ static int do_dump(int argc, char **argv)
}
NEXT_ARG();
} else if (is_prefix(src, "file")) {
err = btf_load_from_elf(*argv, &btf);
if (err)
btf = btf__parse_elf(*argv, NULL);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
btf = NULL;
p_err("failed to load BTF from %s: %s",
*argv, strerror(err));
goto done;
}
NEXT_ARG();
} else {
err = -1;
......@@ -532,6 +469,29 @@ static int do_dump(int argc, char **argv)
goto done;
}
while (argc) {
if (is_prefix(*argv, "format")) {
NEXT_ARG();
if (argc < 1) {
p_err("expecting value for 'format' option\n");
goto done;
}
if (strcmp(*argv, "c") == 0) {
dump_c = true;
} else if (strcmp(*argv, "raw") == 0) {
dump_c = false;
} else {
p_err("unrecognized format specifier: '%s', possible values: raw, c",
*argv);
goto done;
}
NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
goto done;
}
}
if (!btf) {
err = btf__get_from_id(btf_id, &btf);
if (err) {
......@@ -545,7 +505,16 @@ static int do_dump(int argc, char **argv)
}
}
dump_btf_raw(btf, root_type_ids, root_type_cnt);
if (dump_c) {
if (json_output) {
p_err("JSON output for C-syntax dump is not supported");
err = -ENOTSUP;
goto done;
}
err = dump_btf_c(btf, root_type_ids, root_type_cnt);
} else {
err = dump_btf_raw(btf, root_type_ids, root_type_cnt);
}
done:
close(fd);
......@@ -561,10 +530,11 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
"Usage: %s btf dump BTF_SRC\n"
"Usage: %s btf dump BTF_SRC [format FORMAT]\n"
" %s btf help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
" FORMAT := { raw | c }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
" " HELP_SPEC_OPTIONS "\n"
......
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
btf_dump.o
......@@ -4,14 +4,17 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include <gelf.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
......@@ -417,6 +420,132 @@ struct btf *btf__new(__u8 *data, __u32 size)
return btf;
}
/* Check that the ELF image's data encoding matches the endianness this
 * library was compiled for; cross-endian BTF parsing is not supported.
 */
static bool btf_check_endianness(const GElf_Ehdr *ehdr)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
/* Parse the .BTF section (and, if @btf_ext is non-NULL, the .BTF.ext
 * section) out of ELF file @path.
 *
 * Returns the loaded struct btf on success, or an ERR_PTR-encoded negative
 * error code (never NULL). When @btf_ext is non-NULL, *btf_ext receives the
 * parsed .BTF.ext data, or NULL if that section is absent; if .BTF.ext
 * parsing fails, the already-parsed btf is freed and the .BTF.ext error is
 * returned instead.
 */
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
	int err = 0, fd = -1, idx = 0;
	struct btf *btf = NULL;
	Elf_Scn *scn = NULL;
	Elf *elf = NULL;
	GElf_Ehdr ehdr;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		/* save errno before pr_warning()/strerror() can clobber it */
		err = -errno;
		pr_warning("failed to open %s: %s\n", path, strerror(errno));
		return ERR_PTR(err);
	}

	/* default error code for all ELF-shape failures below */
	err = -LIBBPF_ERRNO__FORMAT;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf) {
		pr_warning("failed to open %s as ELF file\n", path);
		goto done;
	}
	if (!gelf_getehdr(elf, &ehdr)) {
		pr_warning("failed to get EHDR from %s\n", path);
		goto done;
	}
	if (!btf_check_endianness(&ehdr)) {
		pr_warning("non-native ELF endianness is not supported\n");
		goto done;
	}
	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
		pr_warning("failed to get e_shstrndx from %s\n", path);
		goto done;
	}

	/* scan all sections, remembering .BTF and (if requested) .BTF.ext */
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;
		char *name;

		idx++;
		if (gelf_getshdr(scn, &sh) != &sh) {
			pr_warning("failed to get section(%d) header from %s\n",
				   idx, path);
			goto done;
		}
		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
		if (!name) {
			pr_warning("failed to get section(%d) name from %s\n",
				   idx, path);
			goto done;
		}
		if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = elf_getdata(scn, 0);
			if (!btf_data) {
				pr_warning("failed to get section(%d, %s) data from %s\n",
					   idx, name, path);
				goto done;
			}
			continue;
		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = elf_getdata(scn, 0);
			if (!btf_ext_data) {
				pr_warning("failed to get section(%d, %s) data from %s\n",
					   idx, name, path);
				goto done;
			}
			continue;
		}
	}

	err = 0;

	if (!btf_data) {
		/* ELF file has no .BTF section at all */
		err = -ENOENT;
		goto done;
	}
	btf = btf__new(btf_data->d_buf, btf_data->d_size);
	if (IS_ERR(btf))
		goto done;

	if (btf_ext && btf_ext_data) {
		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
					btf_ext_data->d_size);
		if (IS_ERR(*btf_ext))
			goto done;
	} else if (btf_ext) {
		/* caller asked for .BTF.ext, but the file has none */
		*btf_ext = NULL;
	}
done:
	if (elf)
		elf_end(elf);
	close(fd);

	if (err)
		return ERR_PTR(err);
	/*
	 * btf is always parsed before btf_ext, so no need to clean up
	 * btf_ext, if btf loading failed
	 */
	if (IS_ERR(btf))
		return btf;
	if (btf_ext && IS_ERR(*btf_ext)) {
		btf__free(btf);
		err = PTR_ERR(*btf_ext);
		return ERR_PTR(err);
	}

	return btf;
}
static int compare_vsi_off(const void *_a, const void *_b)
{
const struct btf_var_secinfo *a = _a;
......@@ -1165,16 +1294,9 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
return err;
}
#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
struct btf_dedup_node {
struct btf_dedup_node *next;
__u32 type_id;
};
struct btf_dedup {
/* .BTF section to be deduped in-place */
struct btf *btf;
......@@ -1190,7 +1312,7 @@ struct btf_dedup {
* candidates, which is fine because we rely on subsequent
* btf_xxx_equal() checks to authoritatively verify type equality.
*/
struct btf_dedup_node **dedup_table;
struct hashmap *dedup_table;
/* Canonical types map */
__u32 *map;
/* Hypothetical mapping, used during type graph equivalence checks */
......@@ -1215,30 +1337,18 @@ struct btf_str_ptrs {
__u32 cap;
};
static inline __u32 hash_combine(__u32 h, __u32 value)
static long hash_combine(long h, long value)
{
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL
return h * 37 + value * GOLDEN_RATIO_PRIME;
#undef GOLDEN_RATIO_PRIME
return h * 31 + value;
}
#define for_each_dedup_cand(d, hash, node) \
for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
node; \
node = node->next)
#define for_each_dedup_cand(d, node, hash) \
hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
int bucket = hash & (d->opts.dedup_table_size - 1);
if (!node)
return -ENOMEM;
node->type_id = type_id;
node->next = d->dedup_table[bucket];
d->dedup_table[bucket] = node;
return 0;
return hashmap__append(d->dedup_table,
(void *)hash, (void *)(long)type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
......@@ -1267,36 +1377,10 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
d->hypot_cnt = 0;
}
static void btf_dedup_table_free(struct btf_dedup *d)
{
struct btf_dedup_node *head, *tmp;
int i;
if (!d->dedup_table)
return;
for (i = 0; i < d->opts.dedup_table_size; i++) {
while (d->dedup_table[i]) {
tmp = d->dedup_table[i];
d->dedup_table[i] = tmp->next;
free(tmp);
}
head = d->dedup_table[i];
while (head) {
tmp = head;
head = head->next;
free(tmp);
}
}
free(d->dedup_table);
d->dedup_table = NULL;
}
static void btf_dedup_free(struct btf_dedup *d)
{
btf_dedup_table_free(d);
hashmap__free(d->dedup_table);
d->dedup_table = NULL;
free(d->map);
d->map = NULL;
......@@ -1310,40 +1394,43 @@ static void btf_dedup_free(struct btf_dedup *d)
free(d);
}
/* Find closest power of two >= to size, capped at 2^max_size_log */
static __u32 roundup_pow2_max(__u32 size, int max_size_log)
static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
{
int i;
return (size_t)key;
}
for (i = 0; i < max_size_log && (1U << i) < size; i++)
;
return 1U << i;
/* Degenerate hash that maps every key to bucket 0; used only to force
 * hash collisions when dedup_table_size == 1 is requested by tests.
 */
static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
{
	(void)key;
	(void)ctx;
	return 0;
}
/* Keys are compared by identity (raw pointer/integer value). */
static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
{
	(void)ctx;
	if (k1 == k2)
		return true;
	return false;
}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
int i, err = 0;
__u32 sz;
if (!d)
return ERR_PTR(-ENOMEM);
d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
sz = opts && opts->dedup_table_size ? opts->dedup_table_size
: BTF_DEDUP_TABLE_DEFAULT_SIZE;
sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
d->opts.dedup_table_size = sz;
/* dedup_table_size is now used only to force collisions in tests */
if (opts && opts->dedup_table_size == 1)
hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
d->btf_ext = btf_ext;
d->dedup_table = calloc(d->opts.dedup_table_size,
sizeof(struct btf_dedup_node *));
if (!d->dedup_table) {
err = -ENOMEM;
d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(d->dedup_table)) {
err = PTR_ERR(d->dedup_table);
d->dedup_table = NULL;
goto done;
}
......@@ -1662,9 +1749,9 @@ static int btf_dedup_strings(struct btf_dedup *d)
return err;
}
static __u32 btf_hash_common(struct btf_type *t)
static long btf_hash_common(struct btf_type *t)
{
__u32 h;
long h;
h = hash_combine(0, t->name_off);
h = hash_combine(h, t->info);
......@@ -1680,10 +1767,10 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of INT. */
static __u32 btf_hash_int(struct btf_type *t)
static long btf_hash_int(struct btf_type *t)
{
__u32 info = *(__u32 *)(t + 1);
__u32 h;
long h;
h = btf_hash_common(t);
h = hash_combine(h, info);
......@@ -1703,9 +1790,9 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
}
/* Calculate type signature hash of ENUM. */
static __u32 btf_hash_enum(struct btf_type *t)
static long btf_hash_enum(struct btf_type *t)
{
__u32 h;
long h;
/* don't hash vlen and enum members to support enum fwd resolving */
h = hash_combine(0, t->name_off);
......@@ -1757,11 +1844,11 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
* as referenced type IDs equivalence is established separately during type
* graph equivalence check algorithm.
*/
static __u32 btf_hash_struct(struct btf_type *t)
static long btf_hash_struct(struct btf_type *t)
{
struct btf_member *member = (struct btf_member *)(t + 1);
__u32 vlen = BTF_INFO_VLEN(t->info);
__u32 h = btf_hash_common(t);
long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
......@@ -1804,10 +1891,10 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
static __u32 btf_hash_array(struct btf_type *t)
static long btf_hash_array(struct btf_type *t)
{
struct btf_array *info = (struct btf_array *)(t + 1);
__u32 h = btf_hash_common(t);
long h = btf_hash_common(t);
h = hash_combine(h, info->type);
h = hash_combine(h, info->index_type);
......@@ -1858,11 +1945,11 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
* under assumption that they were already resolved to canonical type IDs and
* are not going to change.
*/
static inline __u32 btf_hash_fnproto(struct btf_type *t)
static long btf_hash_fnproto(struct btf_type *t)
{
struct btf_param *member = (struct btf_param *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
__u32 h = btf_hash_common(t);
long h = btf_hash_common(t);
int i;
for (i = 0; i < vlen; i++) {
......@@ -1880,7 +1967,7 @@ static inline __u32 btf_hash_fnproto(struct btf_type *t)
* This function is called during reference types deduplication to compare
* FUNC_PROTO to potential canonical representative.
*/
static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
struct btf_param *m1, *m2;
__u16 vlen;
......@@ -1906,7 +1993,7 @@ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
* IDs. This check is performed during type graph equivalence check and
* referenced types equivalence is checked separately.
*/
static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
struct btf_param *m1, *m2;
__u16 vlen;
......@@ -1937,11 +2024,12 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_type *t = d->btf->types[type_id];
struct hashmap_entry *hash_entry;
struct btf_type *cand;
struct btf_dedup_node *cand_node;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
__u32 h;
__u32 cand_id;
long h;
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_CONST:
......@@ -1960,10 +2048,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_INT:
h = btf_hash_int(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_int(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
}
......@@ -1971,10 +2060,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_ENUM:
h = btf_hash_enum(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_enum(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
if (d->opts.dont_resolve_fwds)
......@@ -1982,21 +2072,22 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
/* resolve canonical enum fwd to full enum */
d->map[cand_node->type_id] = type_id;
d->map[cand_id] = type_id;
}
}
break;
case BTF_KIND_FWD:
h = btf_hash_common(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
}
......@@ -2397,12 +2488,12 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
*/
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_dedup_node *cand_node;
struct btf_type *cand_type, *t;
struct hashmap_entry *hash_entry;
/* if we don't find equivalent type, then we are canonical */
__u32 new_id = type_id;
__u16 kind;
__u32 h;
long h;
/* already deduped or is in process of deduping (loop detected) */
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
......@@ -2415,7 +2506,8 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
return 0;
h = btf_hash_struct(t);
for_each_dedup_cand(d, h, cand_node) {
for_each_dedup_cand(d, hash_entry, h) {
__u32 cand_id = (__u32)(long)hash_entry->value;
int eq;
/*
......@@ -2428,17 +2520,17 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
cand_type = d->btf->types[cand_node->type_id];
cand_type = d->btf->types[cand_id];
if (!btf_shallow_equal_struct(t, cand_type))
continue;
btf_dedup_clear_hypot_map(d);
eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
eq = btf_dedup_is_equiv(d, type_id, cand_id);
if (eq < 0)
return eq;
if (!eq)
continue;
new_id = cand_node->type_id;
new_id = cand_id;
btf_dedup_merge_hypot_map(d);
break;
}
......@@ -2488,12 +2580,12 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
*/
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
struct btf_dedup_node *cand_node;
struct hashmap_entry *hash_entry;
__u32 new_id = type_id, cand_id;
struct btf_type *t, *cand;
/* if we don't find equivalent type, then we are representative type */
__u32 new_id = type_id;
int ref_type_id;
__u32 h;
long h;
if (d->map[type_id] == BTF_IN_PROGRESS_ID)
return -ELOOP;
......@@ -2516,10 +2608,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
t->type = ref_type_id;
h = btf_hash_common(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_common(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
}
......@@ -2539,10 +2632,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
info->index_type = ref_type_id;
h = btf_hash_array(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_array(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
}
......@@ -2570,10 +2664,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
}
h = btf_hash_fnproto(t);
for_each_dedup_cand(d, h, cand_node) {
cand = d->btf->types[cand_node->type_id];
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
cand = d->btf->types[cand_id];
if (btf_equal_fnproto(t, cand)) {
new_id = cand_node->type_id;
new_id = cand_id;
break;
}
}
......@@ -2600,7 +2695,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
if (err < 0)
return err;
}
btf_dedup_table_free(d);
/* we won't need d->dedup_table anymore */
hashmap__free(d->dedup_table);
d->dedup_table = NULL;
return 0;
}
......
......@@ -4,6 +4,7 @@
#ifndef __LIBBPF_BTF_H
#define __LIBBPF_BTF_H
#include <stdarg.h>
#include <linux/types.h>
#ifdef __cplusplus
......@@ -59,6 +60,8 @@ struct btf_ext_header {
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
LIBBPF_API struct btf *btf__parse_elf(const char *path,
struct btf_ext **btf_ext);
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
......@@ -100,6 +103,22 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts);
struct btf_dump;
struct btf_dump_opts {
void *ctx;
};
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
btf_dump_printf_fn_t printf_fn);
LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
#ifdef __cplusplus
} /* extern "C" */
#endif
......
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C type converter.
*
* Copyright (c) 2019 Facebook
*/
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/err.h>
#include <linux/btf.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#define min(x, y) ((x) < (y) ? (x) : (y))
#define max(x, y) ((x) < (y) ? (y) : (x))
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
/* Return a string of lvl tab characters (capped at PREFIX_CNT tabs),
 * used to indent output by nesting level.
 */
static const char *pfx(int lvl)
{
	if (lvl >= PREFIX_CNT)
		return PREFIXES;
	return &PREFIXES[PREFIX_CNT - lvl];
}
enum btf_dump_type_order_state {
NOT_ORDERED,
ORDERING,
ORDERED,
};
enum btf_dump_type_emit_state {
NOT_EMITTED,
EMITTING,
EMITTED,
};
/* per-type auxiliary state */
struct btf_dump_type_aux_state {
/* topological sorting state */
enum btf_dump_type_order_state order_state: 2;
/* emitting state used to determine the need for forward declaration */
enum btf_dump_type_emit_state emit_state: 2;
/* whether forward declaration was already emitted */
__u8 fwd_emitted: 1;
/* whether unique non-duplicate name was already assigned */
__u8 name_resolved: 1;
};
struct btf_dump {
const struct btf *btf;
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
/* per-type auxiliary state */
struct btf_dump_type_aux_state *type_states;
/* per-type optional cached unique name, must be freed, if present */
const char **cached_names;
/* topo-sorted list of dependent type definitions */
__u32 *emit_queue;
int emit_queue_cap;
int emit_queue_cnt;
/*
* stack of type declarations (e.g., chain of modifiers, arrays,
* funcs, etc)
*/
__u32 *decl_stack;
int decl_stack_cap;
int decl_stack_cnt;
/* maps struct/union/enum name to a number of name occurrences */
struct hashmap *type_names;
/*
* maps typedef identifiers and enum value names to a number of such
* name occurrences
*/
struct hashmap *ident_names;
};
/* Simple multiplicative (x31) string hash for hashmap keys. */
static size_t str_hash_fn(const void *key, void *ctx)
{
	size_t hash = 0;
	const char *p;

	(void)ctx;
	for (p = key; *p; p++)
		hash = hash * 31 + *p;
	return hash;
}
/* String-content equality for hashmap keys. */
static bool str_equal_fn(const void *a, const void *b, void *ctx)
{
	(void)ctx;
	return !strcmp(a, b);
}
/* Extract the BTF_KIND_* kind encoded in the type's info word. */
static __u16 btf_kind_of(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info);
}
/* Extract vlen (kind-specific element count) from the type's info word. */
static __u16 btf_vlen_of(const struct btf_type *t)
{
	return BTF_INFO_VLEN(t->info);
}
/* Extract the kind_flag bit from the type's info word. */
static bool btf_kflag_of(const struct btf_type *t)
{
	return BTF_INFO_KFLAG(t->info);
}
/* Resolve a BTF string-section offset to its string. */
static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
{
	return btf__name_by_offset(d->btf, name_off);
}
/* Emit formatted output through the user-supplied printf callback,
 * passing along the user's context pointer.
 */
static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	d->printf_fn(d->opts.ctx, fmt, args);
	va_end(args);
}
/* Create a new BTF-to-C dumper for @btf. @btf_ext and @opts may be NULL;
 * @printf_fn receives all generated output. Returns a valid pointer or an
 * ERR_PTR-encoded negative error code (never plain NULL). On success, the
 * returned dumper must be released with btf_dump__free().
 */
struct btf_dump *btf_dump__new(const struct btf *btf,
			       const struct btf_ext *btf_ext,
			       const struct btf_dump_opts *opts,
			       btf_dump_printf_fn_t printf_fn)
{
	struct btf_dump *d;
	int err;

	d = calloc(1, sizeof(struct btf_dump));
	if (!d)
		return ERR_PTR(-ENOMEM);

	d->btf = btf;
	d->btf_ext = btf_ext;
	d->printf_fn = printf_fn;
	d->opts.ctx = opts ? opts->ctx : NULL;

	d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
	if (IS_ERR(d->type_names)) {
		err = PTR_ERR(d->type_names);
		/* NULL out ERR_PTR value before btf_dump__free() runs */
		d->type_names = NULL;
		btf_dump__free(d);
		return ERR_PTR(err);
	}
	d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
	if (IS_ERR(d->ident_names)) {
		err = PTR_ERR(d->ident_names);
		/* NULL out ERR_PTR value before btf_dump__free() runs */
		d->ident_names = NULL;
		btf_dump__free(d);
		return ERR_PTR(err);
	}

	return d;
}
/* Free the dumper and all per-type state it lazily accumulated.
 * Safe to call with NULL.
 */
void btf_dump__free(struct btf_dump *d)
{
	int i, cnt;

	if (!d)
		return;

	free(d->type_states);
	if (d->cached_names) {
		/* any set cached name is owned by us and should be freed */
		/* arrays hold 1 + nr_types entries (IDs are 1-based), so <= */
		for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
			if (d->cached_names[i])
				free((void *)d->cached_names[i]);
		}
	}
	free(d->cached_names);
	free(d->emit_queue);
	free(d->decl_stack);
	hashmap__free(d->type_names);
	hashmap__free(d->ident_names);

	free(d);
}
static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
/*
* Dump BTF type in a compilable C syntax, including all the necessary
* dependent types, necessary for compilation. If some of the dependent types
* were already emitted as part of previous btf_dump__dump_type() invocation
* for another type, they won't be emitted again. This API allows callers to
 * filter out BTF types according to user-defined criteria and emit only
* minimal subset of types, necessary to compile everything. Full struct/union
* definitions will still be emitted, even if the only usage is through
* pointer and could be satisfied with just a forward declaration.
*
* Dumping is done in two high-level passes:
* 1. Topologically sort type definitions to satisfy C rules of compilation.
* 2. Emit type definitions in C syntax.
*
* Returns 0 on success; <0, otherwise.
*/
/* Dump BTF type @id (plus all its not-yet-emitted dependencies) as C.
 * Returns 0 on success, <0 on error (-EINVAL for out-of-range id,
 * -ENOMEM on allocation failure, or an ordering error).
 */
int btf_dump__dump_type(struct btf_dump *d, __u32 id)
{
	int err, i;

	if (id > btf__get_nr_types(d->btf))
		return -EINVAL;

	/* type states are lazily allocated, as they might not be needed */
	if (!d->type_states) {
		d->type_states = calloc(1 + btf__get_nr_types(d->btf),
					sizeof(d->type_states[0]));
		if (!d->type_states)
			return -ENOMEM;
		d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
					 sizeof(d->cached_names[0]));
		if (!d->cached_names) {
			/* don't leave half-initialized lazy state behind:
			 * a later call would see type_states set, skip this
			 * block, and run with cached_names still NULL
			 */
			free(d->type_states);
			d->type_states = NULL;
			return -ENOMEM;
		}

		/* VOID is special */
		d->type_states[0].order_state = ORDERED;
		d->type_states[0].emit_state = EMITTED;
	}

	d->emit_queue_cnt = 0;
	err = btf_dump_order_type(d, id, false);
	if (err < 0)
		return err;

	for (i = 0; i < d->emit_queue_cnt; i++)
		btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);

	return 0;
}
/*
 * Append a type id to the emit queue, growing the queue's capacity
 * by ~1.5x (min 16) when full. Returns 0 on success, -ENOMEM on OOM.
 */
static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
{
	if (d->emit_queue_cnt >= d->emit_queue_cap) {
		size_t grown_cap = max(16, d->emit_queue_cap * 3 / 2);
		__u32 *grown;

		grown = realloc(d->emit_queue, grown_cap * sizeof(*grown));
		if (!grown)
			return -ENOMEM;

		d->emit_queue = grown;
		d->emit_queue_cap = grown_cap;
	}

	d->emit_queue[d->emit_queue_cnt++] = id;
	return 0;
}
/*
* Determine order of emitting dependent types and specified type to satisfy
* C compilation rules. This is done through topological sorting with an
* additional complication which comes from C rules. The main idea for C is
 * that if some type is "embedded" into a struct/union, its size needs to be
* known at the time of definition of containing type. E.g., for:
*
* struct A {};
* struct B { struct A x; }
*
* struct A *HAS* to be defined before struct B, because it's "embedded",
* i.e., it is part of struct B layout. But in the following case:
*
* struct A;
* struct B { struct A *x; }
* struct A {};
*
* it's enough to just have a forward declaration of struct A at the time of
* struct B definition, as struct B has a pointer to struct A, so the size of
* field x is known without knowing struct A size: it's sizeof(void *).
*
* Unfortunately, there are some trickier cases we need to handle, e.g.:
*
* struct A {}; // if this was forward-declaration: compilation error
* struct B {
* struct { // anonymous struct
* struct A y;
* } *x;
* };
*
 * In this case, struct B's field x is a pointer, so its size is known
* regardless of the size of (anonymous) struct it points to. But because this
* struct is anonymous and thus defined inline inside struct B, *and* it
* embeds struct A, compiler requires full definition of struct A to be known
* before struct B can be defined. This creates a transitive dependency
* between struct A and struct B. If struct A was forward-declared before
* struct B definition and fully defined after struct B definition, that would
* trigger compilation error.
*
* All this means that while we are doing topological sorting on BTF type
* graph, we need to determine relationships between different types (graph
* nodes):
* - weak link (relationship) between X and Y, if Y *CAN* be
* forward-declared at the point of X definition;
* - strong link, if Y *HAS* to be fully-defined before X can be defined.
*
* The rule is as follows. Given a chain of BTF types from X to Y, if there is
* BTF_KIND_PTR type in the chain and at least one non-anonymous type
* Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
* Weak/strong relationship is determined recursively during DFS traversal and
* is returned as a result from btf_dump_order_type().
*
* btf_dump_order_type() is trying to avoid unnecessary forward declarations,
* but it is not guaranteeing that no extraneous forward declarations will be
* emitted.
*
* To avoid extra work, algorithm marks some of BTF types as ORDERED, when
* it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT,
* ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the
* entire graph path, so depending where from one came to that BTF type, it
* might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM,
* once they are processed, there is no need to do it again, so they are
* marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces
* weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But
* in any case, once those are processed, no need to do it again, as the
* result won't change.
*
* Returns:
* - 1, if type is part of strong link (so there is strong topological
* ordering requirements);
* - 0, if type is part of weak link (so can be satisfied through forward
* declaration);
* - <0, on error (e.g., unsatisfiable type loop detected).
*/
static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
{
	/*
	 * Order state is used to detect strong link cycles, but only for BTF
	 * kinds that are or could be an independent definition (i.e.,
	 * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
	 * func_protos, modifiers are just means to get to these definitions.
	 * Int/void don't need definitions, they are assumed to be always
	 * properly defined. We also ignore datasec, var, and funcs for now.
	 * So for all non-defining kinds, we never even set ordering state,
	 * for defining kinds we set ORDERING and subsequently ORDERED if it
	 * forms a strong link.
	 */
	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
	const struct btf_type *t;
	__u16 kind, vlen;
	int err, i;

	/* return true, letting typedefs know that it's ok to be emitted */
	if (tstate->order_state == ORDERED)
		return 1;

	t = btf__type_by_id(d->btf, id);
	kind = btf_kind_of(t);

	if (tstate->order_state == ORDERING) {
		/* type loop, but resolvable through fwd declaration */
		if ((kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION) &&
		    through_ptr && t->name_off != 0)
			return 0;
		pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
		return -ELOOP;
	}

	switch (kind) {
	case BTF_KIND_INT:
		/* int needs no definition; never contributes a strong link */
		tstate->order_state = ORDERED;
		return 0;

	case BTF_KIND_PTR:
		/* pointer makes every subsequent link weak (through_ptr) */
		err = btf_dump_order_type(d, t->type, true);
		tstate->order_state = ORDERED;
		return err;

	case BTF_KIND_ARRAY: {
		const struct btf_array *a = (void *)(t + 1);

		/* array element inherits current weak/strong context */
		return btf_dump_order_type(d, a->type, through_ptr);
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = (void *)(t + 1);
		/*
		 * struct/union is part of strong link, only if it's embedded
		 * (so no ptr in a path) or it's anonymous (so has to be
		 * defined inline, even if declared through ptr)
		 */
		if (through_ptr && t->name_off != 0)
			return 0;

		tstate->order_state = ORDERING;

		vlen = btf_vlen_of(t);
		for (i = 0; i < vlen; i++, m++) {
			/* members are embedded, so links to them are strong */
			err = btf_dump_order_type(d, m->type, false);
			if (err < 0)
				return err;
		}

		/* only named struct/union gets a top-level definition */
		if (t->name_off != 0) {
			err = btf_dump_add_emit_queue_id(d, id);
			if (err < 0)
				return err;
		}

		tstate->order_state = ORDERED;
		return 1;
	}
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
		if (t->name_off != 0) {
			err = btf_dump_add_emit_queue_id(d, id);
			if (err)
				return err;
		}
		tstate->order_state = ORDERED;
		return 1;

	case BTF_KIND_TYPEDEF: {
		int is_strong;

		is_strong = btf_dump_order_type(d, t->type, through_ptr);
		if (is_strong < 0)
			return is_strong;

		/* typedef is similar to struct/union w.r.t. fwd-decls */
		if (through_ptr && !is_strong)
			return 0;

		/* typedef is always a named definition */
		err = btf_dump_add_emit_queue_id(d, id);
		if (err)
			return err;

		d->type_states[id].order_state = ORDERED;
		return 1;
	}
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		/* modifiers are transparent for ordering purposes */
		return btf_dump_order_type(d, t->type, through_ptr);

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *p = (void *)(t + 1);
		bool is_strong;

		/* return type and all argument types contribute strength */
		err = btf_dump_order_type(d, t->type, through_ptr);
		if (err < 0)
			return err;
		is_strong = err > 0;

		vlen = btf_vlen_of(t);
		for (i = 0; i < vlen; i++, p++) {
			err = btf_dump_order_type(d, p->type, through_ptr);
			if (err < 0)
				return err;
			if (err > 0)
				is_strong = true;
		}
		return is_strong;
	}
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
		/* not emitted as C definitions; mark done and move on */
		d->type_states[id].order_state = ORDERED;
		return 0;

	default:
		return -EINVAL;
	}
}
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
const struct btf_type *t, int lvl);
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
const struct btf_type *t, int lvl);
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
const struct btf_type *t, int lvl);
/*
 * A local view into the shared btf_dump->decl_stack: each type declaration
 * chain works on its own non-overlapping "stack frame" of the shared stack.
 */
struct id_stack {
	const __u32 *ids; /* base of this frame within the shared stack */
	int cnt;          /* number of ids remaining in this frame */
};
static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
const char *fname, int lvl);
static void btf_dump_emit_type_chain(struct btf_dump *d,
struct id_stack *decl_stack,
const char *fname, int lvl);
static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
const char *orig_name);
/* check if a type's name should never be emitted as a C definition */
static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(d->btf, id);
	const char *name;

	/* anonymous types can't match any blacklisted name */
	if (t->name_off == 0)
		return false;

	/* __builtin_va_list is a compiler built-in, which causes compilation
	 * errors, when compiling w/ a different compiler than the one used to
	 * compile the original code (e.g., GCC to compile kernel, Clang to
	 * use generated C header from BTF). As it is built-in, it should be
	 * already defined properly internally in the compiler.
	 */
	name = btf_name_of(d, t->name_off);
	return strcmp(name, "__builtin_va_list") == 0;
}
/*
* Emit C-syntax definitions of types from chains of BTF types.
*
* High-level handling of determining necessary forward declarations are handled
* by btf_dump_emit_type() itself, but all nitty-gritty details of emitting type
* declarations/definitions in C syntax are handled by a combo of
* btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
* corresponding btf_dump_emit_*_{def,fwd}() functions.
*
* We also keep track of "containing struct/union type ID" to determine when
* we reference it from inside and thus can avoid emitting unnecessary forward
* declaration.
*
* This algorithm is designed in such a way, that even if some error occurs
* (either technical, e.g., out of memory, or logical, i.e., malformed BTF
* that doesn't comply to C rules completely), algorithm will try to proceed
* and produce as much meaningful output as possible.
*/
static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
{
	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
	bool top_level_def = cont_id == 0;
	const struct btf_type *t;
	__u16 kind;

	if (tstate->emit_state == EMITTED)
		return;

	t = btf__type_by_id(d->btf, id);
	kind = btf_kind_of(t);

	if (top_level_def && t->name_off == 0) {
		pr_warning("unexpected nameless definition, id:[%u]\n", id);
		return;
	}

	if (tstate->emit_state == EMITTING) {
		if (tstate->fwd_emitted)
			return;

		switch (kind) {
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			/*
			 * if we are referencing a struct/union that we are
			 * part of - then no need for fwd declaration
			 */
			if (id == cont_id)
				return;
			if (t->name_off == 0) {
				pr_warning("anonymous struct/union loop, id:[%u]\n",
					   id);
				return;
			}
			btf_dump_emit_struct_fwd(d, id, t);
			btf_dump_printf(d, ";\n\n");
			tstate->fwd_emitted = 1;
			break;
		case BTF_KIND_TYPEDEF:
			/*
			 * for typedef fwd_emitted means typedef definition
			 * was emitted, but it can be used only for "weak"
			 * references through pointer only, not for embedding
			 */
			if (!btf_dump_is_blacklisted(d, id)) {
				btf_dump_emit_typedef_def(d, id, t, 0);
				btf_dump_printf(d, ";\n\n");
			} /* stray ';' after this block removed */
			tstate->fwd_emitted = 1;
			break;
		default:
			break;
		}

		return;
	}

	switch (kind) {
	case BTF_KIND_INT:
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_ENUM:
		if (top_level_def) {
			btf_dump_emit_enum_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
		}
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_PTR:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		btf_dump_emit_type(d, t->type, cont_id);
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = (void *)(t + 1);

		btf_dump_emit_type(d, a->type, cont_id);
		break;
	}
	case BTF_KIND_FWD:
		btf_dump_emit_fwd_def(d, id, t);
		btf_dump_printf(d, ";\n\n");
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_TYPEDEF:
		tstate->emit_state = EMITTING;
		btf_dump_emit_type(d, t->type, id);
		/*
		 * typedef can serve as both definition and forward
		 * declaration; at this stage someone depends on
		 * typedef as a forward declaration (refers to it
		 * through pointer), so unless we already did it,
		 * emit typedef as a forward declaration
		 */
		if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
			btf_dump_emit_typedef_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
		}
		tstate->emit_state = EMITTED;
		break;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		tstate->emit_state = EMITTING;
		/* if it's a top-level struct/union definition or struct/union
		 * is anonymous, then in C we'll be emitting all fields and
		 * their types (as opposed to just `struct X`), so we need to
		 * make sure that all types, referenced from struct/union
		 * members have necessary forward-declarations, where
		 * applicable
		 */
		if (top_level_def || t->name_off == 0) {
			const struct btf_member *m = (void *)(t + 1);
			__u16 vlen = btf_vlen_of(t);
			int i, new_cont_id;

			/* anonymous members stay within enclosing container */
			new_cont_id = t->name_off == 0 ? cont_id : id;
			for (i = 0; i < vlen; i++, m++)
				btf_dump_emit_type(d, m->type, new_cont_id);
		} else if (!tstate->fwd_emitted && id != cont_id) {
			btf_dump_emit_struct_fwd(d, id, t);
			btf_dump_printf(d, ";\n\n");
			tstate->fwd_emitted = 1;
		}

		if (top_level_def) {
			btf_dump_emit_struct_def(d, id, t, 0);
			btf_dump_printf(d, ";\n\n");
			tstate->emit_state = EMITTED;
		} else {
			tstate->emit_state = NOT_EMITTED;
		}
		break;
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *p = (void *)(t + 1);
		__u16 vlen = btf_vlen_of(t);
		int i;

		btf_dump_emit_type(d, t->type, cont_id);
		for (i = 0; i < vlen; i++, p++)
			btf_dump_emit_type(d, p->type, cont_id);

		break;
	}
	default:
		break;
	}
}
static int btf_align_of(const struct btf *btf, __u32 id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
__u16 kind = btf_kind_of(t);
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
return min(sizeof(void *), t->size);
case BTF_KIND_PTR:
return sizeof(void *);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_align_of(btf, t->type);
case BTF_KIND_ARRAY: {
const struct btf_array *a = (void *)(t + 1);
return btf_align_of(btf, a->type);
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
const struct btf_member *m = (void *)(t + 1);
__u16 vlen = btf_vlen_of(t);
int i, align = 1;
for (i = 0; i < vlen; i++, m++)
align = max(align, btf_align_of(btf, m->type));
return align;
}
default:
pr_warning("unsupported BTF_KIND:%u\n", btf_kind_of(t));
return 1;
}
}
/*
 * Heuristically detect whether a struct must have been declared packed:
 * either its total size is not a multiple of its natural alignment, or
 * some non-bitfield member is not naturally aligned.
 */
static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
				 const struct btf_type *t)
{
	const struct btf_member *m;
	int align, i, bit_sz;
	__u16 vlen;
	bool kflag;

	align = btf_align_of(btf, id);
	/* size of a non-packed struct has to be a multiple of its alignment*/
	if (t->size % align)
		return true;

	m = (void *)(t + 1);
	kflag = btf_kflag_of(t);
	vlen = btf_vlen_of(t);
	/* all non-bitfield fields have to be naturally aligned */
	for (i = 0; i < vlen; i++, m++) {
		align = btf_align_of(btf, m->type);
		bit_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
		/* m->offset is in bits, align is in bytes */
		if (bit_sz == 0 && m->offset % (8 * align) != 0)
			return true;
	}

	/*
	 * if original struct was marked as packed, but its layout is
	 * naturally aligned, we'll detect that it's not packed
	 */
	return false;
}
/*
 * Pick the number of padding bits to emit next, using padding units of
 * 'at_most' bits: emit the remainder first (to reach a unit boundary),
 * then whole units. E.g., chip_away_bits(18, 8) -> 2, then 8, then 8.
 *
 * Rewritten in portable C: the original relied on the GNU "x ? : y"
 * elvis-operator extension, which is not standard C.
 */
static int chip_away_bits(int total, int at_most)
{
	int rem = total % at_most;

	return rem ? rem : at_most;
}
/*
 * Emit unnamed bitfield padding members filling the gap between the
 * current bit offset (cur_off) and the next member's bit offset (m_off),
 * so the re-compiled struct reproduces the original layout exactly.
 */
static void btf_dump_emit_bit_padding(const struct btf_dump *d,
				      int cur_off, int m_off, int m_bit_sz,
				      int align, int lvl)
{
	int off_diff = m_off - cur_off;
	int ptr_bits = sizeof(void *) * 8;

	if (off_diff <= 0)
		/* no gap */
		return;
	if (m_bit_sz == 0 && off_diff < align * 8)
		/* natural padding will take care of a gap */
		return;

	/* fill the gap using the widest padding base type that fits */
	while (off_diff > 0) {
		const char *pad_type;
		int pad_bits;

		if (ptr_bits > 32 && off_diff > 32) {
			pad_type = "long";
			pad_bits = chip_away_bits(off_diff, ptr_bits);
		} else if (off_diff > 16) {
			pad_type = "int";
			pad_bits = chip_away_bits(off_diff, 32);
		} else if (off_diff > 8) {
			pad_type = "short";
			pad_bits = chip_away_bits(off_diff, 16);
		} else {
			pad_type = "char";
			pad_bits = chip_away_bits(off_diff, 8);
		}
		btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
		off_diff -= pad_bits;
	}
}
/* emit forward declaration text: 'struct <name>' or 'union <name>' */
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
				     const struct btf_type *t)
{
	const char *kw;

	kw = btf_kind_of(t) == BTF_KIND_STRUCT ? "struct" : "union";
	btf_dump_printf(d, "%s %s", kw, btf_dump_type_name(d, id));
}
/* emit full struct/union definition, including all members, at
 * indentation level lvl; handles bitfields, explicit padding, and
 * the __attribute__((packed)) annotation
 */
static void btf_dump_emit_struct_def(struct btf_dump *d,
				     __u32 id,
				     const struct btf_type *t,
				     int lvl)
{
	const struct btf_member *m = (void *)(t + 1);
	bool kflag = btf_kflag_of(t), is_struct;
	int align, i, packed, off = 0;
	__u16 vlen = btf_vlen_of(t);

	is_struct = btf_kind_of(t) == BTF_KIND_STRUCT;
	/* only structs can meaningfully be packed; union members are all
	 * at offset 0
	 */
	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
	align = packed ? 1 : btf_align_of(d->btf, id);

	btf_dump_printf(d, "%s%s%s {",
			is_struct ? "struct" : "union",
			t->name_off ? " " : "",
			btf_dump_type_name(d, id));

	for (i = 0; i < vlen; i++, m++) {
		const char *fname;
		int m_off, m_sz;

		fname = btf_name_of(d, m->name_off);
		/* kflag means m->offset encodes bitfield size + bit offset */
		m_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
		m_off = kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
		align = packed ? 1 : btf_align_of(d->btf, m->type);

		/* emit explicit padding to reproduce exact memory layout */
		btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
		btf_dump_printf(d, "\n%s", pfx(lvl + 1));
		btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);

		if (m_sz) {
			btf_dump_printf(d, ": %d", m_sz);
			off = m_off + m_sz;
		} else {
			/* NOTE(review): a negative (error) resolve_size is
			 * clamped to 0, silently treating member as empty
			 */
			m_sz = max(0, btf__resolve_size(d->btf, m->type));
			off = m_off + m_sz * 8;
		}
		btf_dump_printf(d, ";");
	}

	if (vlen)
		btf_dump_printf(d, "\n");
	btf_dump_printf(d, "%s}", pfx(lvl));
	if (packed)
		btf_dump_printf(d, " __attribute__((packed))");
}
/* emit forward declaration text: 'enum <name>' */
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
				   const struct btf_type *t)
{
	const char *name = btf_dump_type_name(d, id);

	btf_dump_printf(d, "enum %s", name);
}
/* emit full enum definition, de-duplicating enumerator names against
 * the shared identifier namespace
 */
static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
				   const struct btf_type *t,
				   int lvl)
{
	const struct btf_enum *val = (void *)(t + 1);
	__u16 n = btf_vlen_of(t);
	const char *val_name;
	size_t dup_cnt;
	int i;

	btf_dump_printf(d, "enum%s%s",
			t->name_off ? " " : "",
			btf_dump_type_name(d, id));

	if (!n)
		return;

	btf_dump_printf(d, " {");
	for (i = 0; i < n; i++, val++) {
		val_name = btf_name_of(d, val->name_off);
		/* enumerators share namespace with typedef idents */
		dup_cnt = btf_dump_name_dups(d, d->ident_names, val_name);
		if (dup_cnt <= 1) {
			btf_dump_printf(d, "\n%s%s = %d,",
					pfx(lvl + 1), val_name,
					(__s32)val->val);
		} else {
			btf_dump_printf(d, "\n%s%s___%zu = %d,",
					pfx(lvl + 1), val_name, dup_cnt,
					(__s32)val->val);
		}
	}
	btf_dump_printf(d, "\n%s}", pfx(lvl));
}
/* emit a BTF_KIND_FWD type; kflag distinguishes union fwd from struct fwd */
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
				  const struct btf_type *t)
{
	const char *name = btf_dump_type_name(d, id);

	if (!btf_kflag_of(t))
		btf_dump_printf(d, "struct %s", name);
	else
		btf_dump_printf(d, "union %s", name);
}
/* emit 'typedef <underlying-type-decl-with-resolved-name>' */
static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
				      const struct btf_type *t, int lvl)
{
	btf_dump_printf(d, "typedef ");
	btf_dump_emit_type_decl(d, t->type, btf_dump_ident_name(d, id), lvl);
}
/*
 * Push a type id onto the shared declaration stack, growing capacity
 * by ~1.5x (min 16) when full. Returns 0 on success, -ENOMEM on OOM.
 */
static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
{
	if (d->decl_stack_cnt >= d->decl_stack_cap) {
		size_t grown_cap = max(16, d->decl_stack_cap * 3 / 2);
		__u32 *grown;

		grown = realloc(d->decl_stack, grown_cap * sizeof(*grown));
		if (!grown)
			return -ENOMEM;

		d->decl_stack = grown;
		d->decl_stack_cap = grown_cap;
	}

	d->decl_stack[d->decl_stack_cnt++] = id;
	return 0;
}
/*
* Emit type declaration (e.g., field type declaration in a struct or argument
* declaration in function prototype) in correct C syntax.
*
* For most types it's trivial, but there are few quirky type declaration
* cases worth mentioning:
* - function prototypes (especially nesting of function prototypes);
* - arrays;
* - const/volatile/restrict for pointers vs other types.
*
* For a good discussion of *PARSING* C syntax (as a human), see
* Peter van der Linden's "Expert C Programming: Deep C Secrets",
* Ch.3 "Unscrambling Declarations in C".
*
* It won't help with BTF to C conversion much, though, as it's an opposite
* problem. So we came up with this algorithm in reverse to van der Linden's
* parsing algorithm. It goes from structured BTF representation of type
* declaration to a valid compilable C syntax.
*
* For instance, consider this C typedef:
* typedef const int * const * arr[10] arr_t;
* It will be represented in BTF with this chain of BTF types:
* [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
*
* Notice how [const] modifier always goes before type it modifies in BTF type
* graph, but in C syntax, const/volatile/restrict modifiers are written to
* the right of pointers, but to the left of other types. There are also other
* quirks, like function pointers, arrays of them, functions returning other
* functions, etc.
*
* We handle that by pushing all the types to a stack, until we hit "terminal"
* type (int/enum/struct/union/fwd). Then depending on the kind of a type on
* top of a stack, modifiers are handled differently. Array/function pointers
* have also wildly different syntax and how nesting of them are done. See
* code for authoritative definition.
*
* To avoid allocating new stack for each independent chain of BTF types, we
* share one bigger stack, with each chain working only on its own local view
* of a stack frame. Some care is required to "pop" stack frames after
* processing type declaration chain.
*/
static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
				    const char *fname, int lvl)
{
	struct id_stack decl_stack;
	const struct btf_type *t;
	int err, stack_start;
	__u16 kind;

	stack_start = d->decl_stack_cnt;
	for (;;) {
		/* push the whole chain of modifier/ptr/array/func_proto
		 * types until a "terminal" type (int/enum/fwd/struct/union/
		 * typedef) or VOID is reached
		 */
		err = btf_dump_push_decl_stack_id(d, id);
		if (err < 0) {
			/*
			 * if we don't have enough memory for entire type decl
			 * chain, restore stack, emit warning, and try to
			 * proceed nevertheless
			 */
			pr_warning("not enough memory for decl stack:%d", err);
			d->decl_stack_cnt = stack_start;
			return;
		}

		/* VOID */
		if (id == 0)
			break;

		t = btf__type_by_id(d->btf, id);
		kind = btf_kind_of(t);
		switch (kind) {
		case BTF_KIND_PTR:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_FUNC_PROTO:
			/* follow the referenced type down the chain */
			id = t->type;
			break;
		case BTF_KIND_ARRAY: {
			const struct btf_array *a = (void *)(t + 1);

			id = a->type;
			break;
		}
		case BTF_KIND_INT:
		case BTF_KIND_ENUM:
		case BTF_KIND_FWD:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_TYPEDEF:
			/* terminal type: decl chain is complete */
			goto done;
		default:
			pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
				   kind, id);
			goto done;
		}
	}
done:
	/*
	 * We might be inside a chain of declarations (e.g., array of function
	 * pointers returning anonymous (so inlined) structs, having another
	 * array field). Each of those needs its own "stack frame" to handle
	 * emitting of declarations. Those stack frames are non-overlapping
	 * portions of shared btf_dump->decl_stack. To make it a bit nicer to
	 * handle this set of nested stacks, we create a view corresponding to
	 * our own "stack frame" and work with it as an independent stack.
	 * We'll need to clean up after emit_type_chain() returns, though.
	 */
	decl_stack.ids = d->decl_stack + stack_start;
	decl_stack.cnt = d->decl_stack_cnt - stack_start;
	btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
	/*
	 * emit_type_chain() guarantees that it will pop its entire decl_stack
	 * frame before returning. But it works with a read-only view into
	 * decl_stack, so it doesn't actually pop anything from the
	 * perspective of shared btf_dump->decl_stack, per se. We need to
	 * reset decl_stack state to how it was before us to avoid it growing
	 * all the time.
	 */
	d->decl_stack_cnt = stack_start;
}
static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
{
const struct btf_type *t;
__u32 id;
while (decl_stack->cnt) {
id = decl_stack->ids[decl_stack->cnt - 1];
t = btf__type_by_id(d->btf, id);
switch (btf_kind_of(t)) {
case BTF_KIND_VOLATILE:
btf_dump_printf(d, "volatile ");
break;
case BTF_KIND_CONST:
btf_dump_printf(d, "const ");
break;
case BTF_KIND_RESTRICT:
btf_dump_printf(d, "restrict ");
break;
default:
return;
}
decl_stack->cnt--;
}
}
/* true if the given type id is a const/volatile/restrict type modifier */
static bool btf_is_mod_kind(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind_of(t);

	return kind == BTF_KIND_VOLATILE ||
	       kind == BTF_KIND_CONST ||
	       kind == BTF_KIND_RESTRICT;
}
/* emit identifier name, space-separated from preceding type text unless
 * the name is empty or directly follows a pointer asterisk
 */
static void btf_dump_emit_name(const struct btf_dump *d,
			       const char *name, bool last_was_ptr)
{
	bool need_space = name[0] != '\0' && !last_was_ptr;

	btf_dump_printf(d, "%s%s", need_space ? " " : "", name);
}
static void btf_dump_emit_type_chain(struct btf_dump *d,
				     struct id_stack *decls,
				     const char *fname, int lvl)
{
	/*
	 * last_was_ptr is used to determine if we need to separate pointer
	 * asterisk (*) from previous part of type signature with space, so
	 * that we get `int ***`, instead of `int * * *`. We default to true
	 * for cases where we have single pointer in a chain. E.g., in ptr ->
	 * func_proto case. func_proto will start a new emit_type_chain call
	 * with just ptr, which should be emitted as (*) or (*<fname>), so we
	 * don't want to prepend space for that last pointer.
	 */
	bool last_was_ptr = true;
	const struct btf_type *t;
	const char *name;
	__u16 kind;
	__u32 id;

	/* pop ids top-first: outermost type first, in C reading order */
	while (decls->cnt) {
		id = decls->ids[--decls->cnt];
		if (id == 0) {
			/* VOID is a special snowflake */
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "void");
			last_was_ptr = false;
			continue;
		}

		t = btf__type_by_id(d->btf, id);
		kind = btf_kind_of(t);
		switch (kind) {
		case BTF_KIND_INT:
			btf_dump_emit_mods(d, decls);
			name = btf_name_of(d, t->name_off);
			btf_dump_printf(d, "%s", name);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous struct/union */
			if (t->name_off == 0)
				btf_dump_emit_struct_def(d, id, t, lvl);
			else
				btf_dump_emit_struct_fwd(d, id, t);
			break;
		case BTF_KIND_ENUM:
			btf_dump_emit_mods(d, decls);
			/* inline anonymous enum */
			if (t->name_off == 0)
				btf_dump_emit_enum_def(d, id, t, lvl);
			else
				btf_dump_emit_enum_fwd(d, id, t);
			break;
		case BTF_KIND_FWD:
			btf_dump_emit_mods(d, decls);
			btf_dump_emit_fwd_def(d, id, t);
			break;
		case BTF_KIND_TYPEDEF:
			btf_dump_emit_mods(d, decls);
			btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
			break;
		case BTF_KIND_PTR:
			/* no space between consecutive asterisks */
			btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
			break;
		case BTF_KIND_VOLATILE:
			btf_dump_printf(d, " volatile");
			break;
		case BTF_KIND_CONST:
			btf_dump_printf(d, " const");
			break;
		case BTF_KIND_RESTRICT:
			btf_dump_printf(d, " restrict");
			break;
		case BTF_KIND_ARRAY: {
			const struct btf_array *a = (void *)(t + 1);
			const struct btf_type *next_t;
			__u32 next_id;
			bool multidim;
			/*
			 * GCC has a bug
			 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
			 * which causes it to emit extra const/volatile
			 * modifiers for an array, if array's element type has
			 * const/volatile modifiers. Clang doesn't do that.
			 * In general, it doesn't seem very meaningful to have
			 * a const/volatile modifier for array, so we are
			 * going to silently skip them here.
			 */
			while (decls->cnt) {
				next_id = decls->ids[decls->cnt - 1];
				if (btf_is_mod_kind(d->btf, next_id))
					decls->cnt--;
				else
					break;
			}

			if (decls->cnt == 0) {
				/* array is the innermost part of the decl */
				btf_dump_emit_name(d, fname, last_was_ptr);
				btf_dump_printf(d, "[%u]", a->nelems);
				return;
			}

			next_t = btf__type_by_id(d->btf, next_id);
			multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY;
			/* we need space if we have named non-pointer */
			if (fname[0] && !last_was_ptr)
				btf_dump_printf(d, " ");
			/* no parentheses for multi-dimensional array */
			if (!multidim)
				btf_dump_printf(d, "(");
			btf_dump_emit_type_chain(d, decls, fname, lvl);
			if (!multidim)
				btf_dump_printf(d, ")");
			btf_dump_printf(d, "[%u]", a->nelems);
			return;
		}
		case BTF_KIND_FUNC_PROTO: {
			const struct btf_param *p = (void *)(t + 1);
			__u16 vlen = btf_vlen_of(t);
			int i;

			btf_dump_emit_mods(d, decls);
			if (decls->cnt) {
				/* e.g., function pointers: emit '( <rest> )' */
				btf_dump_printf(d, " (");
				btf_dump_emit_type_chain(d, decls, fname, lvl);
				btf_dump_printf(d, ")");
			} else {
				btf_dump_emit_name(d, fname, last_was_ptr);
			}
			btf_dump_printf(d, "(");
			/*
			 * Clang for BPF target generates func_proto with no
			 * args as a func_proto with a single void arg (e.g.,
			 * `int (*f)(void)` vs just `int (*f)()`). We are
			 * going to pretend there are no args for such case.
			 */
			if (vlen == 1 && p->type == 0) {
				btf_dump_printf(d, ")");
				return;
			}

			for (i = 0; i < vlen; i++, p++) {
				if (i > 0)
					btf_dump_printf(d, ", ");
				/* last arg of type void is vararg */
				if (i == vlen - 1 && p->type == 0) {
					btf_dump_printf(d, "...");
					break;
				}

				name = btf_name_of(d, p->name_off);
				btf_dump_emit_type_decl(d, p->type, name, lvl);
			}

			btf_dump_printf(d, ")");
			return;
		}
		default:
			pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
				   kind, id);
			return;
		}

		last_was_ptr = kind == BTF_KIND_PTR;
	}

	btf_dump_emit_name(d, fname, last_was_ptr);
}
/* return number of duplicates (occurrences) of a given name */
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
				 const char *orig_name)
{
	size_t dup_cnt = 0;

	/* the count is stored directly in the hashmap's value pointer;
	 * assumes sizeof(size_t) == sizeof(void *) — TODO confirm on all
	 * supported targets
	 */
	hashmap__find(name_map, orig_name, (void **)&dup_cnt);
	dup_cnt++;
	hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);

	return dup_cnt;
}
/*
 * Resolve a unique C-level name for type id within the given namespace
 * (type names or identifier names). The first occurrence of a name keeps
 * it as-is; subsequent duplicates get a '<name>___<N>' suffix, cached in
 * d->cached_names (owned by btf_dump, freed in btf_dump__free()).
 * Anonymous types resolve to "".
 */
static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
					 struct hashmap *name_map)
{
	struct btf_dump_type_aux_state *s = &d->type_states[id];
	const struct btf_type *t = btf__type_by_id(d->btf, id);
	const char *orig_name = btf_name_of(d, t->name_off);
	const char **cached_name = &d->cached_names[id];
	size_t dup_cnt;

	if (t->name_off == 0)
		return "";

	if (s->name_resolved)
		return *cached_name ? *cached_name : orig_name;

	dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
	if (dup_cnt > 1) {
		/* fixed-size buffer: the original's `const size_t` bound made
		 * this a VLA, since a const variable is not a constant
		 * expression in C
		 */
		char new_name[256];

		snprintf(new_name, sizeof(new_name), "%s___%zu",
			 orig_name, dup_cnt);
		/* on strdup failure we silently fall back to orig_name below,
		 * at the cost of a possibly ambiguous name
		 */
		*cached_name = strdup(new_name);
	}

	s->name_resolved = 1;
	return *cached_name ? *cached_name : orig_name;
}
/* resolve (possibly de-duplicated) struct/union/enum type name */
static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->type_names);
}
/* resolve identifier name (typedefs; shared namespace with enumerators) */
static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
{
	return btf_dump_resolve_name(d, id, d->ident_names);
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Generic non-thread safe hash map implementation.
*
* Copyright (c) 2019 Facebook
*/
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"
/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2
/* push entry at the head of a singly-linked bucket list */
static void hashmap_add_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	struct hashmap_entry *old_head = *pprev;

	entry->next = old_head;
	*pprev = entry;
}
/* unlink entry; pprev is the address of the 'next' slot pointing at it */
static void hashmap_del_entry(struct hashmap_entry **pprev,
			      struct hashmap_entry *entry)
{
	struct hashmap_entry *successor = entry->next;

	entry->next = NULL;
	*pprev = successor;
}
/* initialize a caller-allocated hashmap; no buckets are allocated until
 * the first insertion
 */
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
		   hashmap_equal_fn equal_fn, void *ctx)
{
	map->buckets = NULL;
	map->cap = 0;
	map->cap_bits = 0;
	map->sz = 0;

	map->hash_fn = hash_fn;
	map->equal_fn = equal_fn;
	map->ctx = ctx;
}
/* heap-allocate and initialize a hashmap; returns ERR_PTR(-ENOMEM) on OOM */
struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
			     hashmap_equal_fn equal_fn,
			     void *ctx)
{
	struct hashmap *map;

	map = malloc(sizeof(*map));
	if (!map)
		return ERR_PTR(-ENOMEM);

	hashmap__init(map, hash_fn, equal_fn, ctx);
	return map;
}
void hashmap__clear(struct hashmap *map)
{
free(map->buckets);
map->cap = map->cap_bits = map->sz = 0;
}
/* destroy a hashmap__new()-allocated map; NULL is a no-op, like free() */
void hashmap__free(struct hashmap *map)
{
	if (map == NULL)
		return;

	hashmap__clear(map);
	free(map);
}
/* number of key/value entries currently stored in the map */
size_t hashmap__size(const struct hashmap *map)
{
	return map->sz;
}
/* current number of hash buckets (not entries) allocated by the map */
size_t hashmap__capacity(const struct hashmap *map)
{
	return map->cap;
}
static bool hashmap_needs_to_grow(struct hashmap *map)
{
/* grow if empty or more than 75% filled */
return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
}
/* double the bucket count (at least to 2^HASHMAP_MIN_CAP_BITS) and
 * rehash all existing entries into the new bucket array
 */
static int hashmap_grow(struct hashmap *map)
{
	struct hashmap_entry **new_buckets;
	struct hashmap_entry *cur, *tmp;
	size_t new_cap_bits, new_cap;
	size_t h;
	int bkt;

	new_cap_bits = map->cap_bits + 1;
	if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
		new_cap_bits = HASHMAP_MIN_CAP_BITS;

	new_cap = 1UL << new_cap_bits;
	new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
	if (!new_buckets)
		return -ENOMEM;

	/* _safe variant: entries are relinked into new buckets as we go */
	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
		hashmap_add_entry(&new_buckets[h], cur);
	}

	map->cap = new_cap;
	map->cap_bits = new_cap_bits;
	free(map->buckets);
	map->buckets = new_buckets;

	return 0;
}
/* Walk the chain in bucket `hash` looking for a key that compares equal.
 * On success, stores the entry in *entry and (if pprev is non-NULL) the
 * link that points at it, so callers can unlink it in O(1).
 */
static bool hashmap_find_entry(const struct hashmap *map,
			       const void *key, size_t hash,
			       struct hashmap_entry ***pprev,
			       struct hashmap_entry **entry)
{
	struct hashmap_entry **link, *cur;

	if (!map->buckets)
		return false;

	link = &map->buckets[hash];
	while ((cur = *link)) {
		if (map->equal_fn(cur->key, key, map->ctx)) {
			if (pprev)
				*pprev = link;
			*entry = cur;
			return true;
		}
		link = &cur->next;
	}

	return false;
}
/* Insert key/value according to the requested strategy (see the enum's
 * documentation in hashmap.h). When an existing entry is replaced
 * (SET/UPDATE), the previous key/value are handed back via old_key /
 * old_value (each may be NULL) so the caller can free them.
 * Returns 0 on success, -EEXIST (ADD on existing key), -ENOENT (UPDATE on
 * missing key), or -ENOMEM.
 */
int hashmap__insert(struct hashmap *map, const void *key, void *value,
		    enum hashmap_insert_strategy strategy,
		    const void **old_key, void **old_value)
{
	struct hashmap_entry *entry;
	size_t h;
	int err;

	if (old_key)
		*old_key = NULL;
	if (old_value)
		*old_value = NULL;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	/* APPEND skips the lookup entirely: duplicates are allowed */
	if (strategy != HASHMAP_APPEND &&
	    hashmap_find_entry(map, key, h, NULL, &entry)) {
		if (old_key)
			*old_key = entry->key;
		if (old_value)
			*old_value = entry->value;

		if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
			/* note: the stored key is replaced as well */
			entry->key = key;
			entry->value = value;
			return 0;
		} else if (strategy == HASHMAP_ADD) {
			return -EEXIST;
		}
	}

	/* reaching here with UPDATE means the key was not found */
	if (strategy == HASHMAP_UPDATE)
		return -ENOENT;

	if (hashmap_needs_to_grow(map)) {
		err = hashmap_grow(map);
		if (err)
			return err;
		/* bucket index depends on cap_bits, recompute after growing */
		h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	}

	entry = malloc(sizeof(struct hashmap_entry));
	if (!entry)
		return -ENOMEM;

	entry->key = key;
	entry->value = value;
	hashmap_add_entry(&map->buckets[h], entry);
	map->sz++;

	return 0;
}
/* Look up key. Returns true if found, storing the value through *value
 * when value is non-NULL. In APPEND (multimap) mode this reports the most
 * recently inserted entry for the key (chains are prepended).
 */
bool hashmap__find(const struct hashmap *map, const void *key, void **value)
{
	struct hashmap_entry *e;
	size_t h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);

	if (!hashmap_find_entry(map, key, h, NULL, &e))
		return false;

	if (value)
		*value = e->value;
	return true;
}
/* Remove the entry matching key, if any. Returns false when the key is not
 * present. The removed key/value are reported via old_key/old_value (each
 * may be NULL) so the caller can release any memory they own.
 */
bool hashmap__delete(struct hashmap *map, const void *key,
		     const void **old_key, void **old_value)
{
	struct hashmap_entry **pprev, *entry;
	size_t h;

	h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
	if (!hashmap_find_entry(map, key, h, &pprev, &entry))
		return false;

	if (old_key)
		*old_key = entry->key;
	if (old_value)
		*old_value = entry->value;

	hashmap_del_entry(pprev, entry);
	free(entry);
	map->sz--;

	return true;
}
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
* Generic non-thread safe hash map implementation.
*
* Copyright (c) 2019 Facebook
*/
#ifndef __LIBBPF_HASHMAP_H
#define __LIBBPF_HASHMAP_H
#include <stdbool.h>
#include <stddef.h>
#include "libbpf_internal.h"
/* Mix the hash with the Fibonacci multiplier (2^64 / golden ratio) and
 * return the requested number of its upper bits as the bucket index.
 */
static inline size_t hash_bits(size_t h, int bits)
{
	/* shuffle bits and return requested number of upper bits */
	if (bits == 0)
		return 0;
	/* bits == 0 used to shift by __WORDSIZE, which is undefined
	 * behavior; it happens for every lookup on an empty map
	 * (cap_bits == 0), so guard explicitly
	 */
	return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
}
/* user-supplied hash function; ctx is the opaque state given at map setup */
typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
/* user-supplied key equality predicate; ctx as above */
typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);

/* collision-chain node; key and value memory is owned by the caller */
struct hashmap_entry {
	const void *key;
	void *value;
	struct hashmap_entry *next;
};

/* chained hash map with cap == 1 << cap_bits buckets and sz entries */
struct hashmap {
	hashmap_hash_fn hash_fn;
	hashmap_equal_fn equal_fn;
	void *ctx;

	struct hashmap_entry **buckets;
	size_t cap;
	size_t cap_bits;
	size_t sz;
};
/* Static initializer for a struct hashmap; equivalent to hashmap__init() */
#define HASHMAP_INIT(hash_fn, equal_fn, ctx) {	\
	.hash_fn = (hash_fn),			\
	.equal_fn = (equal_fn),			\
	.ctx = (ctx),				\
	.buckets = NULL,			\
	.cap = 0,				\
	.cap_bits = 0,				\
	.sz = 0,				\
}
/* initialize a caller-allocated (e.g., embedded or static) map in place */
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
		   hashmap_equal_fn equal_fn, void *ctx);
/* allocate and initialize a new map; returns ERR_PTR(-ENOMEM) on OOM */
struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
			     hashmap_equal_fn equal_fn,
			     void *ctx);
/* reset the map to the empty state, releasing the bucket array */
void hashmap__clear(struct hashmap *map);
/* hashmap__clear() plus free of the map struct itself; NULL-safe */
void hashmap__free(struct hashmap *map);

/* number of stored entries */
size_t hashmap__size(const struct hashmap *map);
/* current number of buckets */
size_t hashmap__capacity(const struct hashmap *map);
/*
 * Hashmap insertion strategy:
 * - HASHMAP_ADD - only add key/value if key doesn't exist yet;
 * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise,
 *   update value;
 * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do
 *   nothing and return -ENOENT;
 * - HASHMAP_APPEND - always add key/value pair, even if key already exists.
 *   This turns hashmap into a multimap by allowing multiple values to be
 *   associated with the same key. Most useful read API for such hashmap is
 *   hashmap__for_each_key_entry() iteration. If hashmap__find() is still
 *   used, it will return last inserted key/value entry (first in a bucket
 *   chain).
 */
enum hashmap_insert_strategy {
	HASHMAP_ADD,	/* fail with -EEXIST if key already present */
	HASHMAP_SET,	/* insert or overwrite */
	HASHMAP_UPDATE,	/* overwrite only; -ENOENT if key is missing */
	HASHMAP_APPEND,	/* always insert; duplicates allowed (multimap) */
};

/*
 * hashmap__insert() adds key/value entry w/ various semantics, depending on
 * provided strategy value. If a given key/value pair replaced already
 * existing key/value pair, both old key and old value will be returned
 * through old_key and old_value to allow calling code do proper memory
 * management.
 */
int hashmap__insert(struct hashmap *map, const void *key, void *value,
		    enum hashmap_insert_strategy strategy,
		    const void **old_key, void **old_value);
/* hashmap__insert() with HASHMAP_ADD: fails with -EEXIST on existing key */
static inline int hashmap__add(struct hashmap *map,
			       const void *key, void *value)
{
	return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
}
/* hashmap__insert() with HASHMAP_SET: insert or overwrite; replaced
 * key/value (if any) are reported through old_key/old_value
 */
static inline int hashmap__set(struct hashmap *map,
			       const void *key, void *value,
			       const void **old_key, void **old_value)
{
	return hashmap__insert(map, key, value, HASHMAP_SET,
			       old_key, old_value);
}
/* hashmap__insert() with HASHMAP_UPDATE: overwrite existing key only;
 * returns -ENOENT if the key is absent
 */
static inline int hashmap__update(struct hashmap *map,
				  const void *key, void *value,
				  const void **old_key, void **old_value)
{
	return hashmap__insert(map, key, value, HASHMAP_UPDATE,
			       old_key, old_value);
}
/* hashmap__insert() with HASHMAP_APPEND: always insert, allowing duplicate
 * keys (multimap usage)
 */
static inline int hashmap__append(struct hashmap *map,
				  const void *key, void *value)
{
	return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
}
/* remove key's entry; false if not found; removed key/value reported via
 * old_key/old_value (each may be NULL)
 */
bool hashmap__delete(struct hashmap *map, const void *key,
		     const void **old_key, void **old_value);
/* look up key; on success returns true and stores value via *value */
bool hashmap__find(const struct hashmap *map, const void *key, void **value);
/*
 * hashmap__for_each_entry - iterate over all entries in hashmap
 * @map: hashmap to iterate
 * @cur: struct hashmap_entry * used as a loop cursor
 * @bkt: integer used as a bucket loop cursor
 *
 * Removing the current entry is only safe with the _safe variant below.
 */
#define hashmap__for_each_entry(map, cur, bkt)				    \
	for (bkt = 0; bkt < map->cap; bkt++)				    \
		for (cur = map->buckets[bkt]; cur; cur = cur->next)

/*
 * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
 * against removals
 * @map: hashmap to iterate
 * @cur: struct hashmap_entry * used as a loop cursor
 * @tmp: struct hashmap_entry * used as a temporary next cursor storage
 * @bkt: integer used as a bucket loop cursor
 */
#define hashmap__for_each_entry_safe(map, cur, tmp, bkt)		    \
	for (bkt = 0; bkt < map->cap; bkt++)				    \
		for (cur = map->buckets[bkt];				    \
		     cur && ({tmp = cur->next; true; });		    \
		     cur = tmp)
/*
 * hashmap__for_each_key_entry - iterate over entries associated with given key
 * @map: hashmap to iterate
 * @cur: struct hashmap_entry * used as a loop cursor
 * @key: key to iterate entries for
 *
 * Walks key's whole bucket chain, filtering with equal_fn, so it visits
 * every duplicate inserted via HASHMAP_APPEND.
 */
#define hashmap__for_each_key_entry(map, cur, _key)			    \
	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
					     map->cap_bits);		    \
		      map->buckets ? map->buckets[bkt] : NULL; });	    \
	     cur;							    \
	     cur = cur->next)						    \
		if (map->equal_fn(cur->key, (_key), map->ctx))

/*
 * hashmap__for_each_key_entry_safe - hashmap__for_each_key_entry() variant
 * that is safe against removal of the current entry
 * @map: hashmap to iterate
 * @cur: struct hashmap_entry * used as a loop cursor
 * @tmp: struct hashmap_entry * used as a temporary next cursor storage
 * @key: key to iterate entries for
 */
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key)		    \
	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
					     map->cap_bits);		    \
		      cur = map->buckets ? map->buckets[bkt] : NULL; });    \
	     cur && ({ tmp = cur->next; true; });			    \
	     cur = tmp)							    \
		if (map->equal_fn(cur->key, (_key), map->ctx))
#endif /* __LIBBPF_HASHMAP_H */
......@@ -164,3 +164,11 @@ LIBBPF_0.0.3 {
bpf_map_freeze;
btf__finalize_data;
} LIBBPF_0.0.2;
LIBBPF_0.0.4 {
global:
btf_dump__dump_type;
btf_dump__free;
btf_dump__new;
btf__parse_elf;
} LIBBPF_0.0.3;
......@@ -9,6 +9,8 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
#include "libbpf.h"
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
......
......@@ -35,3 +35,5 @@ test_sysctl
alu32
libbpf.pc
libbpf.so.*
test_hashmap
test_btf_dump
......@@ -23,7 +23,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
test_btf_dump
BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
TEST_GEN_FILES = $(BPF_OBJ_FILES)
......
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for bitfield.
*
* Copyright (c) 2019 Facebook
*/
#include <stdbool.h>
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfields_only_mixed_types {
* int a: 3;
* long int b: 2;
* _Bool c: 1;
* enum {
* A = 0,
* B = 1,
* } d: 1;
* short e: 5;
* int: 20;
* unsigned int f: 30;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
/* Struct composed of bitfields over several base types; the expected C dump
 * is in the EXPECTED-OUTPUT comment block above (extracted by the harness).
 */
struct bitfields_only_mixed_types {
	int a: 3;
	long int b: 2;
	bool c: 1; /* it's really a _Bool type */
	enum {
		A, /* A = 0, dumper is very explicit */
		B, /* B = 1, same */
	} d: 1;
	short e: 5;
	/* 20-bit padding here */
	unsigned f: 30; /* this gets aligned on 4-byte boundary */
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfield_mixed_with_others {
* char: 4;
* int a: 4;
* short b;
* long int c;
* long int d: 8;
* int e;
* int f;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
/* Bitfields interleaved with regular fields; implicit paddings called out
 * inline, expected dump is in the comment block above.
 */
struct bitfield_mixed_with_others {
	long: 4; /* char is enough as a backing field */
	int a: 4;
	/* 8-bit implicit padding */
	short b; /* combined with previous bitfield */
	/* 4 more bytes of implicit padding */
	long c;
	long d: 8;
	/* 24 bits implicit padding */
	int e; /* combined with previous bitfield */
	int f;
	/* 4 bytes of padding */
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct bitfield_flushed {
* int a: 4;
* long: 60;
* long int b: 16;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
/* Zero-width bitfield forces alignment to the next natural boundary */
struct bitfield_flushed {
	int a: 4;
	long: 0; /* flush until next natural alignment boundary */
	long b: 16;
};
/* Anchor function: referencing all test structs through the anonymous
 * parameter struct keeps their type info in the compiled object, which the
 * BTF dump test reads back.
 */
int f(struct {
	struct bitfields_only_mixed_types _1;
	struct bitfield_mixed_with_others _2;
	struct bitfield_flushed _3;
} *_)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for multi-dimensional array output.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
typedef int arr_t[2];
typedef int multiarr_t[3][4][5];
typedef int *ptr_arr_t[6];
typedef int *ptr_multiarr_t[7][8][9][10];
typedef int * (*fn_ptr_arr_t[11])();
typedef int * (*fn_ptr_multiarr_t[12][13])();
struct root_struct {
arr_t _1;
multiarr_t _2;
ptr_arr_t _3;
ptr_multiarr_t _4;
fn_ptr_arr_t _5;
fn_ptr_multiarr_t _6;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* Anchor function: references root_struct so the multi-dimensional array
 * types above reach the object's type info for the dump test.
 */
int f(struct root_struct *s)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test validating no name versioning happens between
* independent C namespaces (struct/union/enum vs typedef/enum values).
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct S {
int S;
int U;
};
typedef struct S S;
union U {
int S;
int U;
};
typedef union U U;
enum E {
V = 0,
};
typedef enum E E;
struct A {};
union B {};
enum C {
A = 1,
B = 2,
C = 3,
};
struct X {};
union Y {};
enum Z;
typedef int X;
typedef int Y;
typedef int Z;
/*------ END-EXPECTED-OUTPUT ------ */
/* Anchor function: the anonymous parameter struct references every test
 * type so each C namespace variant (tag vs. typedef vs. enum value) is
 * recorded for the namespacing dump test.
 */
int f(struct {
	struct S _1;
	S _2;
	union U _3;
	U _4;
	enum E _5;
	E _6;
	struct A a;
	union B b;
	enum C c;
	struct X x;
	union Y y;
	enum Z *z;
	X xx;
	Y yy;
	Z zz;
} *_)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for topological sorting of dependent structs.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct s1 {};
struct s3;
struct s4;
struct s2 {
struct s2 *s2;
struct s3 *s3;
struct s4 *s4;
};
struct s3 {
struct s1 s1;
struct s2 s2;
};
struct s4 {
struct s1 s1;
struct s3 s3;
};
struct list_head {
struct list_head *next;
struct list_head *prev;
};
struct hlist_node {
struct hlist_node *next;
struct hlist_node **pprev;
};
struct hlist_head {
struct hlist_node *first;
};
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *);
};
struct root_struct {
struct s4 s4;
struct list_head l;
struct hlist_node n;
struct hlist_head h;
struct callback_head cb;
};
/*------ END-EXPECTED-OUTPUT ------ */
/* Anchor function: references root_struct so all dependent struct types
 * are emitted for the topological-ordering dump test.
 */
int f(struct root_struct *root)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for struct packing determination.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct packed_trailing_space {
int a;
short b;
} __attribute__((packed));
struct non_packed_trailing_space {
int a;
short b;
};
struct packed_fields {
short a;
int b;
} __attribute__((packed));
struct non_packed_fields {
short a;
int b;
};
struct nested_packed {
char: 4;
int a: 4;
long int b;
struct {
char c;
int d;
} __attribute__((packed)) e;
} __attribute__((packed));
union union_is_never_packed {
int a: 4;
char b;
char c: 1;
};
union union_does_not_need_packing {
struct {
long int a;
int b;
} __attribute__((packed));
int c;
};
union jump_code_union {
char code[5];
struct {
char jump;
int offset;
} __attribute__((packed));
};
/*------ END-EXPECTED-OUTPUT ------ */
/* Anchor function: references all packed/non-packed test types so they are
 * present in the object for the struct-packing dump test.
 */
int f(struct {
	struct packed_trailing_space _1;
	struct non_packed_trailing_space _2;
	struct packed_fields _3;
	struct non_packed_fields _4;
	struct nested_packed _5;
	union union_is_never_packed _6;
	union union_does_not_need_packing _7;
	union jump_code_union _8;
} *_)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper tests for implicit and explicit padding between fields and
* at the end of a struct.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
struct padded_implicitly {
int a;
long int b;
char c;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_explicitly {
* int a;
* int: 32;
* int b;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padded_explicitly {
int a;
int: 1; /* algo will explicitly pad with full 32 bits here */
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_a_lot {
* int a;
* long: 32;
* long: 64;
* long: 64;
* int b;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padded_a_lot {
int a;
/* 32 bit of implicit padding here, which algo will make explicit */
long: 64;
long: 64;
int b;
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct padded_cache_line {
* int a;
* long: 32;
* long: 64;
* long: 64;
* long: 64;
* int b;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct padded_cache_line {
int a;
int b __attribute__((aligned(32)));
};
/* ----- START-EXPECTED-OUTPUT ----- */
/*
*struct zone_padding {
* char x[0];
*};
*
*struct zone {
* int a;
* short b;
* short: 16;
* struct zone_padding __pad__;
*};
*
*/
/* ------ END-EXPECTED-OUTPUT ------ */
struct zone_padding {
char x[0];
} __attribute__((__aligned__(8)));
struct zone {
int a;
short b;
short: 16;
struct zone_padding __pad__;
};
/* Anchor function: references all padding test structs so they are present
 * in the object for the padding dump test.
 */
int f(struct {
	struct padded_implicitly _1;
	struct padded_explicitly _2;
	struct padded_a_lot _3;
	struct padded_cache_line _4;
	struct zone _5;
} *_)
{
	return 0;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* BTF-to-C dumper test for majority of C syntax quirks.
*
* Copyright (c) 2019 Facebook
*/
/* ----- START-EXPECTED-OUTPUT ----- */
enum e1 {
A = 0,
B = 1,
};
enum e2 {
C = 100,
D = -100,
E = 0,
};
typedef enum e2 e2_t;
typedef enum {
F = 0,
G = 1,
H = 2,
} e3_t;
typedef int int_t;
typedef volatile const int * volatile const crazy_ptr_t;
typedef int *****we_need_to_go_deeper_ptr_t;
typedef volatile const we_need_to_go_deeper_ptr_t * restrict * volatile * const * restrict volatile * restrict const * volatile const * restrict volatile const how_about_this_ptr_t;
typedef int *ptr_arr_t[10];
typedef void (*fn_ptr1_t)(int);
typedef void (*printf_fn_t)(const char *, ...);
/* ------ END-EXPECTED-OUTPUT ------ */
/*
* While previous function pointers are pretty trivial (C-syntax-level
* trivial), the following are deciphered here for future generations:
*
* - `fn_ptr2_t`: function, taking anonymous struct as a first arg and pointer
* to a function, that takes int and returns int, as a second arg; returning
* a pointer to a const pointer to a char. Equivalent to:
* typedef struct { int a; } s_t;
* typedef int (*fn_t)(int);
* typedef char * const * (*fn_ptr2_t)(s_t, fn_t);
*
* - `fn_complext_t`: pointer to a function returning struct and accepting
* union and struct. All structs and enum are anonymous and defined inline.
*
* - `signal_t: pointer to a function accepting a pointer to a function as an
* argument and returning pointer to a function as a result. Sane equivalent:
* typedef void (*signal_handler_t)(int);
* typedef signal_handler_t (*signal_ptr_t)(int, signal_handler_t);
*
* - fn_ptr_arr1_t: array of pointers to a function accepting pointer to
* a pointer to an int and returning pointer to a char. Easy.
*
* - fn_ptr_arr2_t: array of const pointers to a function taking no arguments
* and returning a const pointer to a function, that takes pointer to a
* `int -> char *` function and returns pointer to a char. Equivalent:
* typedef char * (*fn_input_t)(int);
* typedef char * (*fn_output_outer_t)(fn_input_t);
* typedef const fn_output_outer_t (* fn_output_inner_t)();
* typedef const fn_output_inner_t fn_ptr_arr2_t[5];
*/
/* ----- START-EXPECTED-OUTPUT ----- */
typedef char * const * (*fn_ptr2_t)(struct {
int a;
}, int (*)(int));
typedef struct {
int a;
void (*b)(int, struct {
int c;
}, union {
char d;
int e[5];
});
} (*fn_complex_t)(union {
void *f;
char g[16];
}, struct {
int h;
});
typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
struct struct_w_typedefs {
int_t a;
crazy_ptr_t b;
we_need_to_go_deeper_ptr_t c;
how_about_this_ptr_t d;
ptr_arr_t e;
fn_ptr1_t f;
printf_fn_t g;
fn_ptr2_t h;
fn_complex_t i;
signal_t j;
fn_ptr_arr1_t k;
fn_ptr_arr2_t l;
};
typedef struct {
int x;
int y;
int z;
} anon_struct_t;
struct struct_fwd;
typedef struct struct_fwd struct_fwd_t;
typedef struct struct_fwd *struct_fwd_ptr_t;
union union_fwd;
typedef union union_fwd union_fwd_t;
typedef union union_fwd *union_fwd_ptr_t;
struct struct_empty {};
struct struct_simple {
int a;
char b;
const int_t *p;
struct struct_empty s;
enum e2 e;
enum {
ANON_VAL1 = 1,
ANON_VAL2 = 2,
} f;
int arr1[13];
enum e2 arr2[5];
};
union union_empty {};
union union_simple {
void *ptr;
int num;
int_t num2;
union union_empty u;
};
struct struct_in_struct {
struct struct_simple simple;
union union_simple also_simple;
struct {
int a;
} not_so_hard_as_well;
union {
int b;
int c;
} anon_union_is_good;
struct {
int d;
int e;
};
union {
int f;
int g;
};
};
struct struct_with_embedded_stuff {
int a;
struct {
int b;
struct {
struct struct_with_embedded_stuff *c;
const char *d;
} e;
union {
volatile long int f;
void * restrict g;
};
};
union {
const int_t *h;
void (*i)(char, int, void *);
} j;
enum {
K = 100,
L = 200,
} m;
char n[16];
struct {
char o;
int p;
void (*q)(int);
} r[5];
struct struct_in_struct s[10];
int t[11];
};
struct root_struct {
enum e1 _1;
enum e2 _2;
e2_t _2_1;
e3_t _2_2;
struct struct_w_typedefs _3;
anon_struct_t _7;
struct struct_fwd *_8;
struct_fwd_t *_9;
struct_fwd_ptr_t _10;
union union_fwd *_11;
union_fwd_t *_12;
union_fwd_ptr_t _13;
struct struct_with_embedded_stuff _14;
};
/* ------ END-EXPECTED-OUTPUT ------ */
/* Anchor function: references root_struct so every C-syntax-quirk test type
 * above is emitted into this object for the dump test.
 */
int f(struct root_struct *s)
{
	return 0;
}
......@@ -4025,62 +4025,13 @@ static struct btf_file_test file_tests[] = {
},
};
/* Scan ELF file fn's section headers for BTF sections.
 * Returns 1 if a BTF_ELF_SEC section is present, 0 if absent, -1 on any
 * ELF/IO error; *has_btf_ext is set to true when BTF_EXT_ELF_SEC is seen
 * (it is never reset to false here -- caller initializes it).
 */
static int file_has_btf_elf(const char *fn, bool *has_btf_ext)
{
	Elf_Scn *scn = NULL;
	GElf_Ehdr ehdr;
	int ret = 0;
	int elf_fd;
	Elf *elf;

	if (CHECK(elf_version(EV_CURRENT) == EV_NONE,
		  "elf_version(EV_CURRENT) == EV_NONE"))
		return -1;

	elf_fd = open(fn, O_RDONLY);
	if (CHECK(elf_fd == -1, "open(%s): errno:%d", fn, errno))
		return -1;

	elf = elf_begin(elf_fd, ELF_C_READ, NULL);
	if (CHECK(!elf, "elf_begin(%s): %s", fn, elf_errmsg(elf_errno()))) {
		ret = -1;
		goto done;
	}

	if (CHECK(!gelf_getehdr(elf, &ehdr), "!gelf_getehdr(%s)", fn)) {
		ret = -1;
		goto done;
	}

	while ((scn = elf_nextscn(elf, scn))) {
		const char *sh_name;
		GElf_Shdr sh;

		if (CHECK(gelf_getshdr(scn, &sh) != &sh,
			  "file:%s gelf_getshdr != &sh", fn)) {
			ret = -1;
			goto done;
		}

		/* section names are resolved via the section-header strtab */
		sh_name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
		if (!strcmp(sh_name, BTF_ELF_SEC))
			ret = 1;
		if (!strcmp(sh_name, BTF_EXT_ELF_SEC))
			*has_btf_ext = true;
	}

done:
	close(elf_fd);
	elf_end(elf);
	return ret;
}
static int do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
const char *expected_fnames[] = {"_dummy_tracepoint",
"test_long_fname_1",
"test_long_fname_2"};
struct btf_ext *btf_ext = NULL;
struct bpf_prog_info info = {};
struct bpf_object *obj = NULL;
struct bpf_func_info *finfo;
......@@ -4095,15 +4046,19 @@ static int do_test_file(unsigned int test_num)
fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num,
test->file);
err = file_has_btf_elf(test->file, &has_btf_ext);
if (err == -1)
return err;
if (err == 0) {
btf = btf__parse_elf(test->file, &btf_ext);
if (IS_ERR(btf)) {
if (PTR_ERR(btf) == -ENOENT) {
fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC);
skip_cnt++;
return 0;
}
return PTR_ERR(btf);
}
btf__free(btf);
has_btf_ext = btf_ext != NULL;
btf_ext__free(btf_ext);
obj = bpf_object__open(test->file);
if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
......
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <linux/err.h>
#include <btf.h>
/* Print a FAIL message with location when condition is true; evaluates to
 * the condition's truth value, so it composes inside if () checks.
 */
#define CHECK(condition, format...) ({					\
	int __ret = !!(condition);					\
	if (__ret) {							\
		fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__);	\
		fprintf(stderr, format);				\
	}								\
	__ret;								\
})
/* printf callback handed to btf_dump__new(); ctx is the output FILE * */
void btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
	vfprintf(ctx, fmt, args);
}
/* Test case: name matches both the compiled object (<name>.o) the BTF is
 * read from and the source (progs/<name>.c) holding the expected output.
 */
struct btf_dump_test_case {
	const char *name;
	struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
	{.name = "btf_dump_test_case_syntax", .opts = {}},
	{.name = "btf_dump_test_case_ordering", .opts = {}},
	{.name = "btf_dump_test_case_padding", .opts = {}},
	{.name = "btf_dump_test_case_packing", .opts = {}},
	{.name = "btf_dump_test_case_bitfields", .opts = {}},
	{.name = "btf_dump_test_case_multidim", .opts = {}},
	{.name = "btf_dump_test_case_namespacing", .opts = {}},
};
/* Dump every type in btf (IDs are 1-based) through a fresh btf_dump
 * instance. Returns 0 on success or the first error encountered.
 */
static int btf_dump_all_types(const struct btf *btf,
			      const struct btf_dump_opts *opts)
{
	size_t nr_types = btf__get_nr_types(btf);
	struct btf_dump *d;
	size_t id;
	int err = 0;

	d = btf_dump__new(btf, NULL, opts, btf_dump_printf);
	if (IS_ERR(d))
		return PTR_ERR(d);

	for (id = 1; id <= nr_types; id++) {
		err = btf_dump__dump_type(d, id);
		if (err)
			break;
	}

	btf_dump__free(d);
	return err;
}
int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
{
char test_file[256], out_file[256], diff_cmd[1024];
struct btf *btf = NULL;
int err = 0, fd = -1;
FILE *f = NULL;
fprintf(stderr, "Test case #%d (%s): ", n, test_case->name);
snprintf(test_file, sizeof(test_file), "%s.o", test_case->name);
btf = btf__parse_elf(test_file, NULL);
if (CHECK(IS_ERR(btf),
"failed to load test BTF: %ld\n", PTR_ERR(btf))) {
err = -PTR_ERR(btf);
btf = NULL;
goto done;
}
snprintf(out_file, sizeof(out_file),
"/tmp/%s.output.XXXXXX", test_case->name);
fd = mkstemp(out_file);
if (CHECK(fd < 0, "failed to create temp output file: %d\n", fd)) {
err = fd;
goto done;
}
f = fdopen(fd, "w");
if (CHECK(f == NULL, "failed to open temp output file: %s(%d)\n",
strerror(errno), errno)) {
close(fd);
goto done;
}
test_case->opts.ctx = f;
err = btf_dump_all_types(btf, &test_case->opts);
fclose(f);
close(fd);
if (CHECK(err, "failure during C dumping: %d\n", err)) {
goto done;
}
snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
/*
* Diff test output and expected test output, contained between
* START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
* For expected output lines, everything before '*' is stripped out.
* Also lines containing comment start and comment end markers are
* ignored.
*/
snprintf(diff_cmd, sizeof(diff_cmd),
"awk '/START-EXPECTED-OUTPUT/{out=1;next} "
"/END-EXPECTED-OUTPUT/{out=0} "
"/\\/\\*|\\*\\//{next} " /* ignore comment start/end lines */
"out {sub(/^[ \\t]*\\*/, \"\"); print}' '%s' | diff -u - '%s'",
test_file, out_file);
err = system(diff_cmd);
if (CHECK(err,
"differing test output, output=%s, err=%d, diff cmd:\n%s\n",
out_file, err, diff_cmd))
goto done;
remove(out_file);
fprintf(stderr, "OK\n");
done:
btf__free(btf);
return err;
}
/* Run every registered dump test case; exit code is the failure count. */
int main() {
	int total = sizeof(btf_dump_test_cases) /
		    sizeof(btf_dump_test_cases[0]);
	int i, failed = 0;

	for (i = 0; i < total; i++) {
		if (test_btf_dump_case(i, &btf_dump_test_cases[i]))
			failed++;
	}

	fprintf(stderr, "%d tests succeeded, %d tests failed.\n",
		total - failed, failed);

	return failed;
}
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
* Tests for libbpf's hashmap.
*
* Copyright (c) 2019 Facebook
*/
#include <stdio.h>
#include <errno.h>
#include <linux/err.h>
#include "hashmap.h"
/* Print a FAIL message with location when condition is true; evaluates to
 * the condition's truth value, so it composes inside if () checks.
 */
#define CHECK(condition, format...) ({					\
	int __ret = !!(condition);					\
	if (__ret) {							\
		fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__);	\
		fprintf(stderr, format);				\
	}								\
	__ret;								\
})
/* identity hash: the key's pointer value (tests encode ints as pointers)
 * is used directly as the hash
 */
size_t hash_fn(const void *k, void *ctx)
{
	return (long)k;
}
/* keys compare equal iff their pointer values match */
bool equal_fn(const void *a, const void *b, void *ctx)
{
	return (long)a == (long)b;
}
/* Smallest power of two >= n (returns 1 for n == 0 or n == 1). */
static inline size_t next_pow_2(size_t n)
{
	size_t p;

	for (p = 1; p < n; p <<= 1)
		;
	return p;
}
/* Expected bucket capacity for sz entries: mirrors hashmap's grow rule of
 * doubling once the map exceeds 75% occupancy.
 */
static inline size_t exp_cap(size_t sz)
{
	size_t cap = next_pow_2(sz);

	if (sz * 4 / 3 > cap)
		cap <<= 1;
	return cap;
}
#define ELEM_CNT 62

/* Exercise add/set/update/find/delete and both iteration flavors on a map
 * keyed by small integers (encoded as pointers). Returns 0 on success,
 * 1 on the first failed check.
 */
int test_hashmap_generic(void)
{
	struct hashmap_entry *entry, *tmp;
	int err, bkt, found_cnt, i;
	long long found_msk;
	struct hashmap *map;

	fprintf(stderr, "%s: ", __func__);

	map = hashmap__new(hash_fn, equal_fn, NULL);
	if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
		return 1;

	/* phase 1: populate keys 0..ELEM_CNT-1 with values 1024+i */
	for (i = 0; i < ELEM_CNT; i++) {
		const void *oldk, *k = (const void *)(long)i;
		void *oldv, *v = (void *)(long)(1024 + i);

		err = hashmap__update(map, k, v, &oldk, &oldv);
		if (CHECK(err != -ENOENT, "unexpected result: %d\n", err))
			return 1;

		if (i % 2) {
			err = hashmap__add(map, k, v);
		} else {
			err = hashmap__set(map, k, v, &oldk, &oldv);
			if (CHECK(oldk != NULL || oldv != NULL,
				  "unexpected k/v: %p=%p\n", oldk, oldv))
				return 1;
		}

		if (CHECK(err, "failed to add k/v %ld = %ld: %d\n",
			  (long)k, (long)v, err))
			return 1;

		if (CHECK(!hashmap__find(map, k, &oldv),
			  "failed to find key %ld\n", (long)k))
			return 1;
		if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv))
			return 1;
	}

	if (CHECK(hashmap__size(map) != ELEM_CNT,
		  "invalid map size: %zu\n", hashmap__size(map)))
		return 1;
	if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
		  "unexpected map capacity: %zu\n", hashmap__capacity(map)))
		return 1;

	found_msk = 0;
	hashmap__for_each_entry(map, entry, bkt) {
		long k = (long)entry->key;
		long v = (long)entry->value;

		found_msk |= 1ULL << k;
		if (CHECK(v - k != 1024, "invalid k/v pair: %ld = %ld\n", k, v))
			return 1;
	}
	if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1,
		  "not all keys iterated: %llx\n", found_msk))
		return 1;

	/* phase 2: overwrite every value with 256+i via update/set */
	for (i = 0; i < ELEM_CNT; i++) {
		const void *oldk, *k = (const void *)(long)i;
		void *oldv, *v = (void *)(long)(256 + i);

		err = hashmap__add(map, k, v);
		if (CHECK(err != -EEXIST, "unexpected add result: %d\n", err))
			return 1;

		if (i % 2)
			err = hashmap__update(map, k, v, &oldk, &oldv);
		else
			err = hashmap__set(map, k, v, &oldk, &oldv);

		if (CHECK(err, "failed to update k/v %ld = %ld: %d\n",
			  (long)k, (long)v, err))
			return 1;

		if (CHECK(!hashmap__find(map, k, &oldv),
			  "failed to find key %ld\n", (long)k))
			return 1;
		if (CHECK(oldv != v, "found value is wrong: %ld\n", (long)oldv))
			return 1;
	}

	if (CHECK(hashmap__size(map) != ELEM_CNT,
		  "invalid updated map size: %zu\n", hashmap__size(map)))
		return 1;
	if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
		  "unexpected map capacity: %zu\n", hashmap__capacity(map)))
		return 1;

	found_msk = 0;
	hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
		long k = (long)entry->key;
		long v = (long)entry->value;

		found_msk |= 1ULL << k;
		if (CHECK(v - k != 256,
			  "invalid updated k/v pair: %ld = %ld\n", k, v))
			return 1;
	}
	if (CHECK(found_msk != (1ULL << ELEM_CNT) - 1,
		  "not all keys iterated after update: %llx\n", found_msk))
		return 1;

	/* phase 3: delete key 0's entries first, then everything else */
	found_cnt = 0;
	hashmap__for_each_key_entry(map, entry, (void *)0) {
		found_cnt++;
	}
	if (CHECK(!found_cnt, "didn't find any entries for key 0\n"))
		return 1;

	found_msk = 0;
	found_cnt = 0;
	hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) {
		const void *oldk, *k;
		void *oldv, *v;

		k = entry->key;
		v = entry->value;

		found_cnt++;
		found_msk |= 1ULL << (long)k;

		if (CHECK(!hashmap__delete(map, k, &oldk, &oldv),
			  "failed to delete k/v %ld = %ld\n",
			  (long)k, (long)v))
			return 1;
		if (CHECK(oldk != k || oldv != v,
			  "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n",
			  (long)k, (long)v, (long)oldk, (long)oldv))
			return 1;
		if (CHECK(hashmap__delete(map, k, &oldk, &oldv),
			  "unexpectedly deleted k/v %ld = %ld\n",
			  (long)oldk, (long)oldv))
			return 1;
	}

	if (CHECK(!found_cnt || !found_msk,
		  "didn't delete any key entries\n"))
		return 1;
	if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt,
		  "invalid updated map size (already deleted: %d): %zu\n",
		  found_cnt, hashmap__size(map)))
		return 1;
	if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)),
		  "unexpected map capacity: %zu\n", hashmap__capacity(map)))
		return 1;

	hashmap__for_each_entry_safe(map, entry, tmp, bkt) {
		const void *oldk, *k;
		void *oldv, *v;

		k = entry->key;
		v = entry->value;

		found_cnt++;
		found_msk |= 1ULL << (long)k;

		if (CHECK(!hashmap__delete(map, k, &oldk, &oldv),
			  "failed to delete k/v %ld = %ld\n",
			  (long)k, (long)v))
			return 1;
		if (CHECK(oldk != k || oldv != v,
			  "invalid old k/v: expect %ld = %ld, got %ld = %ld\n",
			  (long)k, (long)v, (long)oldk, (long)oldv))
			return 1;
		if (CHECK(hashmap__delete(map, k, &oldk, &oldv),
			  "unexpectedly deleted k/v %ld = %ld\n",
			  (long)k, (long)v))
			return 1;
	}

	if (CHECK(found_cnt != ELEM_CNT || found_msk != (1ULL << ELEM_CNT) - 1,
		  "not all keys were deleted: found_cnt:%d, found_msk:%llx\n",
		  found_cnt, found_msk))
		return 1;
	if (CHECK(hashmap__size(map) != 0,
		  "invalid updated map size (already deleted: %d): %zu\n",
		  found_cnt, hashmap__size(map)))
		return 1;

	found_cnt = 0;
	hashmap__for_each_entry(map, entry, bkt) {
		CHECK(false, "unexpected map entries left: %ld = %ld\n",
		      (long)entry->key, (long)entry->value);
		return 1;
	}

	/* bug fix: this used to hashmap__free() and then iterate the freed
	 * map (use-after-free); clear first, verify emptiness, then free
	 */
	hashmap__clear(map);
	hashmap__for_each_entry(map, entry, bkt) {
		CHECK(false, "unexpected map entries left: %ld = %ld\n",
		      (long)entry->key, (long)entry->value);
		return 1;
	}
	hashmap__free(map);

	fprintf(stderr, "OK\n");
	return 0;
}
/* degenerate hash: every key collides into bucket 0, stressing chaining */
size_t collision_hash_fn(const void *k, void *ctx)
{
	return 0;
}
/* Validate HASHMAP_APPEND multimap semantics with forced collisions:
 * global iteration sees every value, per-key iteration sees only that
 * key's values. Returns 0 on success, 1 on the first failed check.
 */
int test_hashmap_multimap(void)
{
	void *k1 = (void *)0, *k2 = (void *)1;
	struct hashmap_entry *entry;
	struct hashmap *map;
	long found_msk;
	int err, bkt;

	fprintf(stderr, "%s: ", __func__);

	/* force collisions */
	map = hashmap__new(collision_hash_fn, equal_fn, NULL);
	if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
		return 1;

	/* set up multimap:
	 * [0] -> 1, 2, 4;
	 * [1] -> 8, 16, 32;
	 */
	err = hashmap__append(map, k1, (void *)1);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;
	err = hashmap__append(map, k1, (void *)2);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;
	err = hashmap__append(map, k1, (void *)4);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;

	err = hashmap__append(map, k2, (void *)8);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;
	err = hashmap__append(map, k2, (void *)16);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;
	err = hashmap__append(map, k2, (void *)32);
	if (CHECK(err, "failed to add k/v: %d\n", err))
		return 1;

	if (CHECK(hashmap__size(map) != 6,
		  "invalid map size: %zu\n", hashmap__size(map)))
		return 1;

	/* verify global iteration still works and sees all values */
	found_msk = 0;
	hashmap__for_each_entry(map, entry, bkt) {
		found_msk |= (long)entry->value;
	}
	if (CHECK(found_msk != (1 << 6) - 1,
		  "not all keys iterated: %lx\n", found_msk))
		return 1;

	/* iterate values for key 1 */
	found_msk = 0;
	hashmap__for_each_key_entry(map, entry, k1) {
		found_msk |= (long)entry->value;
	}
	if (CHECK(found_msk != (1 | 2 | 4),
		  "invalid k1 values: %lx\n", found_msk))
		return 1;

	/* iterate values for key 2 */
	found_msk = 0;
	hashmap__for_each_key_entry(map, entry, k2) {
		found_msk |= (long)entry->value;
	}
	if (CHECK(found_msk != (8 | 16 | 32),
		  "invalid k2 values: %lx\n", found_msk))
		return 1;

	/* bug fix: the map was previously leaked on the success path */
	hashmap__free(map);

	fprintf(stderr, "OK\n");
	return 0;
}
/* Verify that a freshly created hashmap (zero size, zero capacity — buckets
 * are allocated lazily) handles lookup, deletion, and both iteration forms
 * without visiting anything or crashing.
 *
 * Returns 0 on success, 1 on the first failed check.
 */
int test_hashmap_empty(void)
{
	struct hashmap_entry *entry;
	int bkt;
	struct hashmap *map;
	void *k = (void *)0;

	fprintf(stderr, "%s: ", __func__);

	map = hashmap__new(hash_fn, equal_fn, NULL);
	if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map)))
		return 1;

	if (CHECK(hashmap__size(map) != 0,
		  "invalid map size: %zu\n", hashmap__size(map)))
		return 1;
	if (CHECK(hashmap__capacity(map) != 0,
		  "invalid map capacity: %zu\n", hashmap__capacity(map)))
		return 1;
	/* find/delete of an absent key must report failure, not crash */
	if (CHECK(hashmap__find(map, k, NULL), "unexpected find\n"))
		return 1;
	if (CHECK(hashmap__delete(map, k, NULL, NULL), "unexpected delete\n"))
		return 1;
	/* neither iteration form should produce any entries */
	hashmap__for_each_entry(map, entry, bkt) {
		CHECK(false, "unexpected iterated entry\n");
		return 1;
	}
	hashmap__for_each_key_entry(map, entry, k) {
		CHECK(false, "unexpected key entry\n");
		return 1;
	}

	/* previously leaked; release map now that all checks passed */
	hashmap__free(map);

	fprintf(stderr, "OK\n");
	return 0;
}
/* Run every hashmap test suite; exit non-zero if any of them failed. */
int main(int argc, char **argv)
{
	bool any_failed = false;

	any_failed |= test_hashmap_generic() != 0;
	any_failed |= test_hashmap_multimap() != 0;
	any_failed |= test_hashmap_empty() != 0;

	return any_failed;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment