Commit 9c6c5c48 authored by Andrii Nakryiko's avatar Andrii Nakryiko Committed by Alexei Starovoitov

libbpf: Make btf_dump work with modifiable BTF

Ensure that btf_dump can accommodate new BTF types being appended to BTF
instance after struct btf_dump was created. This came up during an attempt to
use btf_dump for raw type dumping in selftests, but since the changes are not
excessive, it's good to not have any gotchas in API usage, so I decided to
support such a use case in general.
Signed-off-by: default avatarAndrii Nakryiko <andriin@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200929232843.1249318-2-andriin@fb.com
parent ea7da1d5
...@@ -146,6 +146,23 @@ void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, ...@@ -146,6 +146,23 @@ void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
return new_data + cur_cnt * elem_sz; return new_data + cur_cnt * elem_sz;
} }
/* Ensure given dynamically allocated memory region has enough allocated space
 * to accommodate *need_cnt* elements of size *elem_sz* bytes each
 */
int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
{
	void *grown;

	/* current capacity already covers the requested count */
	if (*cap_cnt >= need_cnt)
		return 0;

	/* grow by exactly the shortfall; SIZE_MAX imposes no upper bound */
	grown = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX,
			    need_cnt - *cap_cnt);
	return grown ? 0 : -ENOMEM;
}
static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off) static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{ {
__u32 *p; __u32 *p;
......
...@@ -60,11 +60,14 @@ struct btf_dump { ...@@ -60,11 +60,14 @@ struct btf_dump {
struct btf_dump_opts opts; struct btf_dump_opts opts;
int ptr_sz; int ptr_sz;
bool strip_mods; bool strip_mods;
int last_id;
/* per-type auxiliary state */ /* per-type auxiliary state */
struct btf_dump_type_aux_state *type_states; struct btf_dump_type_aux_state *type_states;
size_t type_states_cap;
/* per-type optional cached unique name, must be freed, if present */ /* per-type optional cached unique name, must be freed, if present */
const char **cached_names; const char **cached_names;
size_t cached_names_cap;
/* topo-sorted list of dependent type definitions */ /* topo-sorted list of dependent type definitions */
__u32 *emit_queue; __u32 *emit_queue;
...@@ -113,6 +116,7 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...) ...@@ -113,6 +116,7 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
} }
static int btf_dump_mark_referenced(struct btf_dump *d); static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_resize(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf, struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext, const struct btf_ext *btf_ext,
...@@ -144,25 +148,8 @@ struct btf_dump *btf_dump__new(const struct btf *btf, ...@@ -144,25 +148,8 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->ident_names = NULL; d->ident_names = NULL;
goto err; goto err;
} }
d->type_states = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->type_states[0]));
if (!d->type_states) {
err = -ENOMEM;
goto err;
}
d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
sizeof(d->cached_names[0]));
if (!d->cached_names) {
err = -ENOMEM;
goto err;
}
/* VOID is special */ err = btf_dump_resize(d);
d->type_states[0].order_state = ORDERED;
d->type_states[0].emit_state = EMITTED;
/* eagerly determine referenced types for anon enums */
err = btf_dump_mark_referenced(d);
if (err) if (err)
goto err; goto err;
...@@ -172,9 +159,38 @@ struct btf_dump *btf_dump__new(const struct btf *btf, ...@@ -172,9 +159,38 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
return ERR_PTR(err); return ERR_PTR(err);
} }
/* Grow btf_dump's per-type auxiliary state (type_states, cached_names) to
 * cover any types appended to the underlying BTF instance since the last
 * resize (or since creation), and eagerly mark referenced types among the
 * newly added ones. Returns 0 on success, negative error otherwise.
 */
static int btf_dump_resize(struct btf_dump *d)
{
	int err, nr_types = btf__get_nr_types(d->btf);

	/* no new types were appended since last time — nothing to do */
	if (nr_types <= d->last_id)
		return 0;

	/* type IDs are 1-based, so arrays need nr_types + 1 slots */
	err = btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
			     sizeof(*d->type_states), nr_types + 1);
	if (err)
		return -ENOMEM;
	err = btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
			     sizeof(*d->cached_names), nr_types + 1);
	if (err)
		return -ENOMEM;

	if (d->last_id == 0) {
		/* VOID is special */
		d->type_states[0].order_state = ORDERED;
		d->type_states[0].emit_state = EMITTED;
	}

	/* eagerly determine referenced types for anon enums */
	err = btf_dump_mark_referenced(d);
	if (err)
		return err;

	d->last_id = nr_types;
	return 0;
}
void btf_dump__free(struct btf_dump *d) void btf_dump__free(struct btf_dump *d)
{ {
int i, cnt; int i;
if (IS_ERR_OR_NULL(d)) if (IS_ERR_OR_NULL(d))
return; return;
...@@ -182,7 +198,7 @@ void btf_dump__free(struct btf_dump *d) ...@@ -182,7 +198,7 @@ void btf_dump__free(struct btf_dump *d)
free(d->type_states); free(d->type_states);
if (d->cached_names) { if (d->cached_names) {
/* any set cached name is owned by us and should be freed */ /* any set cached name is owned by us and should be freed */
for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) { for (i = 0; i <= d->last_id; i++) {
if (d->cached_names[i]) if (d->cached_names[i])
free((void *)d->cached_names[i]); free((void *)d->cached_names[i]);
} }
...@@ -222,6 +238,10 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id) ...@@ -222,6 +238,10 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
if (id > btf__get_nr_types(d->btf)) if (id > btf__get_nr_types(d->btf))
return -EINVAL; return -EINVAL;
err = btf_dump_resize(d);
if (err)
return err;
d->emit_queue_cnt = 0; d->emit_queue_cnt = 0;
err = btf_dump_order_type(d, id, false); err = btf_dump_order_type(d, id, false);
if (err < 0) if (err < 0)
...@@ -251,7 +271,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) ...@@ -251,7 +271,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
const struct btf_type *t; const struct btf_type *t;
__u16 vlen; __u16 vlen;
for (i = 1; i <= n; i++) { for (i = d->last_id + 1; i <= n; i++) {
t = btf__type_by_id(d->btf, i); t = btf__type_by_id(d->btf, i);
vlen = btf_vlen(t); vlen = btf_vlen(t);
...@@ -306,6 +326,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d) ...@@ -306,6 +326,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
} }
return 0; return 0;
} }
static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id) static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
{ {
__u32 *new_queue; __u32 *new_queue;
...@@ -1049,11 +1070,15 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, ...@@ -1049,11 +1070,15 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts) const struct btf_dump_emit_type_decl_opts *opts)
{ {
const char *fname; const char *fname;
int lvl; int lvl, err;
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL; return -EINVAL;
err = btf_dump_resize(d);
if (err)
return -EINVAL;
fname = OPTS_GET(opts, field_name, ""); fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0); lvl = OPTS_GET(opts, indent_level, 0);
d->strip_mods = OPTS_GET(opts, strip_mods, false); d->strip_mods = OPTS_GET(opts, strip_mods, false);
......
...@@ -107,6 +107,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size) ...@@ -107,6 +107,7 @@ static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz, void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
size_t cur_cnt, size_t max_cnt, size_t add_cnt); size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
static inline bool libbpf_validate_opts(const char *opts, static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz, size_t opts_sz, size_t user_sz,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment