Commit fd75733d authored by Daniel Müller, committed by Andrii Nakryiko

bpf: Merge "types_are_compat" logic into relo_core.c

BPF type compatibility checks (bpf_core_types_are_compat()) are
currently duplicated between kernel and user space. That's a historical
artifact more than an intentional design and can lead to subtle bugs where
one implementation is adjusted but the other is forgotten.

That happened with the enum64 work, for example, where the libbpf side
was changed (commit 23b2a3a8 ("libbpf: Add enum64 relocation
support")) to use the btf_kind_core_compat() helper function but the
kernel side was not (commit 6089fb32 ("bpf: Add btf enum64
support")).

This patch addresses the duplication issue by merging both
implementations and moving them into relo_core.c, and it fixes the
aforementioned kind check by giving preference to libbpf's already
adjusted logic.
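
The resulting split, as visible in the diff below: the shared helper lives in
relo_core.c (built for both the kernel and libbpf), and each side keeps only a
thin wrapper that differs in the recursion budget it passes along:

/* Kernel wrapper in kernel/bpf/btf.c (MAX_TYPES_ARE_COMPAT_DEPTH is 2): */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                              const struct btf *targ_btf, __u32 targ_id)
{
        return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
                                           MAX_TYPES_ARE_COMPAT_DEPTH);
}

/* libbpf wrapper in tools/lib/bpf/libbpf.c, keeping its previous limit of 32: */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                              const struct btf *targ_btf, __u32 targ_id)
{
        return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
}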

For discussion of the topic, please refer to:
https://lore.kernel.org/bpf/CAADnVQKbWR7oarBdewgOBZUPzryhRYvEbkhyPJQHHuxq=0K1gw@mail.gmail.com/T/#mcc99f4a33ad9a322afaf1b9276fb1f0b7add9665

Changelog:
v1 -> v2:
- limited libbpf recursion limit to 32
- changed name to __bpf_core_types_are_compat
- included warning previously present in libbpf version
- merged kernel and user space changes into a single patch
Signed-off-by: Daniel Müller <deso@posteo.net>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220623182934.2582827-1-deso@posteo.net
parent 2f6d1e0f
kernel/bpf/btf.c
@@ -7416,87 +7416,6 @@ EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
 #define MAX_TYPES_ARE_COMPAT_DEPTH 2
-static
-int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
-                                const struct btf *targ_btf, __u32 targ_id,
-                                int level)
-{
-        const struct btf_type *local_type, *targ_type;
-        int depth = 32; /* max recursion depth */
-        /* caller made sure that names match (ignoring flavor suffix) */
-        local_type = btf_type_by_id(local_btf, local_id);
-        targ_type = btf_type_by_id(targ_btf, targ_id);
-        if (btf_kind(local_type) != btf_kind(targ_type))
-                return 0;
-recur:
-        depth--;
-        if (depth < 0)
-                return -EINVAL;
-        local_type = btf_type_skip_modifiers(local_btf, local_id, &local_id);
-        targ_type = btf_type_skip_modifiers(targ_btf, targ_id, &targ_id);
-        if (!local_type || !targ_type)
-                return -EINVAL;
-        if (btf_kind(local_type) != btf_kind(targ_type))
-                return 0;
-        switch (btf_kind(local_type)) {
-        case BTF_KIND_UNKN:
-        case BTF_KIND_STRUCT:
-        case BTF_KIND_UNION:
-        case BTF_KIND_ENUM:
-        case BTF_KIND_FWD:
-        case BTF_KIND_ENUM64:
-                return 1;
-        case BTF_KIND_INT:
-                /* just reject deprecated bitfield-like integers; all other
-                 * integers are by default compatible between each other
-                 */
-                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
-        case BTF_KIND_PTR:
-                local_id = local_type->type;
-                targ_id = targ_type->type;
-                goto recur;
-        case BTF_KIND_ARRAY:
-                local_id = btf_array(local_type)->type;
-                targ_id = btf_array(targ_type)->type;
-                goto recur;
-        case BTF_KIND_FUNC_PROTO: {
-                struct btf_param *local_p = btf_params(local_type);
-                struct btf_param *targ_p = btf_params(targ_type);
-                __u16 local_vlen = btf_vlen(local_type);
-                __u16 targ_vlen = btf_vlen(targ_type);
-                int i, err;
-                if (local_vlen != targ_vlen)
-                        return 0;
-                for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
-                        if (level <= 0)
-                                return -EINVAL;
-                        btf_type_skip_modifiers(local_btf, local_p->type, &local_id);
-                        btf_type_skip_modifiers(targ_btf, targ_p->type, &targ_id);
-                        err = __bpf_core_types_are_compat(local_btf, local_id,
-                                                          targ_btf, targ_id,
-                                                          level - 1);
-                        if (err <= 0)
-                                return err;
-                }
-                /* tail recurse for return type check */
-                btf_type_skip_modifiers(local_btf, local_type->type, &local_id);
-                btf_type_skip_modifiers(targ_btf, targ_type->type, &targ_id);
-                goto recur;
-        }
-        default:
-                return 0;
-        }
-}
 /* Check local and target types for compatibility. This check is used for
  * type-based CO-RE relocations and follow slightly different rules than
  * field-based relocations. This function assumes that root types were already
@@ -7519,8 +7438,7 @@ int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                               const struct btf *targ_btf, __u32 targ_id)
 {
-        return __bpf_core_types_are_compat(local_btf, local_id,
-                                           targ_btf, targ_id,
+        return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
                                            MAX_TYPES_ARE_COMPAT_DEPTH);
 }
tools/lib/bpf/libbpf.c
@@ -5732,77 +5732,7 @@ bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 l
 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                               const struct btf *targ_btf, __u32 targ_id)
 {
-        const struct btf_type *local_type, *targ_type;
-        int depth = 32; /* max recursion depth */
-        /* caller made sure that names match (ignoring flavor suffix) */
-        local_type = btf__type_by_id(local_btf, local_id);
-        targ_type = btf__type_by_id(targ_btf, targ_id);
-        if (!btf_kind_core_compat(local_type, targ_type))
-                return 0;
-recur:
-        depth--;
-        if (depth < 0)
-                return -EINVAL;
-        local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
-        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
-        if (!local_type || !targ_type)
-                return -EINVAL;
-        if (!btf_kind_core_compat(local_type, targ_type))
-                return 0;
-        switch (btf_kind(local_type)) {
-        case BTF_KIND_UNKN:
-        case BTF_KIND_STRUCT:
-        case BTF_KIND_UNION:
-        case BTF_KIND_ENUM:
-        case BTF_KIND_ENUM64:
-        case BTF_KIND_FWD:
-                return 1;
-        case BTF_KIND_INT:
-                /* just reject deprecated bitfield-like integers; all other
-                 * integers are by default compatible between each other
-                 */
-                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
-        case BTF_KIND_PTR:
-                local_id = local_type->type;
-                targ_id = targ_type->type;
-                goto recur;
-        case BTF_KIND_ARRAY:
-                local_id = btf_array(local_type)->type;
-                targ_id = btf_array(targ_type)->type;
-                goto recur;
-        case BTF_KIND_FUNC_PROTO: {
-                struct btf_param *local_p = btf_params(local_type);
-                struct btf_param *targ_p = btf_params(targ_type);
-                __u16 local_vlen = btf_vlen(local_type);
-                __u16 targ_vlen = btf_vlen(targ_type);
-                int i, err;
-                if (local_vlen != targ_vlen)
-                        return 0;
-                for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
-                        skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
-                        skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
-                        err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
-                        if (err <= 0)
-                                return err;
-                }
-                /* tail recurse for return type check */
-                skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
-                skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
-                goto recur;
-        }
-        default:
-                pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
-                        btf_kind_str(local_type), local_id, targ_id);
-                return 0;
-        }
+        return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
 }
 static size_t bpf_core_hash_fn(const void *key, void *ctx)
tools/lib/bpf/relo_core.c
@@ -141,6 +141,86 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
         }
 }
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+                                const struct btf *targ_btf, __u32 targ_id, int level)
+{
+        const struct btf_type *local_type, *targ_type;
+        int depth = 32; /* max recursion depth */
+        /* caller made sure that names match (ignoring flavor suffix) */
+        local_type = btf_type_by_id(local_btf, local_id);
+        targ_type = btf_type_by_id(targ_btf, targ_id);
+        if (!btf_kind_core_compat(local_type, targ_type))
+                return 0;
+recur:
+        depth--;
+        if (depth < 0)
+                return -EINVAL;
+        local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+        targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+        if (!local_type || !targ_type)
+                return -EINVAL;
+        if (!btf_kind_core_compat(local_type, targ_type))
+                return 0;
+        switch (btf_kind(local_type)) {
+        case BTF_KIND_UNKN:
+        case BTF_KIND_STRUCT:
+        case BTF_KIND_UNION:
+        case BTF_KIND_ENUM:
+        case BTF_KIND_FWD:
+        case BTF_KIND_ENUM64:
+                return 1;
+        case BTF_KIND_INT:
+                /* just reject deprecated bitfield-like integers; all other
+                 * integers are by default compatible between each other
+                 */
+                return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
+        case BTF_KIND_PTR:
+                local_id = local_type->type;
+                targ_id = targ_type->type;
+                goto recur;
+        case BTF_KIND_ARRAY:
+                local_id = btf_array(local_type)->type;
+                targ_id = btf_array(targ_type)->type;
+                goto recur;
+        case BTF_KIND_FUNC_PROTO: {
+                struct btf_param *local_p = btf_params(local_type);
+                struct btf_param *targ_p = btf_params(targ_type);
+                __u16 local_vlen = btf_vlen(local_type);
+                __u16 targ_vlen = btf_vlen(targ_type);
+                int i, err;
+                if (local_vlen != targ_vlen)
+                        return 0;
+                for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+                        if (level <= 0)
+                                return -EINVAL;
+                        skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
+                        skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
+                        err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
+                                                          level - 1);
+                        if (err <= 0)
+                                return err;
+                }
+                /* tail recurse for return type check */
+                skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
+                skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
+                goto recur;
+        }
+        default:
+                pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+                        btf_kind_str(local_type), local_id, targ_id);
+                return 0;
+        }
+}
 /*
  * Turn bpf_core_relo into a low- and high-level spec representation,
  * validating correctness along the way, as well as calculating resulting
tools/lib/bpf/relo_core.h
@@ -68,6 +68,8 @@ struct bpf_core_relo_res {
         __u32 new_type_id;
 };
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+                                const struct btf *targ_btf, __u32 targ_id, int level);
 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
                               const struct btf *targ_btf, __u32 targ_id);
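
For illustration only (not part of the patch): the kind pair behind the enum64
divergence can be reproduced with libbpf's public BTF writer API. The sketch
below builds a 32-bit enum and a 64-bit enum64 with the same name; a plain
btf_kind() equality check (the old kernel-side behavior) rejects the pair,
while btf_kind_core_compat() in the merged logic treats it as compatible.
Error handling is omitted and the program only shows the kind values involved.

#include <stdbool.h>
#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
        struct btf *local = btf__new_empty();
        struct btf *targ = btf__new_empty();
        const struct btf_type *lt, *tt;
        int local_id, targ_id;

        /* local BTF: 4-byte enum e { A = 1 } -> BTF_KIND_ENUM */
        local_id = btf__add_enum(local, "e", 4);
        btf__add_enum_value(local, "A", 1);

        /* target BTF: 8-byte enum e { A = 1 } -> BTF_KIND_ENUM64 */
        targ_id = btf__add_enum64(targ, "e", 8, false);
        btf__add_enum64_value(targ, "A", 1);

        lt = btf__type_by_id(local, local_id);
        tt = btf__type_by_id(targ, targ_id);

        /* BTF_KIND_ENUM != BTF_KIND_ENUM64, so a plain kind comparison fails,
         * while the merged check accepts any-enum vs. any-enum as compatible.
         */
        printf("kinds equal: %d\n", btf_kind(lt) == btf_kind(tt));

        btf__free(targ);
        btf__free(local);
        return 0;
}

Build against a libbpf version with enum64 support (e.g. gcc demo.c -lbpf).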