Commit 3c480732 authored by Hao Luo, committed by Alexei Starovoitov

bpf: Replace RET_XXX_OR_NULL with RET_XXX | PTR_MAYBE_NULL

We have introduced a new type to make bpf_ret composable, by
reserving high bits to represent flags.

One of the flags is PTR_MAYBE_NULL, which indicates a pointer
may be NULL. When this flag is applied to ret_types, it means
the returned value could be a NULL pointer. This patch switches
the qualified ret_types to use this flag.
The ret_types changed in this patch include:

1. RET_PTR_TO_MAP_VALUE_OR_NULL
2. RET_PTR_TO_SOCKET_OR_NULL
3. RET_PTR_TO_TCP_SOCK_OR_NULL
4. RET_PTR_TO_SOCK_COMMON_OR_NULL
5. RET_PTR_TO_ALLOC_MEM_OR_NULL
6. RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL
7. RET_PTR_TO_BTF_ID_OR_NULL

This patch doesn't eliminate the use of these names; instead, it
makes them aliases of 'RET_PTR_TO_XXX | PTR_MAYBE_NULL'.
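
For illustration only, here is a minimal stand-alone C sketch of the
composition scheme described above. The DEMO_* names, the 8-bit width of
the base-type field and the position of the flag bit are assumptions made
for this example; the real layout comes from the bpf_type_flag definitions
introduced in this series, not from this snippet.

#include <stdio.h>

/* Assumed layout for the sketch: the low 8 bits hold the base type,
 * the bits above them hold flags such as PTR_MAYBE_NULL.
 */
#define DEMO_BASE_TYPE_BITS	8
#define DEMO_BASE_TYPE_MASK	((1u << DEMO_BASE_TYPE_BITS) - 1)
#define DEMO_PTR_MAYBE_NULL	(1u << DEMO_BASE_TYPE_BITS)

enum demo_return_type {
	DEMO_RET_PTR_TO_MAP_VALUE = 1,
	DEMO_RET_PTR_TO_SOCKET,
	/* Composed aliases, mirroring RET_XXX_OR_NULL = RET_XXX | PTR_MAYBE_NULL. */
	DEMO_RET_PTR_TO_MAP_VALUE_OR_NULL = DEMO_PTR_MAYBE_NULL | DEMO_RET_PTR_TO_MAP_VALUE,
	DEMO_RET_PTR_TO_SOCKET_OR_NULL    = DEMO_PTR_MAYBE_NULL | DEMO_RET_PTR_TO_SOCKET,
};

int main(void)
{
	unsigned int t = DEMO_RET_PTR_TO_MAP_VALUE_OR_NULL;

	/* Decompose: the mask recovers the base type, the flag bit answers
	 * "may this be NULL?" without a separate enum entry per combination.
	 */
	printf("base type = %u, may be NULL = %d\n",
	       t & DEMO_BASE_TYPE_MASK, !!(t & DEMO_PTR_MAYBE_NULL));
	return 0;
}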
Signed-off-by: Hao Luo <haoluo@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211217003152.48334-4-haoluo@google.com
parent 48946bd6
@@ -382,17 +382,22 @@ enum bpf_return_type {
 	RET_INTEGER,			/* function returns integer */
 	RET_VOID,			/* function doesn't return anything */
 	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
-	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
-	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
-	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
-	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
-	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
-	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
-	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
+	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
+	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
+	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
+	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
 	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
 	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
 	__BPF_RET_TYPE_MAX,
+
+	/* Extended ret_types. */
+	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
+	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
 
 	/* This must be the last entry. Its purpose is to ensure the enum is
 	 * wide enough to hold the higher bits reserved for bpf_type_flag.
 	 */
......
@@ -682,7 +682,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
 	.func		= bpf_per_cpu_ptr,
 	.gpl_only	= false,
-	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
+	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL,
 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
 	.arg2_type	= ARG_ANYTHING,
 };
......
@@ -6473,6 +6473,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 			     int *insn_idx_p)
 {
 	const struct bpf_func_proto *fn = NULL;
+	enum bpf_return_type ret_type;
 	struct bpf_reg_state *regs;
 	struct bpf_call_arg_meta meta;
 	int insn_idx = *insn_idx_p;
@@ -6612,13 +6613,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
 
 	/* update return register (already marked as written above) */
-	if (fn->ret_type == RET_INTEGER) {
+	ret_type = fn->ret_type;
+	if (ret_type == RET_INTEGER) {
 		/* sets type to SCALAR_VALUE */
 		mark_reg_unknown(env, regs, BPF_REG_0);
-	} else if (fn->ret_type == RET_VOID) {
+	} else if (ret_type == RET_VOID) {
 		regs[BPF_REG_0].type = NOT_INIT;
-	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
-		   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+	} else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
 		/* There is no offset yet applied, variable or fixed */
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		/* remember map_ptr, so that check_map_access()
@@ -6632,28 +6633,27 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
 		regs[BPF_REG_0].map_uid = meta.map_uid;
-		if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+		if (type_may_be_null(ret_type)) {
+			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+		} else {
 			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
 			if (map_value_has_spin_lock(meta.map_ptr))
 				regs[BPF_REG_0].id = ++env->id_gen;
-		} else {
-			regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
 		}
-	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
+	} else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
-	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
+	} else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
-	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
+	} else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
-	} else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) {
+	} else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
 		regs[BPF_REG_0].mem_size = meta.mem_size;
-	} else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
-		   fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
+	} else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
 		const struct btf_type *t;
 
 		mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -6672,28 +6672,28 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 				return -EINVAL;
 			}
 			regs[BPF_REG_0].type =
-				fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
-				PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
+				(ret_type & PTR_MAYBE_NULL) ?
+				PTR_TO_MEM_OR_NULL : PTR_TO_MEM;
 			regs[BPF_REG_0].mem_size = tsize;
 		} else {
 			regs[BPF_REG_0].type =
-				fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
-				PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
+				(ret_type & PTR_MAYBE_NULL) ?
+				PTR_TO_BTF_ID_OR_NULL : PTR_TO_BTF_ID;
 			regs[BPF_REG_0].btf = meta.ret_btf;
 			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
 		}
-	} else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
-		   fn->ret_type == RET_PTR_TO_BTF_ID) {
+	} else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
 		int ret_btf_id;
 
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
-			PTR_TO_BTF_ID :
-			PTR_TO_BTF_ID_OR_NULL;
+		regs[BPF_REG_0].type = (ret_type & PTR_MAYBE_NULL) ?
+			PTR_TO_BTF_ID_OR_NULL :
+			PTR_TO_BTF_ID;
 		ret_btf_id = *fn->ret_btf_id;
 		if (ret_btf_id == 0) {
-			verbose(env, "invalid return type %d of func %s#%d\n",
-				fn->ret_type, func_id_name(func_id), func_id);
+			verbose(env, "invalid return type %u of func %s#%d\n",
+				base_type(ret_type), func_id_name(func_id),
+				func_id);
 			return -EINVAL;
 		}
 		/* current BPF helper definitions are only coming from
@@ -6702,8 +6702,8 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		regs[BPF_REG_0].btf = btf_vmlinux;
 		regs[BPF_REG_0].btf_id = ret_btf_id;
 	} else {
-		verbose(env, "unknown return type %d of func %s#%d\n",
-			fn->ret_type, func_id_name(func_id), func_id);
+		verbose(env, "unknown return type %u of func %s#%d\n",
+			base_type(ret_type), func_id_name(func_id), func_id);
 		return -EINVAL;
 	}
......
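
As a reading aid (not part of the patch), here is a minimal stand-alone C
sketch of the check pattern the verifier hunks above switch to: compare on
the base type once, then let the PTR_MAYBE_NULL bit choose between the plain
and the _OR_NULL register type. Every demo_/DEMO_ identifier and the bit
layout are hypothetical stand-ins for this example, not kernel definitions.

#include <stdio.h>
#include <stdbool.h>

#define DEMO_BASE_TYPE_MASK	0xffu		/* assumed 8-bit base-type field */
#define DEMO_PTR_MAYBE_NULL	0x100u		/* assumed flag bit above that field */

enum demo_ret_type { DEMO_RET_PTR_TO_MAP_VALUE = 1 };
enum demo_reg_type {
	DEMO_NOT_INIT = 0,
	DEMO_PTR_TO_MAP_VALUE,
	DEMO_PTR_TO_MAP_VALUE_OR_NULL,
};

static unsigned int demo_base_type(unsigned int t)
{
	return t & DEMO_BASE_TYPE_MASK;
}

static bool demo_type_may_be_null(unsigned int t)
{
	return t & DEMO_PTR_MAYBE_NULL;
}

/* One branch per base type covers both the plain and the _OR_NULL flavour,
 * instead of one branch per monolithic RET_XXX / RET_XXX_OR_NULL pair.
 */
static enum demo_reg_type demo_reg_type_for(unsigned int ret_type)
{
	if (demo_base_type(ret_type) == DEMO_RET_PTR_TO_MAP_VALUE)
		return demo_type_may_be_null(ret_type) ?
		       DEMO_PTR_TO_MAP_VALUE_OR_NULL : DEMO_PTR_TO_MAP_VALUE;
	return DEMO_NOT_INIT;
}

int main(void)
{
	/* Prints 2 (the _OR_NULL variant), then 1 (the plain variant). */
	printf("%d\n", demo_reg_type_for(DEMO_RET_PTR_TO_MAP_VALUE | DEMO_PTR_MAYBE_NULL));
	printf("%d\n", demo_reg_type_for(DEMO_RET_PTR_TO_MAP_VALUE));
	return 0;
}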