Commit 6373ef1c authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'propagate nullness information for reg to reg comparisons'

Eduard Zingerman says:

====================

This patchset adds the ability to propagate nullness information for
branches of register-to-register equality compare instructions. The
following rules are used:
 - suppose register A may be null
 - suppose register B is not null
 - for JNE A, B, ... - A is not null in the false branch
 - for JEQ A, B, ... - A is not null in the true branch

E.g. for program like below:

  r6 = skb->sk;
  r7 = sk_fullsock(r6);
  r0 = sk_fullsock(r6);
  if (r0 == 0) return 0;    (a)
  if (r0 != r7) return 0;   (b)
  *r7->type;                (c)
  return 0;

It is safe to dereference r7 at point (c), because of (a) and (b).

The utility of this change came up while working on a BPF Clang backend
issue [1]. Specifically, while debugging an issue with the selftest
`test_sk_lookup.c`. This test has the following structure:

    int access_ctx_sk(struct bpf_sk_lookup *ctx __CTX__)
    {
        struct bpf_sock *sk1 = NULL, *sk2 = NULL;
        ...
        sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
        if (!sk1)           // (a)
            goto out;
        ...
        if (ctx->sk != sk1) // (b)
            goto out;
        ...
        if (ctx->sk->family != AF_INET ||     // (c)
            ctx->sk->type != SOCK_STREAM ||
            ctx->sk->state != BPF_TCP_LISTEN)
            goto out;
            ...
    }

- at (a) `sk1` is checked to be not null;
- at (b) `ctx->sk` is verified to be equal to `sk1`;
- at (c) `ctx->sk` is accessed w/o nullness check.

Currently the Global Value Numbering pass considers expressions `sk1` and
`ctx->sk` to be identical at point (c) and replaces `ctx->sk` with
`sk1` (not expressions themselves but corresponding SSA values).
Since `sk1` is known to be not null after (b) verifier allows
execution of the program.

However, such optimization is not guaranteed to happen. When it does
not happen verifier reports an error.

Changelog:
v2 -> v3:
 - verifier tests are updated with correct error message for
   unprivileged mode (pointer comparisons are forbidden in
   unprivileged mode).

v1 -> v2:
 - after investigation described in [2] as suggested by John, Daniel
   and Shung-Hsi, function `type_is_pointer` is removed, calls to this
   function are replaced by `__is_pointer_value(false, src_reg)`.

RFC -> v1:
 - newly added if block in `check_cond_jmp_op` is moved down to keep
   `mark_ptr_not_null_reg` actions together;
 - tests rewritten to have a single `r0 = 0; exit;` block.

[1]   https://reviews.llvm.org/D131633#3722231
[2]   https://lore.kernel.org/bpf/bad8be826d088e0d180232628160bf932006de89.camel@gmail.com/
[RFC] https://lore.kernel.org/bpf/20220822094312.175448-1-eddyz87@gmail.com/
[v1]  https://lore.kernel.org/bpf/20220826172915.1536914-1-eddyz87@gmail.com/
[v2]  https://lore.kernel.org/bpf/20221106214921.117631-1-eddyz87@gmail.com/
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents 32637e33 4741c371
...@@ -10267,6 +10267,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, ...@@ -10267,6 +10267,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_verifier_state *other_branch; struct bpf_verifier_state *other_branch;
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
u8 opcode = BPF_OP(insn->code); u8 opcode = BPF_OP(insn->code);
bool is_jmp32; bool is_jmp32;
int pred = -1; int pred = -1;
...@@ -10376,8 +10377,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, ...@@ -10376,8 +10377,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
/* detect if we are comparing against a constant value so we can adjust /* detect if we are comparing against a constant value so we can adjust
* our min/max values for our dst register. * our min/max values for our dst register.
* this is only legit if both are scalars (or pointers to the same * this is only legit if both are scalars (or pointers to the same
* object, I suppose, but we don't support that right now), because * object, I suppose, see the PTR_MAYBE_NULL related if block below),
* otherwise the different base pointers mean the offsets aren't * because otherwise the different base pointers mean the offsets aren't
* comparable. * comparable.
*/ */
if (BPF_SRC(insn->code) == BPF_X) { if (BPF_SRC(insn->code) == BPF_X) {
...@@ -10426,6 +10427,36 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, ...@@ -10426,6 +10427,36 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]); find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
} }
/* if one pointer register is compared to another pointer
* register check if PTR_MAYBE_NULL could be lifted.
* E.g. register A - maybe null
* register B - not null
* for JNE A, B, ... - A is not null in the false branch;
* for JEQ A, B, ... - A is not null in the true branch.
*/
if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
__is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type)) {
eq_branch_regs = NULL;
switch (opcode) {
case BPF_JEQ:
eq_branch_regs = other_branch_regs;
break;
case BPF_JNE:
eq_branch_regs = regs;
break;
default:
/* do nothing */
break;
}
if (eq_branch_regs) {
if (type_may_be_null(src_reg->type))
mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
else
mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
}
}
/* detect if R == 0 where R is returned from bpf_map_lookup_elem(). /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
* NOTE: these optimizations below are related with pointer comparison * NOTE: these optimizations below are related with pointer comparison
* which will never be JMP32. * which will never be JMP32.
......
{
/* This is equivalent to the following program:
*
* r6 = skb->sk;
* r7 = sk_fullsock(r6);
* r0 = sk_fullsock(r6);
* if (r0 == 0) return 0; (a)
* if (r0 != r7) return 0; (b)
* *r7->type; (c)
* return 0;
*
* It is safe to dereference r7 at point (c), because of (a) and (b).
* The test verifies that relation r0 == r7 is propagated from (b) to (c).
*/
"jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch",
.insns = {
/* r6 = skb->sk; */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
/* if (r6 == 0) return 0; (skip the next 8 insns) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8),
/* r7 = sk_fullsock(r6); (r6 = skb->sk, loaded above) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r0 = sk_fullsock(r6); second lookup: r0 is checked, r7 is not */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
/* if (r0 == null) return 0; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* if (r0 == r7) r0 = *(r7->type);
* JNE false branch (fallthrough, r0 == r7): verifier should lift
* PTR_MAYBE_NULL from r7 because r0 is known not-null here.
*/
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = ACCEPT,
/* pointer comparisons are forbidden in unprivileged mode */
.result_unpriv = REJECT,
.errstr_unpriv = "R7 pointer comparison",
},
{
/* Same as above, but verify that another branch of JNE still
* prohibits access to PTR_MAYBE_NULL.
*/
"jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch",
.insns = {
/* r6 = skb->sk */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
/* if (r6 == 0) return 0; (skip the next 9 insns) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9),
/* r7 = sk_fullsock(r6); (r6 = skb->sk, loaded above) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r0 = sk_fullsock(r6); second lookup: r0 is checked, r7 is not */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
/* if (r0 == null) return 0; */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
/* if (r0 == r7) return 0;
* JNE true branch (jump taken, r0 != r7): r7 must stay
* PTR_TO_SOCKET_OR_NULL, so the load below must be rejected.
*/
BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
/* r0 = *(r7->type); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
/* return 0 */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = REJECT,
.errstr = "R7 invalid mem access 'sock_or_null'",
/* pointer comparisons are forbidden in unprivileged mode */
.result_unpriv = REJECT,
.errstr_unpriv = "R7 pointer comparison",
},
{
/* Same as a first test, but not null should be inferred for JEQ branch */
"jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch",
.insns = {
/* r6 = skb->sk; */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
/* if (r6 == null) return 0; (skip the next 9 insns) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9),
/* r7 = sk_fullsock(r6); (r6 = skb->sk, loaded above) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r0 = sk_fullsock(r6); second lookup: r0 is checked, r7 is not */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
/* if (r0 == null) return 0; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
/* if (r0 != r7) return 0;
* JEQ true branch (jump taken, r0 == r7): verifier should lift
* PTR_MAYBE_NULL from r7 because r0 is known not-null here.
*/
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
/* r0 = *(r7->type); */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
/* return 0; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = ACCEPT,
/* pointer comparisons are forbidden in unprivileged mode */
.result_unpriv = REJECT,
.errstr_unpriv = "R7 pointer comparison",
},
{
/* Same as above, but verify that another branch of JNE still
* prohibits access to PTR_MAYBE_NULL.
*/
"jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch",
.insns = {
/* r6 = skb->sk; */
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)),
/* if (r6 == null) return 0; (skip the next 8 insns) */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8),
/* r7 = sk_fullsock(r6); (r6 = skb->sk, loaded above) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* r0 = sk_fullsock(r6); second lookup: r0 is checked, r7 is not */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
/* if (r0 == null) return 0; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
/* if (r0 != r7) r0 = *(r7->type);
* JEQ false branch (fallthrough, r0 != r7): r7 must stay
* PTR_TO_SOCKET_OR_NULL, so the load below must be rejected.
*/
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
/* return 0; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.result = REJECT,
.errstr = "R7 invalid mem access 'sock_or_null'",
/* pointer comparisons are forbidden in unprivileged mode */
.result_unpriv = REJECT,
.errstr_unpriv = "R7 pointer comparison",
},
{
/* Maps are treated in a different branch of `mark_ptr_not_null_reg`,
* so separate test for maps case.
*/
"jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE",
.insns = {
/* r9 = &some stack to use as key */
BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_9, -8),
/* r8 = process local map (fd patched in by fixup_map_xskmap below) */
BPF_LD_MAP_FD(BPF_REG_8, 0),
/* r6 = map_lookup_elem(r8, r9); first lookup: will be null-checked */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* r7 = map_lookup_elem(r8, r9); second lookup: not null-checked */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* if (r6 == 0) return 0; */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2),
/* if (r6 != r7) return 0;
* JNE false branch (fallthrough, r6 == r7): r7's PTR_MAYBE_NULL
* should be lifted because r6 is known not-null here.
*/
BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1),
/* read *r7; */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_xdp_sock, queue_id)),
/* return 0; */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
/* patch the xskmap fd into the BPF_LD_MAP_FD at insn index 3 */
.fixup_map_xskmap = { 3 },
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment