Commit 604dca5e authored by Jann Horn, committed by Alexei Starovoitov

bpf: Fix tnum constraints for 32-bit comparisons

The BPF verifier tried to track values based on 32-bit comparisons by
(ab)using the tnum state via 581738a6 ("bpf: Provide better register
bounds after jmp32 instructions"). The idea is that after a check like
this:

    if ((u32)r0 > 3)
      exit

We can't meaningfully constrain the arithmetic-range-based tracking, but
we can update the tnum state to (value=0,mask=0xffff'ffff'0000'0003).
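
For illustration, here is a minimal userspace sketch of that derivation (the
tnum helper below is a simplified re-implementation of the one in
kernel/bpf/tnum.c, written only for this example, not the verifier code
itself):

    #include <stdio.h>
    #include <stdint.h>

    struct tnum { uint64_t value; uint64_t mask; };

    /* Simplified tnum_range(): tnum covering the unsigned range [min, max]. */
    static struct tnum tnum_range(uint64_t min, uint64_t max)
    {
        uint64_t chi = min ^ max, delta;
        int bits;

        if (chi == 0)
            return (struct tnum){ min, 0 };
        bits = 64 - __builtin_clzll(chi);
        if (bits > 63)
            return (struct tnum){ 0, ~0ULL };   /* fully unknown */
        delta = (1ULL << bits) - 1;
        return (struct tnum){ min & ~delta, delta };
    }

    int main(void)
    {
        /* False branch of `if ((u32)r0 > 3) exit`: low 32 bits are in [0, 3]. */
        struct tnum t = tnum_range(0, 3);

        t.mask |= ~0xffffffffULL;   /* the upper 32 bits stay unknown */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)t.value, (unsigned long long)t.mask);
        /* prints: value=0 mask=0xffffffff00000003 */
        return 0;
    }
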
However, the implementation from 581738a6 didn't compute the tnum
constraint based on the fixed operand, but instead derived it from the
arithmetic-range-based tracking. This means that after the following
sequence of operations:

    if (r0 >= 0x1'0000'0001)
      exit
    if ((u32)r0 > 7)
      exit

The verifier assumed that the lower half of r0 was in the range (0, 0)
and applied the tnum constraint (value=0,mask=0xffff'ffff'0000'0000), thus
causing the overall tnum to be (value=0,mask=0x1'0000'0000), which was
incorrect. Provide a fixed implementation.
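
To make the incorrectness concrete: r0 == 5 satisfies both checks above
(5 < 0x1'0000'0001 and (u32)5 <= 7), yet a tnum of (value=0,mask=0x1'0000'0000)
only admits the values 0 and 0x1'0000'0000. A self-contained check, using the
standard "known bits must match" membership rule for tnums:

    #include <stdio.h>
    #include <stdint.h>

    /* A value x is representable by a tnum (value, mask) iff every bit
     * outside the mask matches `value`, i.e. (x & ~mask) == value.
     */
    static int tnum_contains(uint64_t value, uint64_t mask, uint64_t x)
    {
        return (x & ~mask) == value;
    }

    int main(void)
    {
        uint64_t x = 5;  /* passes both checks in the example above */

        /* tnum the buggy derivation ended up with: excludes 5 */
        printf("buggy   tnum admits 5? %d\n",
               tnum_contains(0, 0x100000000ULL, x));
        /* tnum actually implied by the 32-bit check alone: admits 5 */
        printf("correct tnum admits 5? %d\n",
               tnum_contains(0, 0xffffffff00000007ULL, x));
        return 0;
    }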

Fixes: 581738a6 ("bpf: Provide better register bounds after jmp32 instructions")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200330160324.15259-3-daniel@iogearbox.net
parent f2d67fec
@@ -5678,6 +5678,70 @@ static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
 	       reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
 }
 
+/* Constrain the possible values of @reg with unsigned upper bound @bound.
+ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
+ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
+ * of @reg.
+ */
+static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
+			    bool is_exclusive)
+{
+	if (is_exclusive) {
+		/* There are no values for `reg` that make `reg<0` true. */
+		if (bound == 0)
+			return;
+		bound--;
+	}
+	if (is_jmp32) {
+		/* Constrain the register's value in the tnum representation.
+		 * For 64-bit comparisons this happens later in
+		 * __reg_bound_offset(), but for 32-bit comparisons, we can be
+		 * more precise than what can be derived from the updated
+		 * numeric bounds.
+		 */
+		struct tnum t = tnum_range(0, bound);
+
+		t.mask |= ~0xffffffffULL; /* upper half is unknown */
+		reg->var_off = tnum_intersect(reg->var_off, t);
+
+		/* Compute the 64-bit bound from the 32-bit bound. */
+		bound += gen_hi_max(reg->var_off);
+	}
+	reg->umax_value = min(reg->umax_value, bound);
+}
+
+/* Constrain the possible values of @reg with unsigned lower bound @bound.
+ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive.
+ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits
+ * of @reg.
+ */
+static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32,
+			    bool is_exclusive)
+{
+	if (is_exclusive) {
+		/* There are no values for `reg` that make `reg>MAX` true. */
+		if (bound == (is_jmp32 ? U32_MAX : U64_MAX))
+			return;
+		bound++;
+	}
+	if (is_jmp32) {
+		/* Constrain the register's value in the tnum representation.
+		 * For 64-bit comparisons this happens later in
+		 * __reg_bound_offset(), but for 32-bit comparisons, we can be
+		 * more precise than what can be derived from the updated
+		 * numeric bounds.
+		 */
+		struct tnum t = tnum_range(bound, U32_MAX);
+
+		t.mask |= ~0xffffffffULL; /* upper half is unknown */
+		reg->var_off = tnum_intersect(reg->var_off, t);
+
+		/* Compute the 64-bit bound from the 32-bit bound. */
+		bound += gen_hi_min(reg->var_off);
+	}
+	reg->umin_value = max(reg->umin_value, bound);
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -5733,15 +5797,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	case BPF_JGE:
 	case BPF_JGT:
 	{
-		u64 false_umax = opcode == BPF_JGT ? val : val - 1;
-		u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
-
-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
-		false_reg->umax_value = min(false_reg->umax_value, false_umax);
-		true_reg->umin_value = max(true_reg->umin_value, true_umin);
+		set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
+		set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
 		break;
 	}
 	case BPF_JSGE:
@@ -5762,15 +5819,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 	case BPF_JLE:
 	case BPF_JLT:
 	{
-		u64 false_umin = opcode == BPF_JLT ? val : val + 1;
-		u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
-
-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
-		false_reg->umin_value = max(false_reg->umin_value, false_umin);
-		true_reg->umax_value = min(true_reg->umax_value, true_umax);
+		set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
+		set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
 		break;
 	}
 	case BPF_JSLE:
@@ -5845,15 +5895,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 	case BPF_JGE:
 	case BPF_JGT:
 	{
-		u64 false_umin = opcode == BPF_JGT ? val : val + 1;
-		u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
-
-		if (is_jmp32) {
-			false_umin += gen_hi_min(false_reg->var_off);
-			true_umax += gen_hi_max(true_reg->var_off);
-		}
-		false_reg->umin_value = max(false_reg->umin_value, false_umin);
-		true_reg->umax_value = min(true_reg->umax_value, true_umax);
+		set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE);
+		set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT);
 		break;
 	}
 	case BPF_JSGE:
@@ -5871,15 +5914,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 	case BPF_JLE:
 	case BPF_JLT:
 	{
-		u64 false_umax = opcode == BPF_JLT ? val : val - 1;
-		u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
-
-		if (is_jmp32) {
-			false_umax += gen_hi_max(false_reg->var_off);
-			true_umin += gen_hi_min(true_reg->var_off);
-		}
-		false_reg->umax_value = min(false_reg->umax_value, false_umax);
-		true_reg->umin_value = max(true_reg->umin_value, true_umin);
+		set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE);
+		set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT);
 		break;
 	}
 	case BPF_JSLE:
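
For readers who want to poke at the new logic outside the kernel, below is a
minimal userspace sketch of the jmp32 path of set_upper_bound() applied to the
example from the commit message. tnum_range(), tnum_intersect() and
gen_hi_max() are simplified stand-ins written for this sketch, not the kernel
implementations:

    #include <stdio.h>
    #include <stdint.h>

    struct tnum { uint64_t value; uint64_t mask; };

    /* Simplified tnum_range(): tnum covering the unsigned range [min, max]. */
    static struct tnum tnum_range(uint64_t min, uint64_t max)
    {
        uint64_t chi = min ^ max, delta;
        int bits;

        if (chi == 0)
            return (struct tnum){ min, 0 };
        bits = 64 - __builtin_clzll(chi);
        if (bits > 63)
            return (struct tnum){ 0, ~0ULL };   /* fully unknown */
        delta = (1ULL << bits) - 1;
        return (struct tnum){ min & ~delta, delta };
    }

    /* Simplified tnum_intersect(): keep only the bit-states both agree on. */
    static struct tnum tnum_intersect(struct tnum a, struct tnum b)
    {
        uint64_t v = a.value | b.value, mu = a.mask & b.mask;

        return (struct tnum){ v & ~mu, mu };
    }

    /* Stand-in for gen_hi_max(): largest possible value of the upper 32 bits. */
    static uint64_t gen_hi_max(struct tnum var)
    {
        return (var.value | var.mask) & ~0xffffffffULL;
    }

    struct reg_state { struct tnum var_off; uint64_t umax_value; };

    /* Mirrors set_upper_bound(reg, bound, is_jmp32=true, is_exclusive=false). */
    static void set_upper_bound32(struct reg_state *reg, uint64_t bound)
    {
        struct tnum t = tnum_range(0, bound);

        t.mask |= ~0xffffffffULL;          /* upper half is unknown */
        reg->var_off = tnum_intersect(reg->var_off, t);
        bound += gen_hi_max(reg->var_off); /* 64-bit bound from the 32-bit bound */
        if (bound < reg->umax_value)
            reg->umax_value = bound;
    }

    int main(void)
    {
        /* State after the false branch of `if (r0 >= 0x1'0000'0001) exit`. */
        struct reg_state r0 = {
            .var_off    = tnum_range(0, 0x100000000ULL),
            .umax_value = 0x100000000ULL,
        };

        /* False branch of `if ((u32)r0 > 7) exit`. */
        set_upper_bound32(&r0, 7);

        printf("var_off=(value=%#llx, mask=%#llx), umax=%#llx\n",
               (unsigned long long)r0.var_off.value,
               (unsigned long long)r0.var_off.mask,
               (unsigned long long)r0.umax_value);
        /* prints: var_off=(value=0, mask=0x100000007), umax=0x100000000
         * r0 == 5 is still admitted, unlike with the old derivation.
         */
        return 0;
    }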