Commit 161f3cbc authored by Alexei Starovoitov

Merge branch 'jmp32-reg-bounds'

Yonghong Song says:

====================
With the latest llvm, the bpf selftest test_progs, which has +alu32 enabled,
failed for strobemeta.o and a few other subtests. The reason is that the
verifier did not provide a better var_off.mask after jmp32 instructions.
This patch set addresses the issue; after the fix, test_progs passes
with alu32.

Patch #1 provides a detailed explanation of the problem and the fix.
Patch #2 adds three tests in test_verifier.

Changelog:
  v1 -> v2:
    - do not directly manipulate tnum.{value,mask} in __reg_bound_offset32();
      use the tnum_lshift/tnum_rshift helpers instead
    - run __reg_bound_offset32() after the regular 64-bit __reg_bound_offset(),
      since the latter may give a better upper 32-bit var_off, which
      __reg_bound_offset32() can then inherit
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 6147a140 260cb5df
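
Before the diff itself, a minimal stand-alone sketch of the technique may help.
The tnum helpers below are simplified re-implementations modeled on the
kernel's tnum ("tracked number") code in kernel/bpf/tnum.c, and
bound_offset32() mirrors the logic of the new __reg_bound_offset32().
Names such as tnum_make, the fls64 fallback, and bound_offset32 are local to
this sketch, not kernel API.

/* Stand-alone model of the 32-bit bound refinement. Not the kernel
 * implementation; simplified helpers in the spirit of kernel/bpf/tnum.c.
 */
#include <stdint.h>
#include <stdio.h>

struct tnum {
        uint64_t value; /* bits known to be set */
        uint64_t mask;  /* bits whose value is unknown */
};

static struct tnum tnum_make(uint64_t value, uint64_t mask)
{
        return (struct tnum){ .value = value, .mask = mask };
}

/* find-last-set fallback for this sketch */
static uint8_t fls64(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

/* Tightest tnum containing every integer in [min, max]. */
static struct tnum tnum_range(uint64_t min, uint64_t max)
{
        uint64_t chi = min ^ max, delta;
        uint8_t bits = fls64(chi);

        if (bits > 63)  /* 1ULL << 64 is undefined */
                return tnum_make(0, ~0ULL);
        delta = (1ULL << bits) - 1;
        return tnum_make(min & ~delta, delta);
}

static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
{
        return tnum_make(a.value << shift, a.mask << shift);
}

static struct tnum tnum_rshift(struct tnum a, uint8_t shift)
{
        return tnum_make(a.value >> shift, a.mask >> shift);
}

/* Truncate to the low 'size' bytes. */
static struct tnum tnum_cast(struct tnum a, uint8_t size)
{
        uint64_t keep = (size < 8) ? (1ULL << (size * 8)) - 1 : ~0ULL;
        return tnum_make(a.value & keep, a.mask & keep);
}

static struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
        uint64_t v = a.value | b.value;
        uint64_t mu = a.mask & b.mask;
        return tnum_make(v & ~mu, mu);
}

static struct tnum tnum_or(struct tnum a, struct tnum b)
{
        uint64_t v = a.value | b.value;
        uint64_t mu = a.mask | b.mask;
        return tnum_make(v & ~mu, mu & ~v);
}

/* Mirror of __reg_bound_offset32(): refine only the low 32 bits of
 * var_off from the 32-bit slice of [umin, umax], preserving whatever
 * is already known about the upper 32 bits.
 */
static struct tnum bound_offset32(struct tnum var_off,
                                  uint64_t umin, uint64_t umax)
{
        uint64_t mask = 0xffffFFFF;
        struct tnum range = tnum_range(umin & mask, umax & mask);
        struct tnum lo32 = tnum_cast(var_off, 4);
        struct tnum hi32 = tnum_lshift(tnum_rshift(var_off, 32), 32);

        return tnum_or(hi32, tnum_intersect(lo32, range));
}

int main(void)
{
        /* Fall-through of "jgt32 w0, 1": the low 32 bits are in [0, 1];
         * a 32-bit compare says nothing about the upper 32 bits.
         */
        struct tnum unknown = tnum_make(0, ~0ULL);
        struct tnum refined = bound_offset32(unknown, 0, 1);

        /* Prints value=0 mask=0xffffffff00000001: only bit 0 of the
         * low word is still unknown, which is what lets the verifier
         * bound the register after a later zero-extension.
         */
        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)refined.value,
               (unsigned long long)refined.mask);
        return 0;
}
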
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1007,6 +1007,17 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
                                              reg->umax_value));
 }
 
+static void __reg_bound_offset32(struct bpf_reg_state *reg)
+{
+       u64 mask = 0xffffFFFF;
+       struct tnum range = tnum_range(reg->umin_value & mask,
+                                      reg->umax_value & mask);
+       struct tnum lo32 = tnum_cast(reg->var_off, 4);
+       struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
+
+       reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
+}
+
 /* Reset the min/max bounds of a register */
 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
 {
@@ -5589,6 +5600,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
        /* We might have learned some bits from the bounds. */
        __reg_bound_offset(false_reg);
        __reg_bound_offset(true_reg);
+       if (is_jmp32) {
+               __reg_bound_offset32(false_reg);
+               __reg_bound_offset32(true_reg);
+       }
        /* Intersecting with the old var_off might have improved our bounds
         * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
         * then new var_off is (0; 0x7f...fc) which improves our umax.
@@ -5698,6 +5713,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
        /* We might have learned some bits from the bounds. */
        __reg_bound_offset(false_reg);
        __reg_bound_offset(true_reg);
+       if (is_jmp32) {
+               __reg_bound_offset32(false_reg);
+               __reg_bound_offset32(true_reg);
+       }
        /* Intersecting with the old var_off might have improved our bounds
         * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
         * then new var_off is (0; 0x7f...fc) which improves our umax.
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -744,3 +744,86 @@
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "jgt32: range bound deduction, reg op imm",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jle32: range bound deduction, reg1 op reg2, reg2 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
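
All three tests share one skeleton: a lookup in a 48-byte hash map
(fixup_map_hash_48b) leaves a value pointer in r8, bpf_get_cgroup_classid()
leaves an unbounded scalar in r0, and a single 32-bit conditional branch is
all that bounds w0 on the fall-through path. The LSH/RSH-by-32 pair then
isolates the low 32 bits of r0 into r6 before it is added to the value
pointer, so the byte store at r8+r6 is only provably inside the 48-byte
value if the verifier carried the jmp32 bounds into var_off. Without
__reg_bound_offset32(), r6 would still look like [0, 0xffffffff] after the
shifts and all three programs would be rejected.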