Commit 27113c59 authored by Martin KaFai Lau, committed by Alexei Starovoitov

bpf: Check the other end of slot_type for STACK_SPILL

Every 8 bytes of the stack is tracked by a bpf_stack_state. Within
each bpf_stack_state, there is a 'u8 slot_type[8]' to track the type
of each byte.  The verifier tests slot_type[0] == STACK_SPILL to
decide if the spilled reg state is saved.  The verifier currently
only saves the reg state if the whole 8 bytes are spilled to the
stack, so checking slot_type[7] is the same as checking slot_type[0].
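
For illustration, here is a minimal standalone sketch of the per-byte
tracking and the slot_type[0] test described above (simplified
stand-in types, not the kernel's own definitions):

  #include <stdio.h>

  #define BPF_REG_SIZE 8          /* one bpf_stack_state per 8 stack bytes */

  typedef unsigned char u8;

  enum { STACK_INVALID, STACK_SPILL, STACK_MISC };

  struct bpf_stack_state {
          u8 slot_type[BPF_REG_SIZE];     /* type of each byte in the slot */
  };

  int main(void)
  {
          struct bpf_stack_state stack = { .slot_type = { STACK_INVALID } };
          int i;

          /* a full 8-byte register spill marks every byte of the slot */
          for (i = 0; i < BPF_REG_SIZE; i++)
                  stack.slot_type[i] = STACK_SPILL;

          /* pre-patch test: only slot_type[0] is examined, which is fine
           * as long as the whole 8 bytes are always spilled together
           */
          printf("spilled: %d\n", stack.slot_type[0] == STACK_SPILL);
          return 0;
  }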

A later patch will allow the verifier to save the bounded scalar
reg also for a <8 bytes spill.  There is an llvm patch [1] to ensure
the <8 bytes spill will be 8-byte aligned, so checking
slot_type[7] instead of slot_type[0] is required.
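
The reason slot_type[7] works for both cases: with an 8-byte aligned
spill, the written bytes start at the lowest address of the slot, and
slot_type[BPF_REG_SIZE - 1] tracks that lowest byte, so it is marked
for any spill size while slot_type[0] is only marked by a full 8-byte
spill.  A standalone sketch of that layout (the byte-to-index mapping
in mark_aligned_spill() below is an assumption for illustration, not
the kernel code):

  #include <stdbool.h>
  #include <stdio.h>

  #define BPF_REG_SIZE 8

  typedef unsigned char u8;

  enum { STACK_INVALID, STACK_SPILL, STACK_MISC };

  struct bpf_stack_state {
          u8 slot_type[BPF_REG_SIZE];
  };

  /* Assumed layout: slot_type[7] maps to the lowest address of the
   * slot, so an 8-byte aligned 'size'-byte spill marks slot_type[7]
   * down to slot_type[8 - size].
   */
  static void mark_aligned_spill(struct bpf_stack_state *stack, int size)
  {
          int i;

          for (i = 0; i < size; i++)
                  stack->slot_type[BPF_REG_SIZE - 1 - i] = STACK_SPILL;
  }

  /* the helper added by this patch: test the other end of slot_type[] */
  static bool is_spilled_reg(const struct bpf_stack_state *stack)
  {
          return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
  }

  int main(void)
  {
          struct bpf_stack_state stack = { .slot_type = { STACK_INVALID } };

          mark_aligned_spill(&stack, 4);  /* 4-byte bounded scalar spill */
          printf("slot_type[0]:   %d\n",  /* prints 0: old test misses it */
                 stack.slot_type[0] == STACK_SPILL);
          printf("is_spilled_reg: %d\n",  /* prints 1: new test sees it */
                 is_spilled_reg(&stack));
          return 0;
  }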

While at it, this patch refactors the slot_type[0] == STACK_SPILL
test into a new function is_spilled_reg(), and changes the
slot_type[0] check to a slot_type[7] check there as well.

[1] https://reviews.llvm.org/D109073

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210922004934.624194-1-kafai@fb.com
parent 091037fb
kernel/bpf/verifier.c

@@ -612,6 +612,14 @@ static const char *kernel_type_name(const struct btf* btf, u32 id)
         return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
 }
 
+/* The reg state of a pointer or a bounded scalar was saved when
+ * it was spilled to the stack.
+ */
+static bool is_spilled_reg(const struct bpf_stack_state *stack)
+{
+        return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
+}
+
 static void print_verifier_state(struct bpf_verifier_env *env,
                                  const struct bpf_func_state *state)
 {
@@ -717,7 +725,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                         continue;
                 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
                 print_liveness(env, state->stack[i].spilled_ptr.live);
-                if (state->stack[i].slot_type[0] == STACK_SPILL) {
+                if (is_spilled_reg(&state->stack[i])) {
                         reg = &state->stack[i].spilled_ptr;
                         t = reg->type;
                         verbose(env, "=%s", reg_type_str[t]);
@@ -2373,7 +2381,7 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
                                 reg->precise = true;
                         }
                         for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
-                                if (func->stack[j].slot_type[0] != STACK_SPILL)
+                                if (!is_spilled_reg(&func->stack[j]))
                                         continue;
                                 reg = &func->stack[j].spilled_ptr;
                                 if (reg->type != SCALAR_VALUE)
@@ -2415,7 +2423,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
         }
 
         while (spi >= 0) {
-                if (func->stack[spi].slot_type[0] != STACK_SPILL) {
+                if (!is_spilled_reg(&func->stack[spi])) {
                         stack_mask = 0;
                         break;
                 }
@@ -2514,7 +2522,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
                                 return 0;
                         }
 
-                        if (func->stack[i].slot_type[0] != STACK_SPILL) {
+                        if (!is_spilled_reg(&func->stack[i])) {
                                 stack_mask &= ~(1ull << i);
                                 continue;
                         }
@@ -2713,7 +2721,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                 /* regular write of data into stack destroys any spilled ptr */
                 state->stack[spi].spilled_ptr.type = NOT_INIT;
                 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
-                if (state->stack[spi].slot_type[0] == STACK_SPILL)
+                if (is_spilled_reg(&state->stack[spi]))
                         for (i = 0; i < BPF_REG_SIZE; i++)
                                 state->stack[spi].slot_type[i] = STACK_MISC;
 
@@ -2923,7 +2931,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
         stype = reg_state->stack[spi].slot_type;
         reg = &reg_state->stack[spi].spilled_ptr;
 
-        if (stype[0] == STACK_SPILL) {
+        if (is_spilled_reg(&reg_state->stack[spi])) {
                 if (size != BPF_REG_SIZE) {
                         if (reg->type != SCALAR_VALUE) {
                                 verbose_linfo(env, env->insn_idx, "; ");
@@ -4514,11 +4522,11 @@ static int check_stack_range_initialized(
                         goto mark;
                 }
 
-                if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+                if (is_spilled_reg(&state->stack[spi]) &&
                     state->stack[spi].spilled_ptr.type == PTR_TO_BTF_ID)
                         goto mark;
 
-                if (state->stack[spi].slot_type[0] == STACK_SPILL &&
+                if (is_spilled_reg(&state->stack[spi]) &&
                     (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
                      env->allow_ptr_leaks)) {
                         if (clobber) {
@@ -10356,9 +10364,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
                          * return false to continue verification of this path
                          */
                         return false;
-                if (i % BPF_REG_SIZE)
+                if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
                         continue;
-                if (old->stack[spi].slot_type[0] != STACK_SPILL)
+                if (!is_spilled_reg(&old->stack[spi]))
                         continue;
                 if (!regsafe(env, &old->stack[spi].spilled_ptr,
                              &cur->stack[spi].spilled_ptr, idmap))
@@ -10565,7 +10573,7 @@ static int propagate_precision(struct bpf_verifier_env *env,
         }
 
         for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-                if (state->stack[i].slot_type[0] != STACK_SPILL)
+                if (!is_spilled_reg(&state->stack[i]))
                         continue;
                 state_reg = &state->stack[i].spilled_ptr;
                 if (state_reg->type != SCALAR_VALUE ||