Commit 84150795 authored by Alexei Starovoitov

Merge branch 'Dynptr fixes'

Kumar Kartikeya Dwivedi says:

====================

This is part 2 of https://lore.kernel.org/bpf/20221018135920.726360-1-memxor@gmail.com.

Changelog:
----------
v4 -> v5
v5: https://lore.kernel.org/bpf/20230120070355.1983560-1-memxor@gmail.com

 * Add comments, tests from Joanne
 * Add Joanne's acks

v3 -> v4
v3: https://lore.kernel.org/bpf/20230120034314.1921848-1-memxor@gmail.com

 * Adapt BPF ASM tests to a more readable style (Alexei)

v2 -> v3
v2: https://lore.kernel.org/bpf/20230119021442.1465269-1-memxor@gmail.com

 * Fix slice invalidation logic for unreferenced dynptrs (Joanne)
 * Add selftests for precise slice invalidation on destruction
 * Add Joanne's acks

v1 -> v2
v1: https://lore.kernel.org/bpf/20230101083403.332783-1-memxor@gmail.com

 * Return error early in case of overwriting referenced dynptr slots (Andrii, Joanne)
 * Rename destroy_stack_slots_dynptr to destroy_if_dynptr_stack_slot (Joanne)
 * Invalidate dynptr slices associated with dynptr in destroy_if_dynptr_stack_slot (Joanne)
 * Combine both dynptr_get_spi and is_spi_bounds_valid (Joanne)
 * Compute spi once in process_dynptr_func and pass it as parameter instead of recomputing (Joanne)
 * Add comments expanding REG_LIVE_WRITTEN marking in unmark_stack_slots_dynptr (Joanne)
 * Add comments explaining why destroy_if_dynptr_stack_slot call needs to be done for both spi
   and spi - 1 (Joanne)
 * Port BPF assembly tests from test_verifier to test_progs framework (Andrii)
 * Address misc feedback, rebase to bpf-next

Old v1 -> v1
Old v1: https://lore.kernel.org/bpf/20221018135920.726360-1-memxor@gmail.com

 * Allow overwriting dynptr stack slots from dynptr init helpers
 * Fix a bug in alignment check where reg->var_off.value was still not included
 * Address other minor nits

Eduard Zingerman (1):
  selftests/bpf: convenience macro for use with 'asm volatile' blocks
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 00b8f39f ae8e354c
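
For orientation, here is a minimal, hypothetical BPF C program of the kind this series now rejects at load time; the includes and the ringbuf map declaration are abbreviated assumptions, while the helpers and the "cannot overwrite referenced dynptr" message come from the selftests added in this merge:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);	/* assumed size, any valid ringbuf size works */
} ringbuf SEC(".maps");

SEC("?raw_tp")
int overwrite_referenced_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;

	/* Reserving through a dynptr acquires a verifier-tracked reference. */
	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
	/* Scribbling over the dynptr's stack slots would leak that
	 * reservation; after this series the verifier rejects the store
	 * with "cannot overwrite referenced dynptr" instead of only
	 * reporting an unreleased reference at the end of verification.
	 */
	*(__u64 *)&ptr = 0;
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
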
@@ -70,7 +70,10 @@ struct bpf_reg_state {
 			u32 btf_id;
 		};

-		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+			u32 mem_size;
+			u32 dynptr_id; /* for dynptr slices */
+		};

 		/* For dynptr stack slots */
 		struct {
...
@@ -255,6 +255,7 @@ struct bpf_call_arg_meta {
 	int mem_size;
 	u64 msize_max_value;
 	int ref_obj_id;
+	int dynptr_id;
 	int map_uid;
 	int func_id;
 	struct btf *btf;
@@ -638,11 +639,19 @@ static void print_liveness(struct bpf_verifier_env *env,
 		verbose(env, "D");
 }

-static int get_spi(s32 off)
+static int __get_spi(s32 off)
 {
 	return (-off - 1) / BPF_REG_SIZE;
 }

+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+				   const struct bpf_reg_state *reg)
+{
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[reg->frameno];
+}
+
 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
 {
 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
@@ -657,12 +666,30 @@ static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
 	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
 }

-static struct bpf_func_state *func(struct bpf_verifier_env *env,
-				   const struct bpf_reg_state *reg)
+static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
-	struct bpf_verifier_state *cur = env->cur_state;
+	int off, spi;

-	return cur->frame[reg->frameno];
+	if (!tnum_is_const(reg->var_off)) {
+		verbose(env, "dynptr has to be at a constant offset\n");
+		return -EINVAL;
+	}
+
+	off = reg->off + reg->var_off.value;
+	if (off % BPF_REG_SIZE) {
+		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
+		return -EINVAL;
+	}
+
+	spi = __get_spi(off);
+	if (spi < 1) {
+		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
+		return -EINVAL;
+	}
+
+	if (!is_spi_bounds_valid(func(env, reg), spi, BPF_DYNPTR_NR_SLOTS))
+		return -ERANGE;
+	return spi;
 }

 static const char *kernel_type_name(const struct btf* btf, u32 id)
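
As a side note, a small userspace sketch (purely illustrative, not part of the patch) of how __get_spi() maps a stack offset to a slot index, and why dynptr_get_spi() insists on spi >= 1: BPF_REG_SIZE is 8 and a 16-byte dynptr occupies two slots, spi and spi - 1.

#include <stdio.h>

#define BPF_REG_SIZE 8

/* Mirrors __get_spi() above: the BPF stack grows down, so off is negative. */
static int __get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	/* A dynptr at fp-16 covers offsets -16..-1, i.e. spi = 1 and spi - 1 = 0. */
	printf("off=-16 -> spi=%d\n", __get_spi(-16)); /* 1 */
	printf("off=-8  -> spi=%d\n", __get_spi(-8));  /* 0: too high up for a dynptr */
	printf("off=-24 -> spi=%d\n", __get_spi(-24)); /* 2 */
	return 0;
}
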
@@ -727,37 +754,58 @@ static bool dynptr_type_refcounted(enum bpf_dynptr_type type)

 static void __mark_dynptr_reg(struct bpf_reg_state *reg,
 			      enum bpf_dynptr_type type,
-			      bool first_slot);
+			      bool first_slot, int dynptr_id);
 static void __mark_reg_not_init(const struct bpf_verifier_env *env,
 				struct bpf_reg_state *reg);

-static void mark_dynptr_stack_regs(struct bpf_reg_state *sreg1,
+static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
+				   struct bpf_reg_state *sreg1,
 				   struct bpf_reg_state *sreg2,
 				   enum bpf_dynptr_type type)
 {
-	__mark_dynptr_reg(sreg1, type, true);
-	__mark_dynptr_reg(sreg2, type, false);
+	int id = ++env->id_gen;
+
+	__mark_dynptr_reg(sreg1, type, true, id);
+	__mark_dynptr_reg(sreg2, type, false, id);
 }

-static void mark_dynptr_cb_reg(struct bpf_reg_state *reg,
+static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
+			       struct bpf_reg_state *reg,
 			       enum bpf_dynptr_type type)
 {
-	__mark_dynptr_reg(reg, type, true);
+	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
 }

+static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
+					 struct bpf_func_state *state, int spi);
+
 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
 				   enum bpf_arg_type arg_type, int insn_idx)
 {
 	struct bpf_func_state *state = func(env, reg);
 	enum bpf_dynptr_type type;
-	int spi, i, id;
+	int spi, i, id, err;

-	spi = get_spi(reg->off);
-
-	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
-		return -EINVAL;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;
+
+	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
+	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
+	 * to ensure that for the following example:
+	 * [d1][d1][d2][d2]
+	 * spi   3   2   1   0
+	 * So marking spi = 2 should lead to destruction of both d1 and d2. In
+	 * case they do belong to same dynptr, second call won't see slot_type
+	 * as STACK_DYNPTR and will simply skip destruction.
+	 */
+	err = destroy_if_dynptr_stack_slot(env, state, spi);
+	if (err)
+		return err;
+	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
+	if (err)
+		return err;

 	for (i = 0; i < BPF_REG_SIZE; i++) {
 		state->stack[spi].slot_type[i] = STACK_DYNPTR;
@@ -768,7 +816,7 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
 	if (type == BPF_DYNPTR_TYPE_INVALID)
 		return -EINVAL;

-	mark_dynptr_stack_regs(&state->stack[spi].spilled_ptr,
+	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
 			       &state->stack[spi - 1].spilled_ptr, type);

 	if (dynptr_type_refcounted(type)) {
@@ -781,6 +829,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
 	}

+	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
 	return 0;
 }
@@ -789,10 +840,9 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 	struct bpf_func_state *state = func(env, reg);
 	int spi, i;

-	spi = get_spi(reg->off);
-
-	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
-		return -EINVAL;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;

 	for (i = 0; i < BPF_REG_SIZE; i++) {
 		state->stack[spi].slot_type[i] = STACK_INVALID;
@@ -805,43 +855,133 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

+	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
+	 *
+	 * While we don't allow reading STACK_INVALID, it is still possible to
+	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
+	 * helpers or insns can do partial read of that part without failing,
+	 * but check_stack_range_initialized, check_stack_read_var_off, and
+	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
+	 * the slot conservatively. Hence we need to prevent those liveness
+	 * marking walks.
+	 *
+	 * This was not a problem before because STACK_INVALID is only set by
+	 * default (where the default reg state has its reg->parent as NULL), or
+	 * in clean_live_states after REG_LIVE_DONE (at which point
+	 * mark_reg_read won't walk reg->parent chain), but not randomly during
+	 * verifier state exploration (like we did above). Hence, for our case
+	 * the parentage chain will still be live (i.e. reg->parent may be
+	 * non-NULL), while earlier reg->parent was NULL, so we need
+	 * REG_LIVE_WRITTEN to screen off read marker propagation, whether it
+	 * is done later on reads or by mark_dynptr_read, and avoid
+	 * unnecessarily marking registers in the verifier state.
+	 */
+	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
 	return 0;
 }

-static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static void __mark_reg_unknown(const struct bpf_verifier_env *env,
+			       struct bpf_reg_state *reg);
+
+static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
+					 struct bpf_func_state *state, int spi)
 {
-	struct bpf_func_state *state = func(env, reg);
-	int spi, i;
+	struct bpf_func_state *fstate;
+	struct bpf_reg_state *dreg;
+	int i, dynptr_id;

-	if (reg->type == CONST_PTR_TO_DYNPTR)
-		return false;
+	/* We always ensure that STACK_DYNPTR is never set partially,
+	 * hence just checking for slot_type[0] is enough. This is
+	 * different for STACK_SPILL, where it may be only set for
+	 * 1 byte, so code has to use is_spilled_reg.
+	 */
+	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
+		return 0;

-	spi = get_spi(reg->off);
-	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
-		return true;
+	/* Reposition spi to first slot */
+	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
+		spi = spi + 1;
+
+	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
+		verbose(env, "cannot overwrite referenced dynptr\n");
+		return -EINVAL;
+	}
+
+	mark_stack_slot_scratched(env, spi);
+	mark_stack_slot_scratched(env, spi - 1);

+	/* Writing partially to one dynptr stack slot destroys both. */
 	for (i = 0; i < BPF_REG_SIZE; i++) {
-		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
-		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
-			return false;
+		state->stack[spi].slot_type[i] = STACK_INVALID;
+		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
 	}

+	dynptr_id = state->stack[spi].spilled_ptr.id;
+
+	/* Invalidate any slices associated with this dynptr */
+	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
+		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
+		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
+			continue;
+		if (dreg->dynptr_id == dynptr_id) {
+			if (!env->allow_ptr_leaks)
+				__mark_reg_not_init(env, dreg);
+			else
+				__mark_reg_unknown(env, dreg);
+		}
+	}));
+
+	/* Do not release reference state, we are destroying dynptr on stack,
+	 * not using some helper to release it. Just reset register.
+	 */
+	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
+	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
+
+	/* Same reason as unmark_stack_slots_dynptr above */
+	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+	return 0;
+}
+
+static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+				       int spi)
+{
+	if (reg->type == CONST_PTR_TO_DYNPTR)
+		return false;
+
+	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
+	 * will do check_mem_access to check and update stack bounds later, so
+	 * return true for that case.
+	 */
+	if (spi < 0)
+		return spi == -ERANGE;
+
+	/* We allow overwriting existing unreferenced STACK_DYNPTR slots, see
+	 * mark_stack_slots_dynptr which calls destroy_if_dynptr_stack_slot to
+	 * ensure dynptr objects at the slots we are touching are completely
+	 * destructed before we reinitialize them for a new one. For referenced
+	 * ones, destroy_if_dynptr_stack_slot returns an error early instead of
+	 * delaying it until the end where the user will get "Unreleased
+	 * reference" error.
+	 */
 	return true;
 }

-static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+				     int spi)
 {
 	struct bpf_func_state *state = func(env, reg);
-	int spi;
 	int i;

 	/* This already represents first slot of initialized bpf_dynptr */
 	if (reg->type == CONST_PTR_TO_DYNPTR)
 		return true;

-	spi = get_spi(reg->off);
-	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
-	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
+	if (spi < 0)
+		return false;
+	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
 		return false;

 	for (i = 0; i < BPF_REG_SIZE; i++) {
@@ -868,7 +1008,9 @@ static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
 	if (reg->type == CONST_PTR_TO_DYNPTR) {
 		return reg->dynptr.type == dynptr_type;
 	} else {
-		spi = get_spi(reg->off);
+		spi = dynptr_get_spi(env, reg);
+		if (spi < 0)
+			return false;
 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
 	}
 }
@@ -1449,7 +1591,7 @@ static void mark_reg_known_zero(struct bpf_verifier_env *env,
 }

 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
-			      bool first_slot)
+			      bool first_slot, int dynptr_id)
 {
 	/* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
 	 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
@@ -1457,6 +1599,8 @@ static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
 	 */
 	__mark_reg_known_zero(reg);
 	reg->type = CONST_PTR_TO_DYNPTR;
+	/* Give each dynptr a unique id to uniquely associate slices to it. */
+	reg->id = dynptr_id;
 	reg->dynptr.type = type;
 	reg->dynptr.first_slot = first_slot;
 }
@@ -2390,6 +2534,32 @@ static int mark_reg_read(struct bpf_verifier_env *env,
 	return 0;
 }

+static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi, ret;
+
+	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
+	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
+	 * check_kfunc_call.
+	 */
+	if (reg->type == CONST_PTR_TO_DYNPTR)
+		return 0;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;
+	/* Caller ensures dynptr is valid and initialized, which means spi is in
+	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
+	 * read.
+	 */
+	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
+			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
+	if (ret)
+		return ret;
+	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
+			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
+}
+
 /* This function is supposed to be used by the following 32-bit optimization
  * code only. It returns TRUE if the source or destination register operates
  * on 64-bit, otherwise return FALSE.
@@ -3303,6 +3473,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
 	}

+	err = destroy_if_dynptr_stack_slot(env, state, spi);
+	if (err)
+		return err;
+
 	mark_stack_slot_scratched(env, spi);
 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
 	    !register_is_null(reg) && env->bpf_capable) {
@@ -3416,6 +3590,14 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
 	if (err)
 		return err;

+	for (i = min_off; i < max_off; i++) {
+		int spi;
+
+		spi = __get_spi(i);
+		err = destroy_if_dynptr_stack_slot(env, state, spi);
+		if (err)
+			return err;
+	}

 	/* Variable offset writes destroy any spilled pointers in range. */
 	for (i = min_off; i < max_off; i++) {
@@ -5443,6 +5625,31 @@ static int check_stack_range_initialized(
 	}

 	if (meta && meta->raw_mode) {
+		/* Ensure we won't be overwriting dynptrs when simulating byte
+		 * by byte access in check_helper_call using meta.access_size.
+		 * This would be a problem if we have a helper in the future
+		 * which takes:
+		 *
+		 *	 helper(uninit_mem, len, dynptr)
+		 *
+		 * Now, uninit_mem may overlap with dynptr pointer. Hence, it
+		 * may end up writing to dynptr itself when touching memory from
+		 * arg 1. This can be relaxed on a case by case basis for known
+		 * safe cases, but reject due to the possibility of aliasing by
+		 * default.
+		 */
+		for (i = min_off; i < max_off + access_size; i++) {
+			int stack_off = -i - 1;
+
+			spi = __get_spi(i);
+			/* raw_mode may write past allocated_stack */
+			if (state->allocated_stack <= stack_off)
+				continue;
+			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
+				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
+				return -EACCES;
+			}
+		}
 		meta->access_size = access_size;
 		meta->regno = regno;
 		return 0;
@@ -5930,6 +6137,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
 			enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta)
 {
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+	int spi = 0;

 	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
 	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
@@ -5940,12 +6148,14 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
 	}
 	/* CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
 	 * check_func_arg_reg_off's logic. We only need to check offset
-	 * alignment for PTR_TO_STACK.
+	 * and its alignment for PTR_TO_STACK.
 	 */
-	if (reg->type == PTR_TO_STACK && (reg->off % BPF_REG_SIZE)) {
-		verbose(env, "cannot pass in dynptr at an offset=%d\n", reg->off);
-		return -EINVAL;
+	if (reg->type == PTR_TO_STACK) {
+		spi = dynptr_get_spi(env, reg);
+		if (spi < 0 && spi != -ERANGE)
+			return spi;
 	}

 	/* MEM_UNINIT - Points to memory that is an appropriate candidate for
 	 * constructing a mutable bpf_dynptr object.
 	 *
@@ -5962,7 +6172,7 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
 	 * to.
 	 */
 	if (arg_type & MEM_UNINIT) {
-		if (!is_dynptr_reg_valid_uninit(env, reg)) {
+		if (!is_dynptr_reg_valid_uninit(env, reg, spi)) {
 			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
 			return -EINVAL;
 		}
@@ -5977,13 +6187,15 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,

 		meta->uninit_dynptr_regno = regno;
 	} else /* MEM_RDONLY and None case from above */ {
+		int err;
+
 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
 			return -EINVAL;
 		}

-		if (!is_dynptr_reg_valid_init(env, reg)) {
+		if (!is_dynptr_reg_valid_init(env, reg, spi)) {
 			verbose(env,
 				"Expected an initialized dynptr as arg #%d\n",
 				regno);
@@ -6010,6 +6222,10 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
 				err_extra, regno);
 			return -EINVAL;
 		}
+
+		err = mark_dynptr_read(env, reg);
+		if (err)
+			return err;
 	}
 	return 0;
 }
@@ -6347,15 +6563,29 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	}
 }

-static u32 dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
 {
 	struct bpf_func_state *state = func(env, reg);
 	int spi;

 	if (reg->type == CONST_PTR_TO_DYNPTR)
-		return reg->ref_obj_id;
+		return reg->id;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;
+	return state->stack[spi].spilled_ptr.id;
+}
+
+static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+	struct bpf_func_state *state = func(env, reg);
+	int spi;

-	spi = get_spi(reg->off);
+	if (reg->type == CONST_PTR_TO_DYNPTR)
+		return reg->ref_obj_id;
+	spi = dynptr_get_spi(env, reg);
+	if (spi < 0)
+		return spi;
 	return state->stack[spi].spilled_ptr.ref_obj_id;
 }

@@ -6429,9 +6659,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 	 * PTR_TO_STACK.
 	 */
 	if (reg->type == PTR_TO_STACK) {
-		spi = get_spi(reg->off);
-		if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
-		    !state->stack[spi].spilled_ptr.ref_obj_id) {
+		spi = dynptr_get_spi(env, reg);
+		if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
 			verbose(env, "arg %d is an unacquired reference\n", regno);
 			return -EINVAL;
 		}
@@ -7413,7 +7642,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
 	 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx);
 	 */
 	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
-	mark_dynptr_cb_reg(&callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
+	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
 	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

 	/* unused */
@@ -7919,13 +8148,32 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
 		if (arg_type_is_dynptr(fn->arg_type[i])) {
 			struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
+			int id, ref_obj_id;
+
+			if (meta.dynptr_id) {
+				verbose(env, "verifier internal error: meta.dynptr_id already set\n");
+				return -EFAULT;
+			}

 			if (meta.ref_obj_id) {
 				verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
 				return -EFAULT;
 			}

-			meta.ref_obj_id = dynptr_ref_obj_id(env, reg);
+			id = dynptr_id(env, reg);
+			if (id < 0) {
+				verbose(env, "verifier internal error: failed to obtain dynptr id\n");
+				return id;
+			}
+
+			ref_obj_id = dynptr_ref_obj_id(env, reg);
+			if (ref_obj_id < 0) {
+				verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
+				return ref_obj_id;
+			}
+
+			meta.dynptr_id = id;
+			meta.ref_obj_id = ref_obj_id;
+
 			break;
 		}
 	}
@@ -8081,6 +8329,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EFAULT;
 	}

+	if (is_dynptr_ref_function(func_id))
+		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
+
 	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
@@ -13215,10 +13466,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			return false;
 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
 			continue;
-		if (!is_spilled_reg(&old->stack[spi]))
-			continue;
-		if (!regsafe(env, &old->stack[spi].spilled_ptr,
-			     &cur->stack[spi].spilled_ptr, idmap))
+		/* Both old and cur are having same slot_type */
+		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
+		case STACK_SPILL:
 			/* when explored and current stack slot are both storing
 			 * spilled registers, check that stored pointers types
 			 * are the same as well.
@@ -13229,7 +13479,30 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			 * such verifier states are not equivalent.
 			 * return false to continue verification of this path
 			 */
+			if (!regsafe(env, &old->stack[spi].spilled_ptr,
+				     &cur->stack[spi].spilled_ptr, idmap))
+				return false;
+			break;
+		case STACK_DYNPTR:
+		{
+			const struct bpf_reg_state *old_reg, *cur_reg;
+
+			old_reg = &old->stack[spi].spilled_ptr;
+			cur_reg = &cur->stack[spi].spilled_ptr;
+			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
+			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
+			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
+				return false;
+			break;
+		}
+		case STACK_MISC:
+		case STACK_ZERO:
+		case STACK_INVALID:
+			continue;
+		/* Ensure that new unhandled slot types return false by default */
+		default:
+			return false;
+		}
 	}
 	return true;
 }
...
@@ -18,7 +18,7 @@ static struct {
 	const char *expected_verifier_err_msg;
 	int expected_runtime_err;
 } kfunc_dynptr_tests[] = {
-	{"not_valid_dynptr", "Expected an initialized dynptr as arg #1", 0},
+	{"not_valid_dynptr", "cannot pass in dynptr at an offset=-8", 0},
 	{"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0},
 	{"dynptr_data_null", NULL, -EBADMSG},
 };
...
@@ -7,6 +7,13 @@
 #define __success	__attribute__((btf_decl_tag("comment:test_expect_success")))
 #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))

+/* Convenience macro for use with 'asm volatile' blocks */
+#define __naked __attribute__((naked))
+#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory"
+#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory"
+#define __imm(name) [name]"i"(name)
+#define __imm_addr(name) [name]"i"(&name)
+
 #if defined(__TARGET_ARCH_x86)
 #define SYSCALL_WRAPPER 1
 #define SYS_PREFIX "__x64_"
...
@@ -35,6 +35,13 @@ struct {
 	__type(value, __u32);
 } array_map3 SEC(".maps");

+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} array_map4 SEC(".maps");
+
 struct sample {
 	int pid;
 	long value;
@@ -67,7 +74,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr)
 * bpf_ringbuf_submit/discard_dynptr call
 */
 SEC("?raw_tp")
-__failure __msg("Unreleased reference id=1")
+__failure __msg("Unreleased reference id=2")
 int ringbuf_missing_release1(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -80,7 +87,7 @@ int ringbuf_missing_release1(void *ctx)
 }

 SEC("?raw_tp")
-__failure __msg("Unreleased reference id=2")
+__failure __msg("Unreleased reference id=4")
 int ringbuf_missing_release2(void *ctx)
 {
 	struct bpf_dynptr ptr1, ptr2;
@@ -382,7 +389,7 @@ int invalid_helper1(void *ctx)

 /* A dynptr can't be passed into a helper function at a non-zero offset */
 SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #3")
+__failure __msg("cannot pass in dynptr at an offset=-8")
 int invalid_helper2(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -420,7 +427,7 @@ int invalid_write1(void *ctx)
 * offset
 */
 SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #3")
+__failure __msg("cannot overwrite referenced dynptr")
 int invalid_write2(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -444,7 +451,7 @@ int invalid_write2(void *ctx)
 * non-const offset
 */
 SEC("?raw_tp")
-__failure __msg("Expected an initialized dynptr as arg #1")
+__failure __msg("cannot overwrite referenced dynptr")
 int invalid_write3(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -476,7 +483,7 @@ static int invalid_write4_callback(__u32 index, void *data)
 * be invalidated as a dynptr
 */
 SEC("?raw_tp")
-__failure __msg("arg 1 is an unacquired reference")
+__failure __msg("cannot overwrite referenced dynptr")
 int invalid_write4(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -584,7 +591,7 @@ int invalid_read4(void *ctx)

 /* Initializing a dynptr on an offset should fail */
 SEC("?raw_tp")
-__failure __msg("invalid write to stack")
+__failure __msg("cannot pass in dynptr at an offset=0")
 int invalid_offset(void *ctx)
 {
 	struct bpf_dynptr ptr;
@@ -653,3 +660,435 @@ int dynptr_from_mem_invalid_api(void *ctx)
 	return 0;
 }
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_overwrite(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 0xeB9F; \
r6 = %[ringbuf] ll; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto pjmp1; \
goto pjmp2; \
pjmp1: \
*(u64 *)(r10 - 16) = r9; \
pjmp2: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__success __msg("12: safe") __log_level(2)
int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 0xeB9F; \
r6 = %[ringbuf] ll; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto stjmp1; \
goto stjmp2; \
stjmp1: \
r9 = r9; \
stjmp2: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
{
asm volatile (
"r6 = %[array_map4] ll; \
r7 = %[ringbuf] ll; \
r1 = r6; \
r2 = r10; \
r2 += -8; \
r9 = 0; \
*(u64 *)(r2 + 0) = r9; \
r3 = r10; \
r3 += -24; \
r9 = 0xeB9FeB9F; \
*(u64 *)(r10 - 16) = r9; \
*(u64 *)(r10 - 24) = r9; \
r9 = 0; \
r4 = 0; \
r8 = r2; \
call %[bpf_map_update_elem]; \
r1 = r6; \
r2 = r8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto tjmp1; \
exit; \
tjmp1: \
r8 = r0; \
r1 = r7; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
r0 = *(u64 *)(r0 + 0); \
call %[bpf_ringbuf_reserve_dynptr]; \
if r0 == 0 goto tjmp2; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
r8 = r8; \
goto tjmp3; \
tjmp2: \
*(u64 *)(r10 - 8) = r9; \
*(u64 *)(r10 - 16) = r9; \
r1 = r8; \
r1 += 8; \
r2 = 0; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_dynptr_from_mem]; \
tjmp3: \
r1 = r10; \
r1 += -16; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_map_update_elem),
__imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_dynptr_from_mem),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(array_map4),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("dynptr has to be at a constant offset") __log_level(2)
int dynptr_var_off_overwrite(struct __sk_buff *ctx)
{
asm volatile (
"r9 = 16; \
*(u32 *)(r10 - 4) = r9; \
r8 = *(u32 *)(r10 - 4); \
if r8 >= 0 goto vjmp1; \
r0 = 1; \
exit; \
vjmp1: \
if r8 <= 16 goto vjmp2; \
r0 = 1; \
exit; \
vjmp2: \
r8 &= 16; \
r1 = %[ringbuf] ll; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -32; \
r4 += r8; \
call %[bpf_ringbuf_reserve_dynptr]; \
r9 = 0xeB9F; \
*(u64 *)(r10 - 16) = r9; \
r1 = r10; \
r1 += -32; \
r1 += r8; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm_addr(ringbuf)
: __clobber_all
);
return 0;
}
SEC("?tc")
__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
int dynptr_partial_slot_invalidate(struct __sk_buff *ctx)
{
asm volatile (
"r6 = %[ringbuf] ll; \
r7 = %[array_map4] ll; \
r1 = r7; \
r2 = r10; \
r2 += -8; \
r9 = 0; \
*(u64 *)(r2 + 0) = r9; \
r3 = r2; \
r4 = 0; \
r8 = r2; \
call %[bpf_map_update_elem]; \
r1 = r7; \
r2 = r8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto sjmp1; \
exit; \
sjmp1: \
r7 = r0; \
r1 = r6; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -24; \
call %[bpf_ringbuf_reserve_dynptr]; \
*(u64 *)(r10 - 16) = r9; \
r1 = r7; \
r2 = 8; \
r3 = 0; \
r4 = r10; \
r4 += -16; \
call %[bpf_dynptr_from_mem]; \
r1 = r10; \
r1 += -512; \
r2 = 488; \
r3 = r10; \
r3 += -24; \
r4 = 0; \
r5 = 0; \
call %[bpf_dynptr_read]; \
r8 = 1; \
if r0 != 0 goto sjmp2; \
r8 = 0; \
sjmp2: \
r1 = r10; \
r1 += -24; \
r2 = 0; \
call %[bpf_ringbuf_discard_dynptr]; "
:
: __imm(bpf_map_update_elem),
__imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_reserve_dynptr),
__imm(bpf_ringbuf_discard_dynptr),
__imm(bpf_dynptr_from_mem),
__imm(bpf_dynptr_read),
__imm_addr(ringbuf),
__imm_addr(array_map4)
: __clobber_all
);
return 0;
}
/* Test that it is allowed to overwrite unreferenced dynptr. */
SEC("?raw_tp")
__success
int dynptr_overwrite_unref(void *ctx)
{
struct bpf_dynptr ptr;
if (get_map_val_dynptr(&ptr))
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
return 0;
}
/* Test that slices are invalidated on reinitializing a dynptr. */
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int dynptr_invalidate_slice_reinit(void *ctx)
{
struct bpf_dynptr ptr;
__u8 *p;
if (get_map_val_dynptr(&ptr))
return 0;
p = bpf_dynptr_data(&ptr, 0, 1);
if (!p)
return 0;
if (get_map_val_dynptr(&ptr))
return 0;
/* this should fail */
return *p;
}
/* Invalidation of dynptr slices on destruction of dynptr should not miss
* mem_or_null pointers.
*/
SEC("?raw_tp")
__failure __msg("R1 type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx)
{
struct bpf_dynptr ptr;
__u8 *p;
if (get_map_val_dynptr(&ptr))
return 0;
p = bpf_dynptr_data(&ptr, 0, 1);
*(__u8 *)&ptr = 0;
/* this should fail */
bpf_this_cpu_ptr(p);
return 0;
}
/* Destruction of a dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp")
__failure __msg("R7 invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u8 *p1, *p2;
if (get_map_val_dynptr(&ptr1))
return 0;
if (get_map_val_dynptr(&ptr2))
return 0;
p1 = bpf_dynptr_data(&ptr1, 0, 1);
if (!p1)
return 0;
p2 = bpf_dynptr_data(&ptr2, 0, 1);
if (!p2)
return 0;
*(__u8 *)&ptr1 = 0;
/* this should fail */
return *p1;
}
/* Invalidation of slices should be scoped and should not prevent dereferencing
* slices of another dynptr after destroying unrelated dynptr
*/
SEC("?raw_tp")
__success
int dynptr_invalidate_slice_success(void *ctx)
{
struct bpf_dynptr ptr1;
struct bpf_dynptr ptr2;
__u8 *p1, *p2;
if (get_map_val_dynptr(&ptr1))
return 1;
if (get_map_val_dynptr(&ptr2))
return 1;
p1 = bpf_dynptr_data(&ptr1, 0, 1);
if (!p1)
return 1;
p2 = bpf_dynptr_data(&ptr2, 0, 1);
if (!p2)
return 1;
*(__u8 *)&ptr1 = 0;
return *p2;
}
/* Overwriting referenced dynptr should be rejected */
SEC("?raw_tp")
__failure __msg("cannot overwrite referenced dynptr")
int dynptr_overwrite_ref(void *ctx)
{
struct bpf_dynptr ptr;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
/* this should fail */
if (get_map_val_dynptr(&ptr))
bpf_ringbuf_discard_dynptr(&ptr, 0);
return 0;
}
/* Reject writes to dynptr slot from bpf_dynptr_read */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int dynptr_read_into_slot(void *ctx)
{
union {
struct {
char _pad[48];
struct bpf_dynptr ptr;
};
char buf[64];
} data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr);
/* this should fail */
bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0);
return 0;
}
/* Reject writes to dynptr slot for uninit arg */
SEC("?raw_tp")
__failure __msg("potential write to dynptr at off=-16")
int uninit_write_into_slot(void *ctx)
{
struct {
char buf[64];
struct bpf_dynptr ptr;
} data;
bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr);
/* this should fail */
bpf_get_current_comm(data.buf, 80);
return 0;
}
static int callback(__u32 index, void *data)
{
*(__u32 *)data = 123;
return 0;
}
/* If the dynptr is written into in a callback function, its data
* slices should be invalidated as well.
*/
SEC("?raw_tp")
__failure __msg("invalid mem access 'scalar'")
int invalid_data_slices(void *ctx)
{
struct bpf_dynptr ptr;
__u32 *slice;
if (get_map_val_dynptr(&ptr))
return 0;
slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
if (!slice)
return 0;
bpf_loop(10, callback, &ptr, 0);
/* this should fail */
*slice = 1;
return 0;
}