Commit ab5cfac1 authored by Eduard Zingerman, committed by Alexei Starovoitov

bpf: verify callbacks as if they are called unknown number of times

Prior to this patch, callbacks were handled as regular function calls:
execution of the callback body was modeled exactly once.
This patch updates the callback handling logic as follows:
- introduces a function push_callback_call() that schedules callback
  body verification in the env->head stack;
- updates prepare_func_exit() to reschedule callback body verification
  upon BPF_EXIT;
- like calls to bpf_*_iter_next(), calls to callback-invoking functions
  are marked as checkpoints;
- is_state_visited() is updated to stop callback-based iteration when
  some identical parent state is found.

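To illustrate the new exploration order, below is a minimal selftest-style
sketch (hypothetical, not part of this patch; the names init_cb and
zero_iter_prog are made up for illustration): bpf_loop() may invoke its
callback zero times at runtime, so the verifier cannot assume that the
effects of the callback body always happen.

    /* Hypothetical example, assuming the usual selftest includes
     * (<linux/bpf.h>, <bpf/bpf_helpers.h>).
     */
    static long init_cb(__u64 index, void *ctx)
    {
            *(int *)ctx = 42;       /* runs only if the callback is invoked */
            return 1;               /* non-zero return value stops iteration */
    }

    SEC("?raw_tp")
    int zero_iter_prog(void *ctx)
    {
            int val = 0;

            /* Previously the callback body was simulated exactly once.
             * Now the path where init_cb() runs zero times (val == 0) is
             * explored first, then one iteration, two, and so on, until
             * is_state_visited() finds an identical state and prunes.
             */
            bpf_loop(1, init_cb, &val, 0);
            return val;
    }
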
Paths where the callback function is invoked zero times are now verified
first, which made it necessary to modify some selftests:
- the following negative tests required adding release/unlock/drop
  calls to avoid previously masked unrelated error reports:
  - cb_refs.c:underflow_prog
  - exceptions_fail.c:reject_rbtree_add_throw
  - exceptions_fail.c:reject_with_cb_reference
- the following precision tracking selftests needed changes to the
  expected log trace:
  - verifier_subprog_precision.c:callback_result_precise
    (note: r0 precision is no longer propagated inside the callback,
           and I think this is correct behavior)
  - verifier_subprog_precision.c:parent_callee_saved_reg_precise_with_callback
  - verifier_subprog_precision.c:parent_stack_slot_precise_with_callback
Reported-by: Andrew Werner <awerner32@gmail.com>
Closes: https://lore.kernel.org/bpf/CA+vRuzPChFNXmouzGG+wsy=6eMcfr1mFG0F3g7rbg-sedGKW3w@mail.gmail.com/
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20231121020701.26440-7-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 58124a98
@@ -400,6 +400,7 @@ struct bpf_verifier_state {
 	struct bpf_idx_pair *jmp_history;
 	u32 jmp_history_cnt;
 	u32 dfs_depth;
+	u32 callback_unroll_depth;
 };

 #define bpf_get_spilled_reg(slot, frame, mask) \
@@ -511,6 +512,10 @@ struct bpf_insn_aux_data {
 	 * this instruction, regardless of any heuristics
 	 */
 	bool force_checkpoint;
+	/* true if instruction is a call to a helper function that
+	 * accepts callback function as a parameter.
+	 */
+	bool calls_callback;
 };

 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
......
@@ -547,13 +547,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
 	return func_id == BPF_FUNC_dynptr_data;
 }

-static bool is_callback_calling_kfunc(u32 btf_id);
+static bool is_sync_callback_calling_kfunc(u32 btf_id);
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn);

-static bool is_callback_calling_function(enum bpf_func_id func_id)
+static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
 {
 	return func_id == BPF_FUNC_for_each_map_elem ||
-	       func_id == BPF_FUNC_timer_set_callback ||
 	       func_id == BPF_FUNC_find_vma ||
 	       func_id == BPF_FUNC_loop ||
 	       func_id == BPF_FUNC_user_ringbuf_drain;
@@ -564,6 +563,18 @@ static bool is_async_callback_calling_function(enum bpf_func_id func_id)
 	return func_id == BPF_FUNC_timer_set_callback;
 }

+static bool is_callback_calling_function(enum bpf_func_id func_id)
+{
+	return is_sync_callback_calling_function(func_id) ||
+	       is_async_callback_calling_function(func_id);
+}
+
+static bool is_sync_callback_calling_insn(struct bpf_insn *insn)
+{
+	return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) ||
+	       (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm));
+}
+
 static bool is_storage_get_function(enum bpf_func_id func_id)
 {
 	return func_id == BPF_FUNC_sk_storage_get ||
@@ -1808,6 +1819,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
 	dst_state->first_insn_idx = src->first_insn_idx;
 	dst_state->last_insn_idx = src->last_insn_idx;
 	dst_state->dfs_depth = src->dfs_depth;
+	dst_state->callback_unroll_depth = src->callback_unroll_depth;
 	dst_state->used_as_loop_entry = src->used_as_loop_entry;
 	for (i = 0; i <= src->curframe; i++) {
 		dst = dst_state->frame[i];
@@ -3731,6 +3743,8 @@ static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
 	}
 }

+static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
+
 /* For given verifier state backtrack_insn() is called from the last insn to
  * the first insn. Its purpose is to compute a bitmask of registers and
  * stack slots that needs precision in the parent verifier state.
@@ -3906,16 +3920,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 				return -EFAULT;
 			return 0;
 		}
-	} else if ((bpf_helper_call(insn) &&
-		    is_callback_calling_function(insn->imm) &&
-		    !is_async_callback_calling_function(insn->imm)) ||
-		   (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
-		/* callback-calling helper or kfunc call, which means
-		 * we are exiting from subprog, but unlike the subprog
-		 * call handling above, we shouldn't propagate
-		 * precision of r1-r5 (if any requested), as they are
-		 * not actually arguments passed directly to callback
-		 * subprogs
+	} else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) {
+		/* exit from callback subprog to callback-calling helper or
+		 * kfunc call. Use idx/subseq_idx check to discern it from
+		 * straight line code backtracking.
+		 * Unlike the subprog call handling above, we shouldn't
+		 * propagate precision of r1-r5 (if any requested), as they are
+		 * not actually arguments passed directly to callback subprogs
 		 */
 		if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
 			verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
@@ -3950,10 +3961,18 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 	} else if (opcode == BPF_EXIT) {
 		bool r0_precise;

-		if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
-			/* if backtracing was looking for registers R1-R5
-			 * they should have been found already.
-			 */
+		/* Backtracking to a nested function call, 'idx' is a part of
+		 * the inner frame 'subseq_idx' is a part of the outer frame.
+		 * In case of a regular function call, instructions giving
+		 * precision to registers R1-R5 should have been found already.
+		 * In case of a callback, it is ok to have R1-R5 marked for
+		 * backtracking, as these registers are set by the function
+		 * invoking callback.
+		 */
+		if (subseq_idx >= 0 && calls_callback(env, subseq_idx))
+			for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+				bt_clear_reg(bt, i);
+		if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
 			verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
 			WARN_ONCE(1, "verifier backtracking bug");
 			return -EFAULT;
@@ -9421,11 +9440,11 @@ static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int calls
 	return err;
 }

-static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
-			     int *insn_idx, int subprog,
-			     set_callee_state_fn set_callee_state_cb)
+static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+			      int insn_idx, int subprog,
+			      set_callee_state_fn set_callee_state_cb)
 {
-	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_verifier_state *state = env->cur_state, *callback_state;
 	struct bpf_func_state *caller, *callee;
 	int err;
@@ -9433,35 +9452,14 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	err = btf_check_subprog_call(env, subprog, caller->regs);
 	if (err == -EFAULT)
 		return err;
-	if (subprog_is_global(env, subprog)) {
-		if (err) {
-			verbose(env, "Caller passes invalid args into func#%d\n",
-				subprog);
-			return err;
-		} else {
-			if (env->log.level & BPF_LOG_LEVEL)
-				verbose(env,
-					"Func#%d is global and valid. Skipping.\n",
-					subprog);
-			clear_caller_saved_regs(env, caller->regs);
-
-			/* All global functions return a 64-bit SCALAR_VALUE */
-			mark_reg_unknown(env, caller->regs, BPF_REG_0);
-			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
-
-			/* continue with next insn after call */
-			return 0;
-		}
-	}

 	/* set_callee_state is used for direct subprog calls, but we are
 	 * interested in validating only BPF helpers that can call subprogs as
 	 * callbacks
 	 */
-	if (set_callee_state_cb != set_callee_state) {
-		env->subprog_info[subprog].is_cb = true;
-		if (bpf_pseudo_kfunc_call(insn) &&
-		    !is_callback_calling_kfunc(insn->imm)) {
-			verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
-				func_id_name(insn->imm), insn->imm);
-			return -EFAULT;
+	env->subprog_info[subprog].is_cb = true;
+	if (bpf_pseudo_kfunc_call(insn) &&
+	    !is_sync_callback_calling_kfunc(insn->imm)) {
+		verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
+			func_id_name(insn->imm), insn->imm);
+		return -EFAULT;
@@ -9471,7 +9469,6 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 			func_id_name(insn->imm), insn->imm);
 		return -EFAULT;
 	}
-	}

 	if (insn->code == (BPF_JMP | BPF_CALL) &&
 	    insn->src_reg == 0 &&
@@ -9481,25 +9478,76 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		/* there is no real recursion here. timer callbacks are async */
 		env->subprog_info[subprog].is_async_cb = true;
 		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
-					 *insn_idx, subprog);
+					 insn_idx, subprog);
 		if (!async_cb)
 			return -EFAULT;
 		callee = async_cb->frame[0];
 		callee->async_entry_cnt = caller->async_entry_cnt + 1;

 		/* Convert bpf_timer_set_callback() args into timer callback args */
-		err = set_callee_state_cb(env, caller, callee, *insn_idx);
+		err = set_callee_state_cb(env, caller, callee, insn_idx);
+		if (err)
+			return err;
+
+		return 0;
+	}
+
+	/* for callback functions enqueue entry to callback and
+	 * proceed with next instruction within current frame.
+	 */
+	callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false);
+	if (!callback_state)
+		return -ENOMEM;
+	err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb,
+			       callback_state);
 	if (err)
 		return err;

+	callback_state->callback_unroll_depth++;
+	return 0;
+}
+
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+			   int *insn_idx)
+{
+	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_state *caller;
+	int err, subprog, target_insn;
+
+	target_insn = *insn_idx + insn->imm + 1;
+	subprog = find_subprog(env, target_insn);
+	if (subprog < 0) {
+		verbose(env, "verifier bug. No program starts at insn %d\n", target_insn);
+		return -EFAULT;
+	}
+
+	caller = state->frame[state->curframe];
+	err = btf_check_subprog_call(env, subprog, caller->regs);
+	if (err == -EFAULT)
+		return err;
+	if (subprog_is_global(env, subprog)) {
+		if (err) {
+			verbose(env, "Caller passes invalid args into func#%d\n", subprog);
+			return err;
+		}
+
+		if (env->log.level & BPF_LOG_LEVEL)
+			verbose(env, "Func#%d is global and valid. Skipping.\n", subprog);
 		clear_caller_saved_regs(env, caller->regs);
+
 		/* All global functions return a 64-bit SCALAR_VALUE */
 		mark_reg_unknown(env, caller->regs, BPF_REG_0);
 		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
+
 		/* continue with next insn after call */
 		return 0;
 	}

-	err = setup_func_entry(env, subprog, *insn_idx, set_callee_state_cb, state);
+	/* for regular function entry setup new frame and continue
+	 * from that frame.
+	 */
+	err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state);
 	if (err)
 		return err;
@@ -9559,22 +9607,6 @@ static int set_callee_state(struct bpf_verifier_env *env,
 	return 0;
 }

-static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
-			   int *insn_idx)
-{
-	int subprog, target_insn;
-
-	target_insn = *insn_idx + insn->imm + 1;
-	subprog = find_subprog(env, target_insn);
-	if (subprog < 0) {
-		verbose(env, "verifier bug. No program starts at insn %d\n",
-			target_insn);
-		return -EFAULT;
-	}
-
-	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
-}
-
 static int set_map_elem_callback_state(struct bpf_verifier_env *env,
 				       struct bpf_func_state *caller,
 				       struct bpf_func_state *callee,
@@ -9798,6 +9830,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
 			return -EINVAL;
 		}
+		if (!calls_callback(env, callee->callsite)) {
+			verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n",
+				*insn_idx, callee->callsite);
+			return -EFAULT;
+		}
 	} else {
 		/* return to the caller whatever r0 had in the callee */
 		caller->regs[BPF_REG_0] = *r0;
@@ -9815,7 +9852,15 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 			return err;
 	}

-	*insn_idx = callee->callsite + 1;
+	/* for callbacks like bpf_loop or bpf_for_each_map_elem go back to callsite,
+	 * there function call logic would reschedule callback visit. If iteration
+	 * converges is_state_visited() would prune that visit eventually.
+	 */
+	if (callee->in_callback_fn)
+		*insn_idx = callee->callsite;
+	else
+		*insn_idx = callee->callsite + 1;
+
 	if (env->log.level & BPF_LOG_LEVEL) {
 		verbose(env, "returning from callee:\n");
 		print_verifier_state(env, callee, true);
@@ -10228,15 +10273,15 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		}
 		break;
 	case BPF_FUNC_for_each_map_elem:
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_map_elem_callback_state);
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_map_elem_callback_state);
 		break;
 	case BPF_FUNC_timer_set_callback:
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_timer_callback_state);
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_timer_callback_state);
 		break;
 	case BPF_FUNC_find_vma:
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_find_vma_callback_state);
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_find_vma_callback_state);
 		break;
 	case BPF_FUNC_snprintf:
@@ -10244,7 +10289,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		break;
 	case BPF_FUNC_loop:
 		update_loop_inline_state(env, meta.subprogno);
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_loop_callback_state);
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_loop_callback_state);
 		break;
 	case BPF_FUNC_dynptr_from_mem:
@@ -10341,7 +10386,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 		break;
 	}
 	case BPF_FUNC_user_ringbuf_drain:
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_user_ringbuf_callback_state);
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_user_ringbuf_callback_state);
 		break;
 	}
@@ -11230,7 +11275,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
 }

-static bool is_callback_calling_kfunc(u32 btf_id)
+static bool is_sync_callback_calling_kfunc(u32 btf_id)
 {
 	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
 }
@@ -11982,6 +12027,21 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EACCES;
 	}

+	/* Check the arguments */
+	err = check_kfunc_args(env, &meta, insn_idx);
+	if (err < 0)
+		return err;
+
+	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_rbtree_add_callback_state);
+		if (err) {
+			verbose(env, "kfunc %s#%d failed callback verification\n",
+				func_name, meta.func_id);
+			return err;
+		}
+	}
+
 	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
 	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
@@ -12017,10 +12077,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		return -EINVAL;
 	}

-	/* Check the arguments */
-	err = check_kfunc_args(env, &meta, insn_idx);
-	if (err < 0)
-		return err;
-
 	/* In case of release function, we get register number of refcounted
 	 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
 	 */
@@ -12054,16 +12110,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		}
 	}

-	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
-		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
-					set_rbtree_add_callback_state);
-		if (err) {
-			verbose(env, "kfunc %s#%d failed callback verification\n",
-				func_name, meta.func_id);
-			return err;
-		}
-	}
-
 	if (meta.func_id == special_kfunc_list[KF_bpf_throw]) {
 		if (!bpf_jit_supports_exceptions()) {
 			verbose(env, "JIT does not support calling kfunc %s#%d\n",
@@ -15427,6 +15473,15 @@ static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
 	return env->insn_aux_data[insn_idx].force_checkpoint;
 }

+static void mark_calls_callback(struct bpf_verifier_env *env, int idx)
+{
+	env->insn_aux_data[idx].calls_callback = true;
+}
+
+static bool calls_callback(struct bpf_verifier_env *env, int insn_idx)
+{
+	return env->insn_aux_data[insn_idx].calls_callback;
+}
+
 enum {
 	DONE_EXPLORING = 0,
@@ -15540,6 +15595,21 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 		 * async state will be pushed for further exploration.
 		 */
 		mark_prune_point(env, t);
+		/* For functions that invoke callbacks it is not known how many times
+		 * callback would be called. Verifier models callback calling functions
+		 * by repeatedly visiting callback bodies and returning to origin call
+		 * instruction.
+		 * In order to stop such iteration verifier needs to identify when a
+		 * state identical to some state from a previous iteration is reached.
+		 * Check below forces creation of checkpoint before callback calling
+		 * instruction to allow search for such identical states.
+		 */
+		if (is_sync_callback_calling_insn(insn)) {
+			mark_calls_callback(env, t);
+			mark_force_checkpoint(env, t);
+			mark_prune_point(env, t);
+			mark_jmp_point(env, t);
+		}
 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
 			struct bpf_kfunc_call_arg_meta meta;
@@ -17009,10 +17079,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			}
 			goto skip_inf_loop_check;
 		}
+		if (calls_callback(env, insn_idx)) {
+			if (states_equal(env, &sl->state, cur, true))
+				goto hit;
+			goto skip_inf_loop_check;
+		}
 		/* attempt to detect infinite loop to avoid unnecessary doomed work */
 		if (states_maybe_looping(&sl->state, cur) &&
 		    states_equal(env, &sl->state, cur, false) &&
-		    !iter_active_depths_differ(&sl->state, cur)) {
+		    !iter_active_depths_differ(&sl->state, cur) &&
+		    sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
 			verbose_linfo(env, insn_idx, "; ");
 			verbose(env, "infinite loop detected at insn %d\n", insn_idx);
 			verbose(env, "cur state:");
......
@@ -33,6 +33,7 @@ int underflow_prog(void *ctx)
 	if (!p)
 		return 0;
 	bpf_for_each_map_elem(&array_map, cb1, &p, 0);
+	bpf_kfunc_call_test_release(p);
 	return 0;
 }
......
@@ -171,6 +171,7 @@ int reject_with_rbtree_add_throw(void *ctx)
 		return 0;
 	bpf_spin_lock(&lock);
 	bpf_rbtree_add(&rbtree, &f->node, rbless);
+	bpf_spin_unlock(&lock);
 	return 0;
 }
@@ -214,6 +215,7 @@ int reject_with_cb_reference(void *ctx)
 	if (!f)
 		return 0;
 	bpf_loop(5, subprog_cb_ref, NULL, 0);
+	bpf_obj_drop(f);
 	return 0;
 }
......
@@ -119,15 +119,26 @@ __naked int global_subprog_result_precise(void)
 SEC("?raw_tp")
 __success __log_level(2)
+/* First simulated path does not include callback body */
 __msg("14: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+__msg("mark_precise: frame0: last_idx 14 first_idx 9")
 __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
 __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
 __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
 __msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
-__msg("mark_precise: frame0: parent state regs=r0 stack=:")
-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
-__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
+__msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+/* r4 (flags) is always precise for bpf_loop() */
+__msg("frame 0: propagating r4")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r4 stack= before 18: (95) exit")
+__msg("from 18 to 9: safe")
 __naked int callback_result_precise(void)
 {
 	asm volatile (
@@ -233,20 +244,36 @@ __naked int parent_callee_saved_reg_precise_global(void)
 SEC("?raw_tp")
 __success __log_level(2)
+/* First simulated path does not include callback body */
 __msg("12: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 12 first_idx 10")
+__msg("mark_precise: frame0: last_idx 12 first_idx 9")
 __msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
 __msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
 __msg("mark_precise: frame0: parent state regs=r6 stack=:")
-__msg("mark_precise: frame0: last_idx 16 first_idx 0")
-__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
-__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
-__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
 __msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
 __msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
 __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
 __msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 9 to 15: frame1:")
+__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("15: (b7) r0 = 0")
+__msg("16: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 9:")
+/* r4 (flags) is always precise for bpf_loop(),
+ * r6 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r4,r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r4,r6 stack= before 16: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 16 to 9: safe")
 __naked int parent_callee_saved_reg_precise_with_callback(void)
 {
 	asm volatile (
@@ -373,22 +400,38 @@ __naked int parent_stack_slot_precise_global(void)
 SEC("?raw_tp")
 __success __log_level(2)
+/* First simulated path does not include callback body */
 __msg("14: (0f) r1 += r6")
-__msg("mark_precise: frame0: last_idx 14 first_idx 11")
+__msg("mark_precise: frame0: last_idx 14 first_idx 10")
 __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
 __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
 __msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
 __msg("mark_precise: frame0: parent state regs= stack=-8:")
-__msg("mark_precise: frame0: last_idx 18 first_idx 0")
-__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
-__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
-__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
 __msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
 __msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
 __msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
 __msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
 __msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+/* State entering callback body popped from states stack */
+__msg("from 10 to 17: frame1:")
+__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
+__msg("17: (b7) r0 = 0")
+__msg("18: (95) exit")
+__msg("returning from callee:")
+__msg("to caller at 10:")
+/* r4 (flags) is always precise for bpf_loop(),
+ * fp-8 was marked before backtracking to callback body.
+ */
+__msg("frame 0: propagating r4,fp-8")
+__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r4 stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: parent state regs= stack=:")
+__msg("from 18 to 10: safe")
 __naked int parent_stack_slot_precise_with_callback(void)
 {
 	asm volatile (
......