Commit e80698b7 authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2023-07-19

We've added 4 non-merge commits during the last 1 day(s) which contain
a total of 3 files changed, 55 insertions(+), 10 deletions(-).

The main changes are:

1) Fix stack depth check in presence of async callbacks,
   from Kumar Kartikeya Dwivedi.

2) Fix BTI type used for freplace attached functions,
   from Xu Kuohai.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, arm64: Fix BTI type used for freplace attached functions
  selftests/bpf: Add more tests for check_max_stack_depth bug
  bpf: Repeat check_max_stack_depth for async callbacks
  bpf: Fix subprog idx logic in check_max_stack_depth
====================

Link: https://lore.kernel.org/r/20230719174502.74023-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents aa7cb378 a3f25d61
arch/arm64/net/bpf_jit_comp.c
@@ -322,7 +322,13 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
          *
          */
 
-        emit_bti(A64_BTI_C, ctx);
+        /* bpf function may be invoked by 3 instruction types:
+         * 1. bl, attached via freplace to bpf prog via short jump
+         * 2. br, attached via freplace to bpf prog via long jump
+         * 3. blr, working as a function pointer, used by emit_call.
+         * So BTI_JC should be used here to support both br and blr.
+         */
+        emit_bti(A64_BTI_JC, ctx);
 
         emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
         emit(A64_NOP, ctx);
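For background on the comment above: on a BTI-guarded page, an indirect branch faults unless it lands on a BTI instruction whose operand admits that branch type. Slightly simplified (BTI C also admits br through x16/x17), a br needs BTI J, a blr needs BTI C, and a direct bl needs no landing pad at all, so a prologue reachable all three ways needs BTI JC. The following is a hypothetical user-space C model of that compatibility matrix, not kernel code:

/* bti_model.c - hypothetical user-space model of BTI landing-pad
 * compatibility (editorial sketch, not kernel code). Direct bl needs
 * no landing pad; br needs BTI J; blr needs BTI C; BTI JC takes both.
 */
#include <stdbool.h>
#include <stdio.h>

enum branch { BRANCH_BL, BRANCH_BR, BRANCH_BLR };
enum bti { BTI_NONE, BTI_J, BTI_C, BTI_JC };

/* Does a branch of type 'b' land safely on a pad of type 'pad'? */
static bool lands_ok(enum branch b, enum bti pad)
{
        switch (b) {
        case BRANCH_BL:  return true;  /* direct branch: no pad needed */
        case BRANCH_BR:  return pad == BTI_J || pad == BTI_JC;
        case BRANCH_BLR: return pad == BTI_C || pad == BTI_JC;
        }
        return false;
}

int main(void)
{
        const char *bn[] = { "bl", "br", "blr" };
        const char *pn[] = { "none", "BTI J", "BTI C", "BTI JC" };

        for (int p = BTI_NONE; p <= BTI_JC; p++) {
                printf("%-6s:", pn[p]);
                for (int b = BRANCH_BL; b <= BRANCH_BLR; b++)
                        printf(" %s=%s", bn[b], lands_ok(b, p) ? "ok" : "FAULT");
                printf("\n");
        }
        return 0;
}

Only the BTI JC row reports ok for every entry path, which is why the hunk above switches the prologue from A64_BTI_C to A64_BTI_JC.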
kernel/bpf/verifier.c
@@ -5573,16 +5573,17 @@ static int update_stack_depth(struct bpf_verifier_env *env,
  * Since recursion is prevented by check_cfg() this algorithm
  * only needs a local stack of MAX_CALL_FRAMES to remember callsites
  */
-static int check_max_stack_depth(struct bpf_verifier_env *env)
+static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
 {
-        int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
         struct bpf_subprog_info *subprog = env->subprog_info;
         struct bpf_insn *insn = env->prog->insnsi;
+        int depth = 0, frame = 0, i, subprog_end;
         bool tail_call_reachable = false;
         int ret_insn[MAX_CALL_FRAMES];
         int ret_prog[MAX_CALL_FRAMES];
         int j;
 
+        i = subprog[idx].start;
 process_func:
         /* protect against potential stack overflow that might happen when
          * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
@@ -5621,7 +5622,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 continue_func:
         subprog_end = subprog[idx + 1].start;
         for (; i < subprog_end; i++) {
-                int next_insn;
+                int next_insn, sidx;
 
                 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
                         continue;
@@ -5631,14 +5632,14 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
 
                 /* find the callee */
                 next_insn = i + insn[i].imm + 1;
-                idx = find_subprog(env, next_insn);
-                if (idx < 0) {
+                sidx = find_subprog(env, next_insn);
+                if (sidx < 0) {
                         WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
                                   next_insn);
                         return -EFAULT;
                 }
-                if (subprog[idx].is_async_cb) {
-                        if (subprog[idx].has_tail_call) {
+                if (subprog[sidx].is_async_cb) {
+                        if (subprog[sidx].has_tail_call) {
                                 verbose(env, "verifier bug. subprog has tail_call and async cb\n");
                                 return -EFAULT;
                         }
@@ -5647,6 +5648,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
                         continue;
                 }
                 i = next_insn;
+                idx = sidx;
 
                 if (subprog[idx].has_tail_call)
                         tail_call_reachable = true;
@@ -5682,6 +5684,22 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
         goto continue_func;
 }
 
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+        struct bpf_subprog_info *si = env->subprog_info;
+        int ret;
+
+        for (int i = 0; i < env->subprog_cnt; i++) {
+                if (!i || si[i].is_async_cb) {
+                        ret = check_max_stack_depth_subprog(env, i);
+                        if (ret < 0)
+                                return ret;
+                }
+                continue;
+        }
+        return 0;
+}
+
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 static int get_callee_stack_depth(struct bpf_verifier_env *env,
                                   const struct bpf_insn *insn, int idx)
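The new check_max_stack_depth() wrapper re-runs the walk once per async callback because such a callback (a bpf_timer callback, say) does not execute on its registering program's stack: its frame chain is rooted at the callback itself, yet its callees can still overflow. The idx/sidx split fixes the related bug: the old code clobbered idx with the callee's index before discovering the callee was an async callback to be skipped, so the rest of the loop used the wrong subprog. Below is a hypothetical user-space model of the repeated walk; none of these types or helpers are verifier API:

/* stack_walk_model.c - hypothetical model of the repeated max-stack
 * walk (editorial sketch, not verifier code). Each subprog has a
 * frame size, a single static callee, and an is_async_cb flag; async
 * callbacks are checked as independent roots, mirroring the wrapper
 * above.
 */
#include <stdio.h>

#define MAX_STACK 512   /* the real verifier limit is 512 bytes */

struct subprog {
        int depth;        /* frame size in bytes */
        int callee;       /* callee index, -1 if none */
        int is_async_cb;  /* entered asynchronously (e.g. bpf_timer) */
};

/* Sum frame sizes along the (acyclic) callee chain from 'root'. */
static int chain_depth(const struct subprog *sp, int root)
{
        int total = 0;

        for (int i = root; i != -1; i = sp[i].callee)
                total += sp[i].depth;
        return total;
}

static int check_all(const struct subprog *sp, int cnt)
{
        for (int i = 0; i < cnt; i++) {
                /* root 0 is the main prog; async cbs are extra roots */
                if (i && !sp[i].is_async_cb)
                        continue;
                if (chain_depth(sp, i) > MAX_STACK) {
                        printf("root %d: stack too large\n", i);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        /* main prog (288B) arms a timer; the callback (320B) calls a
         * 320B helper: the async chain alone is 640B > 512B, even
         * though the main chain (288B) is fine.
         */
        struct subprog sp[] = {
                { 288, -1, 0 },  /* 0: main prog */
                { 320,  2, 1 },  /* 1: async timer callback */
                { 320, -1, 0 },  /* 2: callee of the callback */
        };

        return check_all(sp, 3) ? 1 : 0;
}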
tools/testing/selftests/bpf/progs/async_stack_depth.c
@@ -22,9 +22,16 @@ static int timer_cb(void *map, int *key, struct bpf_timer *timer)
         return buf[69];
 }
 
+__attribute__((noinline))
+static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer)
+{
+        volatile char buf[300] = {};
+        return buf[255] + timer_cb(NULL, NULL, NULL);
+}
+
 SEC("tc")
-__failure __msg("combined stack size of 2 calls")
-int prog(struct __sk_buff *ctx)
+__failure __msg("combined stack size of 2 calls is 576. Too large")
+int pseudo_call_check(struct __sk_buff *ctx)
 {
         struct hmap_elem *elem;
         volatile char buf[256] = {};
@@ -37,4 +44,18 @@ int prog(struct __sk_buff *ctx)
         return bpf_timer_set_callback(&elem->timer, timer_cb) + buf[0];
 }
 
+SEC("tc")
+__failure __msg("combined stack size of 2 calls is 608. Too large")
+int async_call_root_check(struct __sk_buff *ctx)
+{
+        struct hmap_elem *elem;
+        volatile char buf[256] = {};
+
+        elem = bpf_map_lookup_elem(&hmap, &(int){0});
+        if (!elem)
+                return 0;
+
+        return bpf_timer_set_callback(&elem->timer, bad_timer_cb) + buf[0];
+}
+
 char _license[] SEC("license") = "GPL";
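The 576 and 608 in the __msg() patterns can be sanity-checked by hand, assuming the verifier rounds each frame up to a 32-byte multiple and the compiler spills a few extra bytes per frame (both are assumptions here): the buf[256] frames round to 288, bad_timer_cb's buf[300] frame to 320, so prog + timer_cb = 288 + 288 = 576 and bad_timer_cb + timer_cb = 320 + 288 = 608. A quick check of that arithmetic:

/* msg_math.c - editorial sanity check of the sizes quoted in the
 * __msg() strings above. Assumes 32-byte frame rounding and ~8
 * extra bytes of spill/alignment per buf[256] frame (assumptions,
 * not uapi).
 */
#include <stdio.h>

static int round_up32(int n)
{
        return (n + 31) & ~31;
}

int main(void)
{
        int frame256 = round_up32(256 + 8); /* prog/timer_cb frames -> 288 */
        int frame300 = round_up32(300 + 8); /* bad_timer_cb frame   -> 320 */

        /* pseudo_call_check: prog + timer_cb */
        printf("pseudo_call_check:     %d\n", frame256 + frame256); /* 576 */
        /* async_call_root_check: bad_timer_cb + timer_cb */
        printf("async_call_root_check: %d\n", frame300 + frame256); /* 608 */
        return 0;
}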