Commit 59bb14bd authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2023-06-21

We've added 7 non-merge commits during the last 14 day(s) which contain
a total of 7 files changed, 181 insertions(+), 15 deletions(-).

The main changes are:

1) Fix a verifier id tracking issue with scalars upon spill,
   from Maxim Mikityanskiy.

2) Fix NULL dereference if an exception is generated while a BPF
   subprogram is running, from Krister Johansen.

3) Fix a BTF verification failure when compiling kernel with LLVM_IAS=0,
   from Florent Revest.

4) Fix expected_attach_type enforcement for kprobe_multi link,
   from Jiri Olsa.

5) Fix a bpf_jit_dump issue for x86_64 to pick the correct JITed image,
   from Yonghong Song.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Force kprobe multi expected_attach_type for kprobe_multi link
  bpf/btf: Accept function names that contain dots
  selftests/bpf: add a test for subprogram extables
  bpf: ensure main program has an extable
  bpf: Fix a bpf_jit_dump issue for x86_64 with sysctl bpf_jit_enable.
  selftests/bpf: Add test cases to assert proper ID tracking on spill
  bpf: Fix verifier id tracking of scalars on spill
====================

Link: https://lore.kernel.org/r/20230621101116.16122-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a129b41f db8eae6b
...@@ -2570,7 +2570,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ...@@ -2570,7 +2570,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
} }
if (bpf_jit_enable > 1) if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, proglen, pass + 1, image); bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
if (image) { if (image) {
if (!prog->is_func || extra_pass) { if (!prog->is_func || extra_pass) {
......
...@@ -744,13 +744,12 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset) ...@@ -744,13 +744,12 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
return offset < btf->hdr.str_len; return offset < btf->hdr.str_len;
} }
static bool __btf_name_char_ok(char c, bool first, bool dot_ok) static bool __btf_name_char_ok(char c, bool first)
{ {
if ((first ? !isalpha(c) : if ((first ? !isalpha(c) :
!isalnum(c)) && !isalnum(c)) &&
c != '_' && c != '_' &&
((c == '.' && !dot_ok) || c != '.')
c != '.'))
return false; return false;
return true; return true;
} }
...@@ -767,20 +766,20 @@ static const char *btf_str_by_offset(const struct btf *btf, u32 offset) ...@@ -767,20 +766,20 @@ static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
return NULL; return NULL;
} }
static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok) static bool __btf_name_valid(const struct btf *btf, u32 offset)
{ {
/* offset must be valid */ /* offset must be valid */
const char *src = btf_str_by_offset(btf, offset); const char *src = btf_str_by_offset(btf, offset);
const char *src_limit; const char *src_limit;
if (!__btf_name_char_ok(*src, true, dot_ok)) if (!__btf_name_char_ok(*src, true))
return false; return false;
/* set a limit on identifier length */ /* set a limit on identifier length */
src_limit = src + KSYM_NAME_LEN; src_limit = src + KSYM_NAME_LEN;
src++; src++;
while (*src && src < src_limit) { while (*src && src < src_limit) {
if (!__btf_name_char_ok(*src, false, dot_ok)) if (!__btf_name_char_ok(*src, false))
return false; return false;
src++; src++;
} }
...@@ -788,17 +787,14 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok) ...@@ -788,17 +787,14 @@ static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
return !*src; return !*src;
} }
/* Only C-style identifier is permitted. This can be relaxed if
* necessary.
*/
static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{ {
return __btf_name_valid(btf, offset, false); return __btf_name_valid(btf, offset);
} }
static bool btf_name_valid_section(const struct btf *btf, u32 offset) static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{ {
return __btf_name_valid(btf, offset, true); return __btf_name_valid(btf, offset);
} }
static const char *__btf_name_by_offset(const struct btf *btf, u32 offset) static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
...@@ -4422,7 +4418,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env, ...@@ -4422,7 +4418,7 @@ static s32 btf_var_check_meta(struct btf_verifier_env *env,
} }
if (!t->name_off || if (!t->name_off ||
!__btf_name_valid(env->btf, t->name_off, true)) { !__btf_name_valid(env->btf, t->name_off)) {
btf_verifier_log_type(env, t, "Invalid name"); btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL; return -EINVAL;
} }
......
...@@ -3440,6 +3440,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, ...@@ -3440,6 +3440,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
return prog->enforce_expected_attach_type && return prog->enforce_expected_attach_type &&
prog->expected_attach_type != attach_type ? prog->expected_attach_type != attach_type ?
-EINVAL : 0; -EINVAL : 0;
case BPF_PROG_TYPE_KPROBE:
if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
attach_type != BPF_TRACE_KPROBE_MULTI)
return -EINVAL;
return 0;
default: default:
return 0; return 0;
} }
......
...@@ -3868,6 +3868,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, ...@@ -3868,6 +3868,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err; return err;
} }
save_register_state(state, spi, reg, size); save_register_state(state, spi, reg, size);
/* Break the relation on a narrowing spill. */
if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
insn->imm != 0 && env->bpf_capable) { insn->imm != 0 && env->bpf_capable) {
struct bpf_reg_state fake_reg = {}; struct bpf_reg_state fake_reg = {};
...@@ -17214,9 +17217,10 @@ static int jit_subprogs(struct bpf_verifier_env *env) ...@@ -17214,9 +17217,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
} }
/* finally lock prog and jit images for all functions and /* finally lock prog and jit images for all functions and
* populate kallsysm * populate kallsysm. Begin at the first subprogram, since
* bpf_prog_load will add the kallsyms for the main program.
*/ */
for (i = 0; i < env->subprog_cnt; i++) { for (i = 1; i < env->subprog_cnt; i++) {
bpf_prog_lock_ro(func[i]); bpf_prog_lock_ro(func[i]);
bpf_prog_kallsyms_add(func[i]); bpf_prog_kallsyms_add(func[i]);
} }
...@@ -17242,6 +17246,8 @@ static int jit_subprogs(struct bpf_verifier_env *env) ...@@ -17242,6 +17246,8 @@ static int jit_subprogs(struct bpf_verifier_env *env)
prog->jited = 1; prog->jited = 1;
prog->bpf_func = func[0]->bpf_func; prog->bpf_func = func[0]->bpf_func;
prog->jited_len = func[0]->jited_len; prog->jited_len = func[0]->jited_len;
prog->aux->extable = func[0]->aux->extable;
prog->aux->num_exentries = func[0]->aux->num_exentries;
prog->aux->func = func; prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt; prog->aux->func_cnt = env->subprog_cnt;
bpf_prog_jit_attempt_done(prog); bpf_prog_jit_attempt_done(prog);
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_subprogs_extable.skel.h"
/* Check that BPF programs containing subprograms still recover from faulting
 * probe-reads, i.e. that every (sub)program got a working exception table.
 */
void test_subprogs_extable(void)
{
	const int read_sz = 456;
	struct test_subprogs_extable *skel;
	int ret;

	skel = test_subprogs_extable__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	ret = test_subprogs_extable__attach(skel);
	if (ASSERT_OK(ret, "skel_attach")) {
		/* trigger tracepoint */
		ASSERT_OK(trigger_module_test_read(read_sz), "trigger_read");
		ASSERT_NEQ(skel->bss->triggered, 0, "verify at least one program ran");
		test_subprogs_extable__detach(skel);
	}

	test_subprogs_extable__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
/* Dummy array map, iterated via bpf_for_each_map_elem() below so that each
 * program is forced to contain a subprogram (the iteration callback).
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 8);
	__type(key, __u32);
	__type(value, __u64);
} test_array SEC(".maps");

/* Bumped once per program invocation; user space asserts it is non-zero. */
unsigned int triggered;
/* bpf_for_each_map_elem() callback. Its only purpose is to exist as a BPF
 * subprogram; the non-zero return value ends the iteration immediately.
 */
static __u64 test_cb(struct bpf_map *m, __u32 *k, __u64 *v, void *ctx)
{
	return 1;
}
/* Runs at exit of bpf_testmod_return_ptr(); 'ret' is the traced function's
 * return value.
 */
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs, int arg, struct file *ret)
{
	/* Probe-read through the returned pointer. Recovery from a fault here
	 * relies on the program's exception table.
	 * NOTE(review): assumes bpf_testmod may hand back a faulting address —
	 * confirm against the test module.
	 */
	*(volatile long *)ret;
	*(volatile int *)&ret->f_mode;
	/* Call through the map-iteration callback so a subprogram exists. */
	bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
	triggered++;	/* observed from user space via skel->bss */
	return 0;
}
/* Second identical fexit program; 'ret' is the traced function's return
 * value.
 */
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs2, int arg, struct file *ret)
{
	/* Probe-read through the returned pointer; fault recovery relies on the
	 * program's exception table.
	 */
	*(volatile long *)ret;
	*(volatile int *)&ret->f_mode;
	/* Call through the map-iteration callback so a subprogram exists. */
	bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
	triggered++;	/* observed from user space via skel->bss */
	return 0;
}
/* Third identical fexit program; 'ret' is the traced function's return
 * value.
 */
SEC("fexit/bpf_testmod_return_ptr")
int BPF_PROG(handle_fexit_ret_subprogs3, int arg, struct file *ret)
{
	/* Probe-read through the returned pointer; fault recovery relies on the
	 * program's exception table.
	 */
	*(volatile long *)ret;
	*(volatile int *)&ret->f_mode;
	/* Call through the map-iteration callback so a subprogram exists. */
	bpf_for_each_map_elem(&test_array, test_cb, NULL, 0);
	triggered++;	/* observed from user space via skel->bss */
	return 0;
}
char _license[] SEC("license") = "GPL";
...@@ -371,4 +371,83 @@ __naked void and_then_at_fp_8(void) ...@@ -371,4 +371,83 @@ __naked void and_then_at_fp_8(void)
" ::: __clobber_all); " ::: __clobber_all);
} }
/* Regression test: a narrowing (32-bit) spill of a 64-bit scalar must clear
 * the scalar's ID. If the ID survived, the equal-scalars propagation after
 * the branch below would leak the 32-bit fill's known range back into the
 * full 64-bit source register, and the verifier would wrongly accept the
 * final packet access. The program must be rejected with the __msg error.
 */
SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
asm volatile (" \
r6 = r1; \
/* Roll one bit to force the verifier to track both branches. */\
call %[bpf_get_prandom_u32]; \
r0 &= 0x8; \
/* Put a large number into r1. */ \
r1 = 0xffffffff; \
r1 <<= 32; \
r1 += r0; \
/* Assign an ID to r1. */ \
r2 = r1; \
/* 32-bit spill r1 to stack - should clear the ID! */\
*(u32*)(r10 - 8) = r1; \
/* 32-bit fill r2 from stack. */ \
r2 = *(u32*)(r10 - 8); \
/* Compare r2 with another register to trigger find_equal_scalars.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
* the branches, and equal to eight on the other branch.\
*/ \
r3 = 0; \
if r2 != r3 goto l0_%=; \
l0_%=: r1 >>= 32; \
/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
* read will happen, because it actually contains 0xffffffff.\
*/ \
r6 += r1; \
r0 = *(u32*)(r6 + 0); \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
/* Same scenario as above at a smaller width: a narrowing (16-bit) spill of a
 * 32-bit value must clear the scalar's ID, otherwise the 16-bit fill's range
 * would be wrongly propagated back to the source register and the final
 * ctx access would be accepted. Must be rejected with the __msg error.
 */
SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
asm volatile (" \
r6 = r1; \
/* Roll one bit to force the verifier to track both branches. */\
call %[bpf_get_prandom_u32]; \
r0 &= 0x8; \
/* Put a large number into r1. */ \
w1 = 0xffff0000; \
r1 += r0; \
/* Assign an ID to r1. */ \
r2 = r1; \
/* 16-bit spill r1 to stack - should clear the ID! */\
*(u16*)(r10 - 8) = r1; \
/* 16-bit fill r2 from stack. */ \
r2 = *(u16*)(r10 - 8); \
/* Compare r2 with another register to trigger find_equal_scalars.\
* Having one random bit is important here, otherwise the verifier cuts\
* the corners. If the ID was mistakenly preserved on spill, this would\
* cause the verifier to think that r1 is also equal to zero in one of\
* the branches, and equal to eight on the other branch.\
*/ \
r3 = 0; \
if r2 != r3 goto l0_%=; \
l0_%=: r1 >>= 16; \
/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
* read will happen, because it actually contains 0xffff.\
*/ \
r6 += r1; \
r0 = *(u32*)(r6 + 0); \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL"; char _license[] SEC("license") = "GPL";
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment