Commit 240436c0 authored by David S. Miller

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
bpf-next-for-netdev
The following pull-request contains BPF updates for your *net-next* tree.

We've added 22 non-merge commits during the last 3 day(s) which contain
a total of 23 files changed, 652 insertions(+), 431 deletions(-).

The main changes are:

1) Add verifier support for annotating users' global BPF subprogram arguments
   with a few commonly requested annotations for a better developer experience,
   from Andrii Nakryiko.

   These tags are (a short usage sketch follows this list):
     - Ability to annotate a special PTR_TO_CTX argument
     - Ability to annotate a generic PTR_TO_MEM as non-NULL

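   A minimal usage sketch (mirroring the selftest additions further down in
   this diff; the caller program, helper choice and buffer are illustrative):

     #include <vmlinux.h>
     #include <bpf/bpf_helpers.h>

     #define __arg_ctx     __attribute__((btf_decl_tag("arg:ctx")))
     #define __arg_nonnull __attribute__((btf_decl_tag("arg:nonnull")))

     long stack[32];

     /* 'ctx' is verified as PTR_TO_CTX regardless of which program type
      * (and thus which context struct) ends up calling this subprog.
      */
     __weak int subprog_ctx_tag(void *ctx __arg_ctx)
     {
             return bpf_get_stack(ctx, stack, sizeof(stack), 0);
     }

     /* 'p' is promised to be non-NULL, so it may be dereferenced without
      * an explicit NULL check.
      */
     __weak int subprog_nonnull(int *p __arg_nonnull)
     {
             return *p * 2;
     }

     SEC("raw_tp")
     int caller(void *ctx)
     {
             int x = 42;

             return subprog_ctx_tag(ctx) + subprog_nonnull(&x);
     }

     char _license[] SEC("license") = "GPL";
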
2) Support BPF verifier range tracking of BPF_JNE, which helps in cases where
   the compiler transforms an unsigned "a > 0" check into "if a == 0 goto xxx"
   and the like, from Menglong Dong. A sketch of the pattern is included below.

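   A hedged C-level sketch of that pattern (the program and buffer names are
   illustrative; it loosely follows the reg_not_equal_const selftest added in
   this series):

     #include <vmlinux.h>
     #include <bpf/bpf_helpers.h>

     SEC("tc")
     int jne_range_sketch(struct __sk_buff *skb)
     {
             __u64 zero = 0;
             __u32 len = bpf_get_prandom_u32() & 7; /* verifier range [0, 7] */

             /* clang typically emits this unsigned "len > 0" check as a
              * BPF_JNE against 0; the verifier can now narrow the taken
              * branch to [1, 7], so the provably non-zero variable size
              * is accepted by the helper below.
              */
             if (len > 0)
                     bpf_skb_store_bytes(skb, 0, &zero, len, 0);
             return 0;
     }

     char _license[] SEC("license") = "GPL";
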
3) Fix a warning in bpf_mem_cache's check_obj_size() as reported by LKP, from Hou Tao.

4) Re-add support for uid/gid options when mounting bpffs, which had to be
   reverted along with the prior BPF token series to avoid conflicts, from
   Daniel Borkmann. A short usage sketch follows below.

5) Fix a libbpf NULL pointer dereference in bpf_object__collect_prog_relos() found
   by fuzzing the library with malformed ELF files, from Mingyi Zhang.

6) Skip DWARF sections in libbpf's linker sanity check, given that compiler options
   for generating compressed debug sections can trigger a rejection due to
   misalignment, from Alyssa Ross.

7) Fix an unnecessary use of the comma operator in the BPF verifier, from Simon Horman.

8) Fix format specifier for unsigned long values in cpustat sample, from Colin Ian King.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cff9c565 5abde624
......@@ -2466,12 +2466,7 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf_func_model *m);
struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg, u32 *nargs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
......
......@@ -606,6 +606,13 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
#define BPF_MAX_SUBPROGS 256
struct bpf_subprog_arg_info {
enum bpf_arg_type arg_type;
union {
u32 mem_size;
};
};
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
......@@ -617,6 +624,10 @@ struct bpf_subprog_info {
bool is_cb: 1;
bool is_async_cb: 1;
bool is_exception_cb: 1;
bool args_cached: 1;
u8 arg_cnt;
struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};
struct bpf_verifier_env;
......@@ -727,6 +738,16 @@ struct bpf_verifier_env {
char tmp_str_buf[TMP_STR_BUF_LEN];
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
return &env->prog->aux->func_info_aux[subprog];
}
static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
return &env->subprog_info[subprog];
}
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
......@@ -764,14 +785,6 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
struct btf *btf, u32 btf_id)
......
......@@ -599,8 +599,15 @@ EXPORT_SYMBOL(bpf_prog_get_type_path);
*/
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
struct inode *inode = d_inode(root);
umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
from_kuid_munged(&init_user_ns, inode->i_uid));
if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
from_kgid_munged(&init_user_ns, inode->i_gid));
if (mode != S_IRWXUGO)
seq_printf(m, ",mode=%o", mode);
return 0;
......@@ -625,15 +632,21 @@ static const struct super_operations bpf_super_ops = {
};
enum {
OPT_UID,
OPT_GID,
OPT_MODE,
};
static const struct fs_parameter_spec bpf_fs_parameters[] = {
fsparam_u32 ("uid", OPT_UID),
fsparam_u32 ("gid", OPT_GID),
fsparam_u32oct ("mode", OPT_MODE),
{}
};
struct bpf_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
};
......@@ -641,6 +654,8 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct bpf_mount_opts *opts = fc->fs_private;
struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
int opt;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
......@@ -662,12 +677,42 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
}
switch (opt) {
case OPT_UID:
uid = make_kuid(current_user_ns(), result.uint_32);
if (!uid_valid(uid))
goto bad_value;
/*
* The requested uid must be representable in the
* filesystem's idmapping.
*/
if (!kuid_has_mapping(fc->user_ns, uid))
goto bad_value;
opts->uid = uid;
break;
case OPT_GID:
gid = make_kgid(current_user_ns(), result.uint_32);
if (!gid_valid(gid))
goto bad_value;
/*
* The requested gid must be representable in the
* filesystem's idmapping.
*/
if (!kgid_has_mapping(fc->user_ns, gid))
goto bad_value;
opts->gid = gid;
break;
case OPT_MODE:
opts->mode = result.uint_32 & S_IALLUGO;
break;
}
return 0;
bad_value:
return invalfc(fc, "Bad value for '%s'", param->key);
}
struct bpf_preload_ops *bpf_preload_ops;
......@@ -750,6 +795,8 @@ static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &bpf_super_ops;
inode = sb->s_root->d_inode;
inode->i_uid = opts->uid;
inode->i_gid = opts->gid;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
populate_bpffs(sb->s_root);
......@@ -785,6 +832,8 @@ static int bpf_init_fs_context(struct fs_context *fc)
return -ENOMEM;
opts->mode = S_IRWXUGO;
opts->uid = current_fsuid();
opts->gid = current_fsgid();
fc->fs_private = opts;
fc->ops = &bpf_context_ops;
......
......@@ -490,27 +490,6 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}
static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
{
struct llist_node *first;
unsigned int obj_size;
first = c->free_llist.first;
if (!first)
return 0;
if (c->percpu_size)
obj_size = pcpu_alloc_size(((void **)first)[1]);
else
obj_size = ksize(first);
if (obj_size != c->unit_size) {
WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
idx, c->percpu_size, obj_size, c->unit_size);
return -EINVAL;
}
return 0;
}
/* When size != 0 bpf_mem_cache for each cpu.
* This is typical bpf hash map use case when all elements have equal size.
*
......@@ -521,10 +500,10 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
int cpu, i, err, unit_size, percpu_size = 0;
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
/* room for llist_node and per-cpu pointer */
if (percpu)
......@@ -560,7 +539,6 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
err = 0;
#ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current();
#endif
......@@ -574,28 +552,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->tgt = c;
init_refill_work(c);
/* Another bpf_mem_cache will be used when allocating
* c->unit_size in bpf_mem_alloc(), so doesn't prefill
* for the bpf_mem_cache because these free objects will
* never be used.
*/
if (i != bpf_mem_cache_idx(c->unit_size))
continue;
prefill_mem_cache(c, cpu);
err = check_obj_size(c, i);
if (err)
goto out;
}
}
out:
ma->caches = pcc;
/* refill_work is either zeroed or initialized, so it is safe to
* call irq_work_sync().
*/
if (err)
bpf_mem_alloc_destroy(ma);
return err;
return 0;
}
static void drain_mem_cache(struct bpf_mem_cache *c)
......@@ -869,7 +831,7 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
void *ret;
if (!size)
return ZERO_SIZE_PTR;
return NULL;
idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
if (idx < 0)
......@@ -879,26 +841,17 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
{
size_t size;
if (percpu)
size = pcpu_alloc_size(*((void **)ptr));
else
size = ksize(ptr - LLIST_NODE_SZ);
return bpf_mem_cache_idx(size);
}
void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
struct bpf_mem_cache *c;
int idx;
if (!ptr)
return;
idx = bpf_mem_free_idx(ptr, ma->percpu);
if (idx < 0)
c = *(void **)(ptr - LLIST_NODE_SZ);
idx = bpf_mem_cache_idx(c->unit_size);
if (WARN_ON_ONCE(idx < 0))
return;
unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
......@@ -906,13 +859,15 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
struct bpf_mem_cache *c;
int idx;
if (!ptr)
return;
idx = bpf_mem_free_idx(ptr, ma->percpu);
if (idx < 0)
c = *(void **)(ptr - LLIST_NODE_SZ);
idx = bpf_mem_cache_idx(c->unit_size);
if (WARN_ON_ONCE(idx < 0))
return;
unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
......@@ -986,41 +941,3 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
* actual size of dynamic per-cpu area will always be matched and there is
* no need to adjust size_index for per-cpu allocation. However for the
* simplicity of the implementation, use an unified size_index for both
* kmalloc and per-cpu allocation.
*/
static __init int bpf_mem_cache_adjust_size(void)
{
unsigned int size;
/* Adjusting the indexes in size_index() according to the object_size
* of underlying slab cache, so bpf_mem_alloc() will select a
* bpf_mem_cache with unit_size equal to the object_size of
* the underlying slab cache.
*
* The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
* 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
*/
for (size = 192; size >= 8; size -= 8) {
unsigned int kmalloc_size, index;
kmalloc_size = kmalloc_size_roundup(size);
if (kmalloc_size == size)
continue;
if (kmalloc_size <= 192)
index = size_index[(kmalloc_size - 1) / 8];
else
index = fls(kmalloc_size - 1) - 1;
/* Only overwrite if necessary */
if (size_index[(size - 1) / 8] != index)
size_index[(size - 1) / 8] = index;
}
return 0;
}
subsys_initcall(bpf_mem_cache_adjust_size);
......@@ -66,10 +66,10 @@ static void cpu_stat_print(void)
printf("CPU-%-6d ", j);
for (i = 0; i < MAX_CSTATE_ENTRIES; i++)
printf("%-11ld ", data->cstate[i] / 1000000);
printf("%-11lu ", data->cstate[i] / 1000000);
for (i = 0; i < MAX_PSTATE_ENTRIES; i++)
printf("%-11ld ", data->pstate[i] / 1000000);
printf("%-11lu ", data->pstate[i] / 1000000);
printf("\n");
}
......
......@@ -188,6 +188,9 @@ enum libbpf_tristate {
!!sym; \
})
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
......
......@@ -4355,6 +4355,8 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
scn = elf_sec_by_idx(obj, sec_idx);
scn_data = elf_sec_data(obj, scn);
if (!scn_data)
return -LIBBPF_ERRNO__FORMAT;
relo_sec_name = elf_sec_str(obj, shdr->sh_name);
sec_name = elf_sec_name(obj, scn);
......
......@@ -719,6 +719,9 @@ static int linker_sanity_check_elf(struct src_obj *obj)
return -EINVAL;
}
if (is_dwarf_sec_name(sec->sec_name))
continue;
if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) {
pr_warn("ELF section #%zu alignment %llu is non pow-of-2 alignment in %s\n",
sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
......
......@@ -335,6 +335,7 @@ static void htab_mem_report_final(struct bench_res res[], int res_cnt)
" peak memory usage %7.2lfMiB\n",
loop_mean, loop_stddev, mem_mean, mem_stddev, peak_mem / 1048576.0);
close(ctx.fd);
cleanup_cgroup_environment();
}
......
......@@ -348,7 +348,8 @@ static void test_func_sockmap_update(void)
}
static void test_obj_load_failure_common(const char *obj_file,
const char *target_obj_file)
const char *target_obj_file,
const char *exp_msg)
{
/*
* standalone test that asserts failure to load freplace prog
......@@ -356,6 +357,7 @@ static void test_obj_load_failure_common(const char *obj_file,
*/
struct bpf_object *obj = NULL, *pkt_obj;
struct bpf_program *prog;
char log_buf[64 * 1024];
int err, pkt_fd;
__u32 duration = 0;
......@@ -374,11 +376,21 @@ static void test_obj_load_failure_common(const char *obj_file,
err = bpf_program__set_attach_target(prog, pkt_fd, NULL);
ASSERT_OK(err, "set_attach_target");
log_buf[0] = '\0';
if (exp_msg)
bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
if (env.verbosity > VERBOSE_NONE)
bpf_program__set_log_level(prog, 2);
/* It should fail to load the program */
err = bpf_object__load(obj);
if (env.verbosity > VERBOSE_NONE && exp_msg) /* we overtook log */
printf("VERIFIER LOG:\n================\n%s\n================\n", log_buf);
if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
goto close_prog;
if (exp_msg)
ASSERT_HAS_SUBSTR(log_buf, exp_msg, "fail_msg");
close_prog:
bpf_object__close(obj);
bpf_object__close(pkt_obj);
......@@ -388,14 +400,24 @@ static void test_func_replace_return_code(void)
{
/* test invalid return code in the replaced program */
test_obj_load_failure_common("./freplace_connect_v4_prog.bpf.o",
"./connect4_prog.bpf.o");
"./connect4_prog.bpf.o", NULL);
}
static void test_func_map_prog_compatibility(void)
{
/* test with spin lock map value in the replaced program */
test_obj_load_failure_common("./freplace_attach_probe.bpf.o",
"./test_attach_probe.bpf.o");
"./test_attach_probe.bpf.o", NULL);
}
static void test_func_replace_unreliable(void)
{
/* freplace'ing unreliable main prog should fail with error
* "Cannot replace static functions"
*/
test_obj_load_failure_common("freplace_unreliable_prog.bpf.o",
"./verifier_btf_unreliable_prog.bpf.o",
"Cannot replace static functions");
}
static void test_func_replace_global_func(void)
......@@ -563,6 +585,8 @@ void serial_test_fexit_bpf2bpf(void)
test_func_replace_return_code();
if (test__start_subtest("func_map_prog_compatibility"))
test_func_map_prog_compatibility();
if (test__start_subtest("func_replace_unreliable"))
test_func_replace_unreliable();
if (test__start_subtest("func_replace_multi"))
test_func_replace_multi();
if (test__start_subtest("fmod_ret_freplace"))
......
......@@ -169,9 +169,9 @@ void test_log_fixup(void)
if (test__start_subtest("bad_core_relo_trunc_none"))
bad_core_relo(0, TRUNC_NONE /* full buf */);
if (test__start_subtest("bad_core_relo_trunc_partial"))
bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */);
if (test__start_subtest("bad_core_relo_trunc_full"))
bad_core_relo(210, TRUNC_FULL /* truncate also libbpf's message patch */);
bad_core_relo(220, TRUNC_FULL /* truncate also libbpf's message patch */);
if (test__start_subtest("bad_core_relo_subprog"))
bad_core_relo_subprog();
if (test__start_subtest("missing_map"))
......
......@@ -590,12 +590,7 @@ static void range_cond(enum num_t t, struct range x, struct range y,
*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
break;
case OP_NE:
/* generic case, can't derive more information */
*newx = range(t, x.a, x.b);
*newy = range(t, y.a, y.b);
break;
/* below extended logic is not supported by verifier just yet */
/* below logic is supported by the verifier now */
if (x.a == x.b && x.a == y.a) {
/* X is a constant matching left side of Y */
*newx = range(t, x.a, x.b);
......@@ -2097,10 +2092,22 @@ static struct subtest_case crafted_cases[] = {
{U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
{S32, U64, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)-255, 0}},
{S32, S64, {(u32)(s32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
{S32, S64, {0, 1}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
{S32, U32, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}, {(u32)(s32)S32_MIN, (u32)(s32)S32_MIN}},
{S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
{S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
{S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
{S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},
/* edge overlap testings for BPF_NE */
{U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
{U64, U64, {0, U64_MAX}, {0, 0}},
{S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
{S64, U64, {S64_MIN, 0}, {0, 0}},
{S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
{U32, U32, {0, U32_MAX}, {0, 0}},
{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
};
/* Go over crafted hard-coded cases. This is fast, so we do it as part of
......
......@@ -14,6 +14,7 @@
#include "verifier_bpf_get_stack.skel.h"
#include "verifier_bswap.skel.h"
#include "verifier_btf_ctx_access.skel.h"
#include "verifier_btf_unreliable_prog.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
......@@ -125,6 +126,7 @@ void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_u
void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
void test_verifier_bswap(void) { RUN(verifier_bswap); }
void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); }
void test_verifier_btf_unreliable_prog(void) { RUN(verifier_btf_unreliable_prog); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
......
......@@ -78,7 +78,7 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
}
SEC("kretprobe/cgroup_destroy_locked")
__failure __msg("reg type unsupported for arg#0 function")
__failure __msg("calling kernel function bpf_cgroup_acquire is not allowed")
int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
{
struct cgroup *acquired;
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
SEC("freplace/btf_unreliable_kprobe")
/* context type is what BPF verifier expects for kprobe context, but target
* program has `struct whatever *ctx` argument, so freplace operation will be
* rejected with the following message:
*
* arg0 replace_btf_unreliable_kprobe(struct pt_regs *) doesn't match btf_unreliable_kprobe(struct whatever *)
*/
int replace_btf_unreliable_kprobe(bpf_user_pt_regs_t *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
......@@ -248,7 +248,7 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl
}
SEC("lsm/task_free")
__failure __msg("reg type unsupported for arg#0 function")
__failure __msg("R1 must be a rcu pointer")
int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task)
{
struct task_struct *acquired;
......
......@@ -17,7 +17,7 @@ struct generic_map_value {
char _license[] SEC("license") = "GPL";
const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
int err = 0;
......@@ -166,7 +166,7 @@ static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int
batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
} while (0)
DEFINE_ARRAY_WITH_KPTR(8);
/* kptr doesn't support bin_data_8 which is a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
......@@ -198,21 +198,20 @@ int test_batch_alloc_free(void *ctx)
if ((u32)bpf_get_current_pid_tgid() != pid)
return 0;
/* Alloc 128 8-bytes objects in batch to trigger refilling,
* then free 128 8-bytes objects in batch to trigger freeing.
/* Alloc 128 16-bytes objects in batch to trigger refilling,
* then free 128 16-bytes objects in batch to trigger freeing.
*/
CALL_BATCH_ALLOC_FREE(8, 128, 0);
CALL_BATCH_ALLOC_FREE(16, 128, 1);
CALL_BATCH_ALLOC_FREE(32, 128, 2);
CALL_BATCH_ALLOC_FREE(64, 128, 3);
CALL_BATCH_ALLOC_FREE(96, 128, 4);
CALL_BATCH_ALLOC_FREE(128, 128, 5);
CALL_BATCH_ALLOC_FREE(192, 128, 6);
CALL_BATCH_ALLOC_FREE(256, 128, 7);
CALL_BATCH_ALLOC_FREE(512, 64, 8);
CALL_BATCH_ALLOC_FREE(1024, 32, 9);
CALL_BATCH_ALLOC_FREE(2048, 16, 10);
CALL_BATCH_ALLOC_FREE(4096, 8, 11);
CALL_BATCH_ALLOC_FREE(16, 128, 0);
CALL_BATCH_ALLOC_FREE(32, 128, 1);
CALL_BATCH_ALLOC_FREE(64, 128, 2);
CALL_BATCH_ALLOC_FREE(96, 128, 3);
CALL_BATCH_ALLOC_FREE(128, 128, 4);
CALL_BATCH_ALLOC_FREE(192, 128, 5);
CALL_BATCH_ALLOC_FREE(256, 128, 6);
CALL_BATCH_ALLOC_FREE(512, 64, 7);
CALL_BATCH_ALLOC_FREE(1024, 32, 8);
CALL_BATCH_ALLOC_FREE(2048, 16, 9);
CALL_BATCH_ALLOC_FREE(4096, 8, 10);
return 0;
}
......@@ -223,21 +222,20 @@ int test_free_through_map_free(void *ctx)
if ((u32)bpf_get_current_pid_tgid() != pid)
return 0;
/* Alloc 128 8-bytes objects in batch to trigger refilling,
/* Alloc 128 16-bytes objects in batch to trigger refilling,
* then free these objects through map free.
*/
CALL_BATCH_ALLOC(8, 128, 0);
CALL_BATCH_ALLOC(16, 128, 1);
CALL_BATCH_ALLOC(32, 128, 2);
CALL_BATCH_ALLOC(64, 128, 3);
CALL_BATCH_ALLOC(96, 128, 4);
CALL_BATCH_ALLOC(128, 128, 5);
CALL_BATCH_ALLOC(192, 128, 6);
CALL_BATCH_ALLOC(256, 128, 7);
CALL_BATCH_ALLOC(512, 64, 8);
CALL_BATCH_ALLOC(1024, 32, 9);
CALL_BATCH_ALLOC(2048, 16, 10);
CALL_BATCH_ALLOC(4096, 8, 11);
CALL_BATCH_ALLOC(16, 128, 0);
CALL_BATCH_ALLOC(32, 128, 1);
CALL_BATCH_ALLOC(64, 128, 2);
CALL_BATCH_ALLOC(96, 128, 3);
CALL_BATCH_ALLOC(128, 128, 4);
CALL_BATCH_ALLOC(192, 128, 5);
CALL_BATCH_ALLOC(256, 128, 6);
CALL_BATCH_ALLOC(512, 64, 7);
CALL_BATCH_ALLOC(1024, 32, 8);
CALL_BATCH_ALLOC(2048, 16, 9);
CALL_BATCH_ALLOC(4096, 8, 10);
return 0;
}
......@@ -251,17 +249,17 @@ int test_batch_percpu_alloc_free(void *ctx)
/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
* then free 128 16-bytes per-cpu objects in batch to trigger freeing.
*/
CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);
return 0;
}
......@@ -275,17 +273,17 @@ int test_percpu_free_through_map_free(void *ctx)
/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
* then free these object through map free.
*/
CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);
return 0;
}
......@@ -26,7 +26,7 @@ int f3(int val, struct __sk_buff *skb)
}
SEC("tc")
__failure __msg("expected pointer to ctx, but got PTR")
__failure __msg("expects pointer to ctx")
int global_func5(struct __sk_buff *skb)
{
return f1(skb) + f2(2, skb) + f3(3, skb);
......
......@@ -1075,4 +1075,66 @@ l0_%=: r0 = 0; \
: __clobber_all);
}
SEC("tc")
__description("bounds check with JMP_NE for reg edge")
__success __retval(0)
__naked void reg_not_equal_const(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
call %[bpf_get_prandom_u32]; \
r4 = r0; \
r4 &= 7; \
if r4 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: r1 = r6; \
r2 = 0; \
r3 = r10; \
r3 += -8; \
r5 = 0; \
/* The 4th argument of bpf_skb_store_bytes is defined as \
* ARG_CONST_SIZE, so 0 is not allowed. The 'r4 != 0' \
* is providing us this exclusion of zero from initial \
* [0, 7] range. \
*/ \
call %[bpf_skb_store_bytes]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_skb_store_bytes)
: __clobber_all);
}
SEC("tc")
__description("bounds check with JMP_EQ for reg edge")
__success __retval(0)
__naked void reg_equal_const(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
call %[bpf_get_prandom_u32]; \
r4 = r0; \
r4 &= 7; \
if r4 == 0 goto l0_%=; \
r1 = r6; \
r2 = 0; \
r3 = r10; \
r3 += -8; \
r5 = 0; \
/* Just the same as what we do in reg_not_equal_const() */ \
call %[bpf_skb_store_bytes]; \
l0_%=: r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32),
__imm(bpf_skb_store_bytes)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
struct whatever {};
SEC("kprobe")
__success __log_level(2)
/* context type is wrong, making it impossible to freplace this program */
int btf_unreliable_kprobe(struct whatever *ctx)
{
return 0;
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include <stdbool.h>
#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
int arr[1];
int unkn_idx;
......@@ -98,4 +97,96 @@ int unguarded_unsupp_global_called(void)
return global_unsupp(&x);
}
long stack[128];
__weak int subprog_nullable_ptr_bad(int *p)
{
return (*p) * 2; /* bad, missing null check */
}
SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
int x = 42;
return subprog_nullable_ptr_bad(&x);
}
__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
return (*p1) * (*p2); /* good, no need for NULL checks */
}
int x = 47;
SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
int y = 74;
return subprog_nonnull_ptr_good(&x, &y);
}
/* this global subprog can be now called from many types of entry progs, each
* with different context type
*/
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}
SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
return subprog_ctx_tag(ctx);
}
SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
return subprog_ctx_tag(ctx);
}
SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
return subprog_ctx_tag(ctx);
}
__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
long *d, t, buf[1] = {};
d = bpf_dynptr_data(dptr, 0, sizeof(long));
if (!d)
return 0;
t = *d + 1;
d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
if (!d)
return t;
t = *d + 2;
return t;
}
SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
struct bpf_dynptr dptr;
bpf_dynptr_from_xdp(ctx, 0, &dptr);
return subprog_dynptr(&dptr);
}
char _license[] SEC("license") = "GPL";