Commit 1b4ae19e authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2023-03-23

We've added 8 non-merge commits during the last 13 day(s) which contain
a total of 21 files changed, 238 insertions(+), 161 deletions(-).

The main changes are:

1) Fix verification issues in some BPF programs due to their stack usage
   patterns, from Eduard Zingerman.

2) Fix to add missing overflow checks in xdp_umem_reg and return an error
   in such case, from Kal Conley.

3) Fix and undo poisoning of strlcpy in libbpf given it broke builds for
   libcs which provide it, such as uClibc-ng, from Jesus Sanchez-Palencia.

4) Fix insufficient bpf_jit_limit default to avoid users running into hard
   to debug seccomp BPF errors, from Daniel Borkmann.

5) Fix the return code used when a driver does not implement a bpf_xdp_metadata
   kfunc, making it distinguishable from other errors, from Jesper Dangaard
   Brouer.

6) Two BPF selftest fixes to address compilation errors from recent changes
   in kernel structures, from Alexei Starovoitov.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xdp: bpf_xdp_metadata use EOPNOTSUPP for no driver support
  bpf: Adjust insufficient default bpf_jit_limit
  xsk: Add missing overflow check in xdp_umem_reg
  selftests/bpf: Fix progs/test_deny_namespace.c issues.
  selftests/bpf: Fix progs/find_vma_fail1.c build error.
  libbpf: Revert poisoning of strlcpy
  selftests/bpf: Tests for uninitialized stack reads
  bpf: Allow reads from uninit stack
====================

Link: https://lore.kernel.org/r/20230323225221.6082-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2e63a2df 915efd8a
......@@ -23,10 +23,13 @@ metadata is supported, this set will grow:
An XDP program can use these kfuncs to read the metadata into stack
variables for its own consumption. Or, to pass the metadata on to other
consumers, an XDP program can store it into the metadata area carried
ahead of the packet.
ahead of the packet. Not all packets will necessarily have the requested
metadata available, in which case the driver returns ``-ENODATA``.
Not all kfuncs have to be implemented by the device driver; when not
implemented, the default ones that return ``-EOPNOTSUPP`` will be used.
implemented, the default ones that return ``-EOPNOTSUPP`` will be used
to indicate that the device driver has not implemented this kfunc.
Within an XDP frame, the metadata layout (accessed via ``xdp_buff``) is
as follows::
......
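
To make the documented behaviour above concrete, here is a minimal,
hypothetical sketch of an XDP program that calls both metadata kfuncs and
tells the two error codes apart. The kfunc prototypes match the ones touched
in this series; the program itself, its name and what it does with the values
are illustrative only:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative only: distinguish "driver has no such kfunc" (-EOPNOTSUPP)
   * from "driver supports it, but this frame has no data" (-ENODATA).
   */
  #include <linux/bpf.h>
  #include <errno.h>
  #include <bpf/bpf_helpers.h>

  extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
                                           __u64 *timestamp) __ksym;
  extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx,
                                      __u32 *hash) __ksym;

  SEC("xdp")
  int rx_meta_example(struct xdp_md *ctx)
  {
          __u64 ts = 0;
          __u32 hash = 0;
          int err;

          err = bpf_xdp_metadata_rx_timestamp(ctx, &ts);
          if (err == -EOPNOTSUPP) {
                  /* the driver does not implement this kfunc at all */
          } else if (err == -ENODATA) {
                  /* the driver supports it, but this frame has no timestamp */
          }

          if (!bpf_xdp_metadata_rx_hash(ctx, &hash)) {
                  /* hash is only valid on a 0 return */
          }

          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";
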
......@@ -674,7 +674,7 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
return -EOPNOTSUPP;
return -ENODATA;
*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
mlx4_en_get_cqe_ts(_ctx->cqe));
......@@ -686,7 +686,7 @@ int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
return -EOPNOTSUPP;
return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
return 0;
......
......@@ -162,7 +162,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
return -EOPNOTSUPP;
return -ENODATA;
*timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
_ctx->rq->clock, get_cqe_ts(_ctx->cqe));
......@@ -174,7 +174,7 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
return -EOPNOTSUPP;
return -ENODATA;
*hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
return 0;
......
......@@ -1642,7 +1642,7 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
return -EOPNOTSUPP;
return -ENODATA;
*timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
return 0;
......@@ -1653,7 +1653,7 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
struct veth_xdp_buff *_ctx = (void *)ctx;
if (!_ctx->skb)
return -EOPNOTSUPP;
return -ENODATA;
*hash = skb_get_hash(_ctx->skb);
return 0;
......
......@@ -972,7 +972,7 @@ static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
PAGE_SIZE), LONG_MAX);
return 0;
}
......
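
To put the shift change in perspective, a small userspace sketch of the same
arithmetic; the 1 GiB value for bpf_jit_limit_max is an assumed example, not a
number taken from any particular architecture:

  /* Illustrative only: the default limit moves from 1/4 to 1/2 of
   * bpf_jit_limit_max, rounded up to the page size and capped at LONG_MAX.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define EXAMPLE_PAGE_SIZE 4096ULL

  static uint64_t round_up_to(uint64_t x, uint64_t align)
  {
          return (x + align - 1) / align * align;
  }

  int main(void)
  {
          uint64_t limit_max = 1ULL << 30;        /* assumed: 1 GiB */

          /* old default: limit_max >> 2 -> 256 MiB */
          printf("old: %llu MiB\n", (unsigned long long)
                 (round_up_to(limit_max >> 2, EXAMPLE_PAGE_SIZE) >> 20));
          /* new default: limit_max >> 1 -> 512 MiB */
          printf("new: %llu MiB\n", (unsigned long long)
                 (round_up_to(limit_max >> 1, EXAMPLE_PAGE_SIZE) >> 20));
          return 0;
  }
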
......@@ -3826,6 +3826,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_MISC)
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
......@@ -3863,6 +3865,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
continue;
if (type == STACK_ZERO)
continue;
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
......@@ -5754,7 +5758,8 @@ static int check_stack_range_initialized(
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
if (*stype == STACK_ZERO) {
if ((*stype == STACK_ZERO) ||
(*stype == STACK_INVALID && env->allow_uninit_stack)) {
if (clobber) {
/* helper can write anything into the stack */
*stype = STACK_MISC;
......@@ -13936,6 +13941,10 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
if (env->allow_uninit_stack &&
old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
continue;
/* explored stack has more populated slots than current stack
* and these slots were used
*/
......
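
As a rough illustration of the behaviour change in the verifier hunks above (a
hypothetical program, not one of the selftests added later in this series):
reading a stack slot that was never written is now accepted for privileged
loaders, while unprivileged loads still fail with the "invalid read from
stack" message seen above:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative only: buf is never written, so its slots stay STACK_INVALID;
   * the read below is now allowed for privileged loads and still rejected for
   * unprivileged ones.
   */
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("socket")
  int read_uninit_slot(void *ctx)
  {
          volatile __u64 buf[4];  /* intentionally left uninitialized */

          return buf[1] & 1;      /* read from an uninitialized stack slot */
  }

  char _license[] SEC("license") = "GPL";
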
......@@ -720,7 +720,10 @@ __diag_ignore_all("-Wmissing-prototypes",
* @ctx: XDP context pointer.
* @timestamp: Return value pointer.
*
* Returns 0 on success or ``-errno`` on error.
* Return:
* * Returns 0 on success or ``-errno`` on error.
* * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
* * ``-ENODATA`` : means no RX-timestamp available for this frame
*/
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
......@@ -732,7 +735,10 @@ __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *tim
* @ctx: XDP context pointer.
* @hash: Return value pointer.
*
* Returns 0 on success or ``-errno`` on error.
* Return:
* * Returns 0 on success or ``-errno`` on error.
* * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
* * ``-ENODATA`` : means no RX-hash available for this frame
*/
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash)
{
......
......@@ -150,10 +150,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
u64 npgs, addr = mr->addr, size = mr->len;
unsigned int chunks, chunks_rem;
u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
u64 addr = mr->addr, size = mr->len;
u32 chunks_rem, npgs_rem;
u64 chunks, npgs;
int err;
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
......@@ -188,8 +189,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (npgs > U32_MAX)
return -EINVAL;
chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
if (chunks == 0)
chunks = div_u64_rem(size, chunk_size, &chunks_rem);
if (!chunks || chunks > U32_MAX)
return -EINVAL;
if (!unaligned_chunks && chunks_rem)
......@@ -202,7 +203,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->headroom = headroom;
umem->chunk_size = chunk_size;
umem->chunks = chunks;
umem->npgs = (u32)npgs;
umem->npgs = npgs;
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
......
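
For context, a small userspace sketch of the truncation the new check on
chunks rejects; the values are illustrative, not taken from the patch. The
old cast to unsigned int silently wrapped, whereas a value above U32_MAX is
now an error:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t size = (1ULL << 43) + 2048;    /* assumed example mr->len */
          uint64_t chunk_size = 2048;             /* XDP_UMEM_MIN_CHUNK_SIZE */
          uint64_t chunks = size / chunk_size;    /* 4294967297 */

          printf("chunks          = %llu\n", (unsigned long long)chunks);
          printf("(unsigned int)  = %u\n", (unsigned int)chunks); /* wraps to 1 */
          printf("now rejected?   = %s\n", chunks > UINT32_MAX ? "yes" : "no");
          return 0;
  }
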
......@@ -20,8 +20,8 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
/* prevent accidental re-addition of reallocarray()/strlcpy() */
#pragma GCC poison reallocarray strlcpy
/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray
#include "libbpf.h"
#include "btf.h"
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "uninit_stack.skel.h"
void test_uninit_stack(void)
{
RUN_TESTS(uninit_stack);
}
......@@ -2,6 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#define vm_flags vm_start
char _license[] SEC("license") = "GPL";
......
......@@ -5,12 +5,10 @@
#include <errno.h>
#include <linux/capability.h>
struct kernel_cap_struct {
__u64 val;
} __attribute__((preserve_access_index));
typedef struct { unsigned long long val; } kernel_cap_t;
struct cred {
struct kernel_cap_struct cap_effective;
kernel_cap_t cap_effective;
} __attribute__((preserve_access_index));
char _license[] SEC("license") = "GPL";
......@@ -18,8 +16,8 @@ char _license[] SEC("license") = "GPL";
SEC("lsm.s/userns_create")
int BPF_PROG(test_userns_create, const struct cred *cred, int ret)
{
struct kernel_cap_struct caps = cred->cap_effective;
__u64 cap_mask = BIT_LL(CAP_SYS_ADMIN);
kernel_cap_t caps = cred->cap_effective;
__u64 cap_mask = 1ULL << CAP_SYS_ADMIN;
if (ret)
return 0;
......
......@@ -5,12 +5,12 @@
#include "bpf_misc.h"
struct Small {
int x;
long x;
};
struct Big {
int x;
int y;
long x;
long y;
};
__noinline int foo(const struct Big *big)
......@@ -22,7 +22,7 @@ __noinline int foo(const struct Big *big)
}
SEC("cgroup_skb/ingress")
__failure __msg("invalid indirect read from stack")
__failure __msg("invalid indirect access to stack")
int global_func10(struct __sk_buff *skb)
{
const struct Small small = {.x = skb->len };
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* Read an uninitialized value from stack at a fixed offset */
SEC("socket")
__naked int read_uninit_stack_fixed_off(void *ctx)
{
asm volatile (" \
r0 = 0; \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = *(u8 *)(r10 - 8 ); \
r0 += r1; \
r1 = *(u8 *)(r10 - 11); \
r1 = *(u8 *)(r10 - 13); \
r1 = *(u8 *)(r10 - 15); \
r1 = *(u16*)(r10 - 16); \
r1 = *(u32*)(r10 - 32); \
r1 = *(u64*)(r10 - 64); \
/* read from a spill of a wrong size, it is a separate \
* branch in check_stack_read_fixed_off() \
*/ \
*(u32*)(r10 - 72) = r1; \
r1 = *(u64*)(r10 - 72); \
r0 = 0; \
exit; \
"
::: __clobber_all);
}
/* Read an uninitialized value from stack at a variable offset */
SEC("socket")
__naked int read_uninit_stack_var_off(void *ctx)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
/* force stack depth to be 64 */ \
*(u64*)(r10 - 64) = r0; \
r0 = -r0; \
/* give r0 a range [-31, -1] */ \
if r0 s<= -32 goto exit_%=; \
if r0 s>= 0 goto exit_%=; \
/* access stack using r0 */ \
r1 = r10; \
r1 += r0; \
r2 = *(u8*)(r1 + 0); \
exit_%=: r0 = 0; \
exit; \
"
:
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
static __noinline void dummy(void) {}
/* Pass a pointer to uninitialized stack memory to a helper.
* Passed memory block should be marked as STACK_MISC after helper call.
*/
SEC("socket")
__log_level(7) __msg("fp-104=mmmmmmmm")
__naked int helper_uninit_to_misc(void *ctx)
{
asm volatile (" \
/* force stack depth to be 128 */ \
*(u64*)(r10 - 128) = r1; \
r1 = r10; \
r1 += -128; \
r2 = 32; \
call %[bpf_trace_printk]; \
/* Call to dummy() forces print_verifier_state(..., true), \
* thus showing the stack state, matched by __msg(). \
*/ \
call %[dummy]; \
r0 = 0; \
exit; \
"
:
: __imm(bpf_trace_printk),
__imm(dummy)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
......@@ -2221,19 +2221,22 @@
* that fp-8 stack slot was unused in the fall-through
* branch and will accept the program incorrectly
*/
BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 2, 2),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 6 },
.errstr = "invalid indirect read from stack R2 off -8+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_48b = { 7 },
.errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"calls: ctx read at start of subprog",
......
......@@ -29,19 +29,30 @@
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
/* use bitwise AND to limit r3 range to [0, 64] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory at &fp[-64] is
* not initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 4 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
......@@ -183,20 +194,31 @@
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
/* use JMP to limit r3 range to [0, 64] */
BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory at &fp[-64] is
* not initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+0 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 4 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: stack, JMP (signed), no min check",
......@@ -564,29 +586,41 @@
{
"helper access to variable memory: 8 bytes leak",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
/* set max stack size */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0),
/* set r3 to a random value */
BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
/* Note: fp[-32] left uninitialized */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
/* Limit r3 range to [1, 64] */
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1),
BPF_MOV64_IMM(BPF_REG_4, 0),
/* Call bpf_ringbuf_output(), it is one of a few helper functions with
* ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.
* For unpriv this should signal an error, because memory region [1, 64]
* at &fp[-64] is not fully initialized.
*/
BPF_EMIT_CALL(BPF_FUNC_ringbuf_output),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R1 off -64+32 size 64",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.fixup_map_ringbuf = { 3 },
.errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"helper access to variable memory: 8 bytes no leak (init memory)",
......
......@@ -54,12 +54,13 @@
/* bpf_strtoul() */
BPF_EMIT_CALL(BPF_FUNC_strtoul),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL,
.errstr = "invalid indirect read from stack R4 off -16+4 size 8",
.result_unpriv = REJECT,
.errstr_unpriv = "invalid indirect read from stack R4 off -16+4 size 8",
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"ARG_PTR_TO_LONG misaligned",
......
......@@ -128,9 +128,10 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.errstr = "invalid read from stack off -16+0 size 8",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.errstr_unpriv = "invalid read from stack off -16+0 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"precision tracking for u32 spill/fill",
......@@ -258,6 +259,8 @@
BPF_EXIT_INSN(),
},
.flags = BPF_F_TEST_STATE_FREQ,
.errstr = "invalid read from stack off -8+1 size 8",
.result = REJECT,
.errstr_unpriv = "invalid read from stack off -8+1 size 8",
.result_unpriv = REJECT,
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
......@@ -530,33 +530,6 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"sk_storage_get(map, skb->sk, &stack_value, 1): partially init stack_value",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_4, 1),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_sk_storage_get),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_sk_storage_map = { 14 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "invalid indirect read from stack",
},
{
"bpf_map_lookup_elem(smap, &key)",
.insns = {
......
......@@ -171,9 +171,10 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid read from stack off -4+0 size 4",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result_unpriv = REJECT,
.errstr_unpriv = "invalid read from stack off -4+0 size 4",
/* in privileged mode reads from uninitialized stack locations are permitted */
.result = ACCEPT,
},
{
"Spill a u32 const scalar. Refill as u16. Offset to skb->data",
......
......@@ -212,31 +212,6 @@
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access, max_off+size > max_initialized",
.insns = {
/* Fill only the second from top 8 bytes of the stack. */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
/* Get an unknown value. */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
/* Make it small and 4-byte aligned. */
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16),
/* Add it to fp. We now have either fp-12 or fp-16, but we don't know
* which. fp-12 size 8 is partially uninitialized stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
/* Dereference it indirectly. */
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 5 },
.errstr = "invalid indirect read from stack R2 var_off",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_LWT_IN,
},
{
"indirect variable-offset stack access, min_off < min_initialized",
.insns = {
......@@ -289,33 +264,6 @@
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"indirect variable-offset stack access, uninitialized",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_3, 28),
/* Fill the top 16 bytes of the stack. */
BPF_ST_MEM(BPF_W, BPF_REG_10, -16, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* Get an unknown value. */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, 0),
/* Make it small and 4-byte aligned. */
BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 4),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16),
/* Add it to fp. We now have either fp-12 or fp-16, we don't know
* which, but either way it points to initialized stack.
*/
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10),
BPF_MOV64_IMM(BPF_REG_5, 8),
/* Dereference it indirectly. */
BPF_EMIT_CALL(BPF_FUNC_getsockopt),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid indirect read from stack R4 var_off",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
},
{
"indirect variable-offset stack access, ok",
.insns = {
......