Commit 94079b64 authored by Daniel Borkmann

Merge branch 'bpf-bounded-loops'

Alexei Starovoitov says:

====================
v2->v3: fixed issues in backtracking pointed out by Andrii.
The next step is to add a lot more tests for backtracking.

v1->v2: addressed Andrii's feedback.

This patch set introduces verifier support for bounded loops
and adds several other improvements.
Ideally they would be introduced one at a time,
but to support bounded loops the verifier first needs to take
a 'step back' in patch 1. That patch introduces tracking of
spill/fill of constants through the stack. Though it's a useful
feature, it hurts the cilium tests.
Patch 3 introduces another feature by extending the is_branch_taken
logic to 'if rX op rY' conditions. This feature is also
necessary to support bounded loops.
Patch 4 then adds support for the loops themselves while adding
key heuristics around jmps_processed.
The parentage chain of verifier states introduced in patch 4
allows patch 9 to add backtracking of precise scalar registers,
which finally resolves the degradation from patch 1.

The end result is a much faster verifier for existing programs
and new support for loops.
See patch 8 for the many kinds of loops that are now validated.
Patch 9 is the trickiest one and could be rewritten with
a different algorithm in the future.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents a324aae3 b5dc0163
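To make the feature concrete before the diffs: below is a minimal sketch, in the same style as the loop selftests added by this series, of a loop the verifier can now validate without LLVM unrolling it. The section name and the bound of 256 are illustrative, not taken from the patches.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example, not part of this series */
#include <linux/bpf.h>
#include "bpf_helpers.h"

char _license[] SEC("license") = "GPL";

SEC("raw_tracepoint/example")
int bounded_sum(void *ctx)
{
	int i, sum = 0;

	/* kept as a real loop on purpose; the verifier walks the
	 * iterations and proves 'i' stays within [0, 256)
	 */
#pragma clang loop unroll(disable)
	for (i = 0; i < 256; i++)
		sum += i;
	return sum;
}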
@@ -139,6 +139,8 @@ struct bpf_reg_state {
*/
s32 subreg_def;
enum bpf_reg_liveness live;
+/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
+bool precise;
};
enum bpf_stack_slot_type {
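The new 'precise' bit above is what lets most scalar registers compare as equivalent during state pruning. A simplified, self-contained sketch of that idea (hypothetical types and function, not the kernel's actual regsafe()):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the scalar part of bpf_reg_state. */
struct scalar_reg {
	int64_t smin_value, smax_value;
	bool precise;
};

/* An already-verified (old) scalar accepts any current scalar unless
 * it was marked precise, i.e. its bounds actually influenced a safety
 * decision; then the current bounds must fit within the old ones.
 */
static bool old_scalar_accepts(const struct scalar_reg *old,
			       const struct scalar_reg *cur)
{
	if (!old->precise)
		return true;
	return cur->smin_value >= old->smin_value &&
	       cur->smax_value <= old->smax_value;
}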
@@ -190,14 +192,77 @@ struct bpf_func_state {
struct bpf_stack_state *stack;
};
+struct bpf_idx_pair {
+u32 prev_idx;
+u32 idx;
+};
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
+struct bpf_verifier_state *parent;
+/*
+ * The 'branches' field is the number of branches left to explore:
+ * 0 - all possible paths from this state reached bpf_exit or
+ * were safely pruned
+ * 1 - at least one path is being explored.
+ * This state hasn't reached bpf_exit.
+ * 2 - at least two paths are being explored.
+ * This state is an immediate parent of two children.
+ * One is a fallthrough branch with branches==1 and the other
+ * state is pushed into the stack (to be explored later) also with
+ * branches==1. The parent of this state has branches==1.
+ * The verifier state tree connected via 'parent' pointers looks like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 2 -> 1 (second 'if' pushed into stack)
+ * 1
+ * 1
+ * 1 bpf_exit.
+ *
+ * Once do_check() reaches bpf_exit, it calls update_branch_counts()
+ * and the verifier state tree will look like:
+ * 1
+ * 1
+ * 2 -> 1 (first 'if' pushed into stack)
+ * 1
+ * 1 -> 1 (second 'if' pushed into stack)
+ * 0
+ * 0
+ * 0 bpf_exit.
+ * After pop_stack(), do_check() will resume at the second 'if'.
+ *
+ * If is_state_visited() sees a state with branches > 0 it means
+ * there is a loop. If such a state is exactly equal to the current
+ * state it's an infinite loop. Note that states_equal() checks for
+ * state equivalency, so two states being 'states_equal' does not
+ * mean an infinite loop. The exact comparison is provided by the
+ * states_maybe_looping() function. It's a stronger pre-check and
+ * much faster than states_equal().
+ *
+ * This algorithm may not find all possible infinite loops, or the
+ * loop iteration count may be too high. In such cases the
+ * BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
+ */
+u32 branches;
+u32 insn_idx;
u32 curframe;
u32 active_spin_lock;
bool speculative;
+/* first and last insn idx of this verifier state */
+u32 first_insn_idx;
+u32 last_insn_idx;
+/* jmp history recorded from first to last.
+ * Backtracking uses it to go from last to first.
+ * For most states jmp_history_cnt is in [0-3].
+ * For loops it can go up to ~40.
+ */
+struct bpf_idx_pair *jmp_history;
+u32 jmp_history_cnt;
};
#define bpf_get_spilled_reg(slot, frame) \
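The branch-count propagation described in the comment above is compact enough to sketch. A simplified, self-contained version follows (hypothetical types; the kernel's update_branch_counts() in verifier.c has the same shape, plus sanity checks):

#include <stdint.h>

struct vstate {
	struct vstate *parent;
	uint32_t branches;
};

/* Called when one exploration path is done (bpf_exit reached or the
 * path was pruned): walk up the parentage chain, and when a state's
 * last outstanding branch completes, complete a branch of its parent.
 */
static void update_branch_counts(struct vstate *st)
{
	while (st) {
		if (--st->branches)
			break;	/* a sibling path is still being explored */
		st = st->parent;
	}
}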
@@ -312,7 +377,9 @@ struct bpf_verifier_env {
} cfg;
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
-u32 insn_processed;
+u32 prev_insn_processed, insn_processed;
+/* number of jmps, calls, exits analyzed so far */
+u32 prev_jmps_processed, jmps_processed;
/* total verification time */
u64 verification_time;
/* maximum number of verifier states kept in 'branching' instructions */
This diff is collapsed.
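The collapsed diff above (presumably the main verifier.c changes) is where the prev_* counters are consumed: is_state_visited() only snapshots a new state for future pruning when enough work has happened since the last snapshot. A hedged sketch of that check (the 20/100 thresholds match this series; the helper itself is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Snapshotting a state costs memory and makes later states_equal()
 * walks longer, so it only pays off when real work happened since
 * the previous snapshot at this instruction.
 */
static bool worth_snapshotting(uint32_t jmps, uint32_t prev_jmps,
			       uint32_t insns, uint32_t prev_insns)
{
	return jmps - prev_jmps >= 20 || insns - prev_insns >= 100;
}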
@@ -5,7 +5,7 @@ static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
if (level != LIBBPF_DEBUG)
-return 0;
+return vfprintf(stderr, format, args);
if (!strstr(format, "verifier log"))
return 0;
@@ -32,24 +32,69 @@ static int check_load(const char *file, enum bpf_prog_type type)
void test_bpf_verif_scale(void)
{
-const char *scale[] = {
-"./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o"
+const char *sched_cls[] = {
+"./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o",
};
-const char *pyperf[] = {
-"./pyperf50.o", "./pyperf100.o", "./pyperf180.o"
+const char *raw_tp[] = {
+/* full unroll by llvm */
+"./pyperf50.o", "./pyperf100.o", "./pyperf180.o",
+/* partial unroll. llvm will unroll loop ~150 times.
+ * C loop count -> 600.
+ * Asm loop count -> 4.
+ * 16k insns in loop body.
+ * Total of 5 such loops. Total program size ~82k insns.
+ */
+"./pyperf600.o",
+/* no unroll at all.
+ * C loop count -> 600.
+ * ASM loop count -> 600.
+ * ~110 insns in loop body.
+ * Total of 5 such loops. Total program size ~1500 insns.
+ */
+"./pyperf600_nounroll.o",
+"./loop1.o", "./loop2.o",
+/* partial unroll. 19k insn in a loop.
+ * Total program size 20.8k insn.
+ * ~350k processed_insns
+ */
+"./strobemeta.o",
+/* no unroll, tiny loops */
+"./strobemeta_nounroll1.o",
+"./strobemeta_nounroll2.o",
};
+const char *cg_sysctl[] = {
+"./test_sysctl_loop1.o", "./test_sysctl_loop2.o",
+};
int err, i;
if (verifier_stats)
libbpf_set_print(libbpf_debug_print);
-for (i = 0; i < ARRAY_SIZE(scale); i++) {
-err = check_load(scale[i], BPF_PROG_TYPE_SCHED_CLS);
-printf("test_scale:%s:%s\n", scale[i], err ? "FAIL" : "OK");
+err = check_load("./loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT);
+printf("test_scale:loop3:%s\n", err ? (error_cnt--, "OK") : "FAIL");
+for (i = 0; i < ARRAY_SIZE(sched_cls); i++) {
+err = check_load(sched_cls[i], BPF_PROG_TYPE_SCHED_CLS);
+printf("test_scale:%s:%s\n", sched_cls[i], err ? "FAIL" : "OK");
}
-for (i = 0; i < ARRAY_SIZE(pyperf); i++) {
-err = check_load(pyperf[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
-printf("test_scale:%s:%s\n", pyperf[i], err ? "FAIL" : "OK");
+for (i = 0; i < ARRAY_SIZE(raw_tp); i++) {
+err = check_load(raw_tp[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
+printf("test_scale:%s:%s\n", raw_tp[i], err ? "FAIL" : "OK");
}
+for (i = 0; i < ARRAY_SIZE(cg_sysctl); i++) {
+err = check_load(cg_sysctl[i], BPF_PROG_TYPE_CGROUP_SYSCTL);
+printf("test_scale:%s:%s\n", cg_sysctl[i], err ? "FAIL" : "OK");
+}
+err = check_load("./test_xdp_loop.o", BPF_PROG_TYPE_XDP);
+printf("test_scale:test_xdp_loop:%s\n", err ? "FAIL" : "OK");
+err = check_load("./test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL);
+printf("test_scale:test_seg6_loop:%s\n", err ? "FAIL" : "OK");
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/kfree_skb")
int nested_loops(volatile struct pt_regs* ctx)
{
int i, j, sum = 0, m;
for (j = 0; j < 300; j++)
for (i = 0; i < j; i++) {
if (j & 1)
m = ctx->rax;
else
m = j;
sum += i * m;
}
return sum;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(volatile struct pt_regs* ctx)
{
int i = 0;
while (true) {
if (ctx->rax & 1)
i += 3;
else
i += 7;
if (i > 40)
break;
}
return i;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
char _license[] SEC("license") = "GPL";
SEC("raw_tracepoint/consume_skb")
int while_true(volatile struct pt_regs* ctx)
{
__u64 i = 0, sum = 0;
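/* 2^32 iterations: far more than the verifier will walk, so this
 * program is expected to fail to load (hence the inverted loop3
 * check in test_bpf_verif_scale above)
 */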
do {
i++;
sum += ctx->rax;
} while (i < 0x100000000ULL);
return sum;
}
@@ -220,7 +220,11 @@ static inline __attribute__((__always_inline__)) int __on_event(struct pt_regs *
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
return 0;
-#pragma unroll
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma clang loop unroll(full)
+#endif
/* Unwind python stack */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
/* clang will not unroll the loop 600 times.
* Instead it will unroll it to the amount it deemed
* appropriate, but the loop will still execute 600 times.
* Total program size is around 90k insns
*/
#include "pyperf.h"
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
#define NO_UNROLL
/* clang will not unroll at all.
* Total program size is around 2k insns
*/
#include "pyperf.h"
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 100
#define STROBE_MAX_MAP_ENTRIES 20
/* full unroll by llvm */
#undef NO_UNROLL
#include "strobemeta.h"
This diff is collapsed.
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 13
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2019 Facebook
#define STROBE_MAX_INTS 2
#define STROBE_MAX_STRS 25
#define STROBE_MAX_MAPS 30
#define STROBE_MAX_MAP_ENTRIES 20
#define NO_UNROLL
#include "strobemeta.h"
#include <stddef.h>
#include <inttypes.h>
#include <errno.h>
#include <linux/seg6_local.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
#define SR6_FLAG_ALERT (1 << 4)
#define htonll(x) ((bpf_htonl(1)) == 1 ? (x) : ((uint64_t)bpf_htonl((x) & \
0xFFFFFFFF) << 32) | bpf_htonl((x) >> 32))
#define ntohll(x) ((bpf_ntohl(1)) == 1 ? (x) : ((uint64_t)bpf_ntohl((x) & \
0xFFFFFFFF) << 32) | bpf_ntohl((x) >> 32))
#define BPF_PACKET_HEADER __attribute__((packed))
struct ip6_t {
unsigned int ver:4;
unsigned int priority:8;
unsigned int flow_label:20;
unsigned short payload_len;
unsigned char next_header;
unsigned char hop_limit;
unsigned long long src_hi;
unsigned long long src_lo;
unsigned long long dst_hi;
unsigned long long dst_lo;
} BPF_PACKET_HEADER;
struct ip6_addr_t {
unsigned long long hi;
unsigned long long lo;
} BPF_PACKET_HEADER;
struct ip6_srh_t {
unsigned char nexthdr;
unsigned char hdrlen;
unsigned char type;
unsigned char segments_left;
unsigned char first_segment;
unsigned char flags;
unsigned short tag;
struct ip6_addr_t segments[0];
} BPF_PACKET_HEADER;
struct sr6_tlv_t {
unsigned char type;
unsigned char len;
unsigned char value[0];
} BPF_PACKET_HEADER;
static __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
struct ip6_t *ip;
uint8_t *ipver;
data_end = (void *)(long)skb->data_end;
cursor = (void *)(long)skb->data;
ipver = (uint8_t *)cursor;
if ((void *)ipver + sizeof(*ipver) > data_end)
return NULL;
if ((*ipver >> 4) != 6)
return NULL;
ip = cursor_advance(cursor, sizeof(*ip));
if ((void *)ip + sizeof(*ip) > data_end)
return NULL;
if (ip->next_header != 43)
return NULL;
srh = cursor_advance(cursor, sizeof(*srh));
if ((void *)srh + sizeof(*srh) > data_end)
return NULL;
if (srh->type != 4)
return NULL;
return srh;
}
static __attribute__((always_inline))
int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
uint32_t old_pad, uint32_t pad_off)
{
int err;
if (new_pad != old_pad) {
err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
(int) new_pad - (int) old_pad);
if (err)
return err;
}
if (new_pad > 0) {
char pad_tlv_buf[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0};
struct sr6_tlv_t *pad_tlv = (struct sr6_tlv_t *) pad_tlv_buf;
pad_tlv->type = SR6_TLV_PADDING;
pad_tlv->len = new_pad - 2;
err = bpf_lwt_seg6_store_bytes(skb, pad_off,
(void *)pad_tlv_buf, new_pad);
if (err)
return err;
}
return 0;
}
static __attribute__((always_inline))
int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t *tlv_off, uint32_t *pad_size,
uint32_t *pad_off)
{
uint32_t srh_off, cur_off;
int offset_valid = 0;
int err;
srh_off = (char *)srh - (char *)(long)skb->data;
// cur_off = end of segments, start of possible TLVs
cur_off = srh_off + sizeof(*srh) +
sizeof(struct ip6_addr_t) * (srh->first_segment + 1);
*pad_off = 0;
// we can only go as far as ~10 TLVs due to the BPF max stack size
#pragma clang loop unroll(disable)
for (int i = 0; i < 100; i++) {
struct sr6_tlv_t tlv;
if (cur_off == *tlv_off)
offset_valid = 1;
if (cur_off >= srh_off + ((srh->hdrlen + 1) << 3))
break;
err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
if (err)
return err;
if (tlv.type == SR6_TLV_PADDING) {
*pad_size = tlv.len + sizeof(tlv);
*pad_off = cur_off;
if (*tlv_off == srh_off) {
*tlv_off = cur_off;
offset_valid = 1;
}
break;
} else if (tlv.type == SR6_TLV_HMAC) {
break;
}
cur_off += sizeof(tlv) + tlv.len;
} // we reached the padding or HMAC TLVs, or the end of the SRH
if (*pad_off == 0)
*pad_off = cur_off;
if (*tlv_off == -1)
*tlv_off = cur_off;
else if (!offset_valid)
return -EINVAL;
return 0;
}
static __attribute__((always_inline))
int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
uint8_t len_remaining, new_pad;
uint32_t pad_off = 0;
uint32_t pad_size = 0;
uint32_t partial_srh_len;
int err;
if (tlv_off != -1)
tlv_off += srh_off;
if (itlv->type == SR6_TLV_PADDING || itlv->type == SR6_TLV_HMAC)
return -EINVAL;
err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
if (err)
return err;
err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
if (err)
return err;
err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
if (err)
return err;
// the following can't be moved inside update_tlv_pad because the
// bpf verifier has some issues with it
pad_off += sizeof(*itlv) + itlv->len;
partial_srh_len = pad_off - srh_off;
len_remaining = partial_srh_len % 8;
new_pad = 8 - len_remaining;
if (new_pad == 1) // cannot pad for 1 byte only
new_pad = 9;
else if (new_pad == 8)
new_pad = 0;
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
// Add an Egress TLV fc00::4, add the flag A,
// and apply End.X action to fc42::1
SEC("lwt_seg6local")
int __add_egr_x(struct __sk_buff *skb)
{
unsigned long long hi = 0xfc42000000000000;
unsigned long long lo = 0x1;
struct ip6_srh_t *srh = get_srh(skb);
uint8_t new_flags = SR6_FLAG_ALERT;
struct ip6_addr_t addr;
int err, offset;
if (srh == NULL)
return BPF_DROP;
uint8_t tlv[20] = {2, 18, 0, 0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4};
err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
(struct sr6_tlv_t *)&tlv, 20);
if (err)
return BPF_DROP;
offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags);
err = bpf_lwt_seg6_store_bytes(skb, offset,
(void *)&new_flags, sizeof(new_flags));
if (err)
return BPF_DROP;
addr.lo = htonll(lo);
addr.hi = htonll(hi);
err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
(void *)&addr, sizeof(addr));
if (err)
return BPF_DROP;
return BPF_REDIRECT;
}
char __license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stdint.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
unsigned char i;
char name[64];
int ret;
memset(name, 0, sizeof(name));
ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
return 1;
}
SEC("cgroup/sysctl")
int sysctl_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
int ret;
if (ctx->write)
return 0;
if (!is_tcp_mem(ctx))
return 0;
ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
return 0;
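/* run-time no-op: ret is already in (0, MAX_ULONG_STR_LEN], but the
 * AND gives the verifier a tight bound on 'off'
 */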
off += ret & MAX_ULONG_STR_LEN;
}
return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stdint.h>
#include <string.h>
#include <linux/stddef.h>
#include <linux/bpf.h>
#include "bpf_helpers.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
{
volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
unsigned char i;
char name[64];
int ret;
memset(name, 0, sizeof(name));
ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(tcp_mem_name); ++i)
if (name[i] != tcp_mem_name[i])
return 0;
return 1;
}
SEC("cgroup/sysctl")
int sysctl_tcp_mem(struct bpf_sysctl *ctx)
{
unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
char value[MAX_VALUE_STR_LEN];
unsigned char i, off = 0;
int ret;
if (ctx->write)
return 0;
if (!is_tcp_mem(ctx))
return 0;
ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
return 0;
#pragma clang loop unroll(disable)
for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
tcp_mem + i);
if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
return 0;
off += ret & MAX_ULONG_STR_LEN;
}
return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
}
char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
#include "test_iptunnel_common.h"
int _version SEC("version") = 1;
struct bpf_map_def SEC("maps") rxcnt = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 256,
};
struct bpf_map_def SEC("maps") vip2tnl = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct iptnl_info),
.max_entries = MAX_IPTNL_ENTRIES,
};
static __always_inline void count_tx(__u32 protocol)
{
__u64 *rxcnt_count;
rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
if (rxcnt_count)
*rxcnt_count += 1;
}
static __always_inline int get_dport(void *trans_data, void *data_end,
__u8 protocol)
{
struct tcphdr *th;
struct udphdr *uh;
switch (protocol) {
case IPPROTO_TCP:
th = (struct tcphdr *)trans_data;
if (th + 1 > data_end)
return -1;
return th->dest;
case IPPROTO_UDP:
uh = (struct udphdr *)trans_data;
if (uh + 1 > data_end)
return -1;
return uh->dest;
default:
return 0;
}
}
static __always_inline void set_ethhdr(struct ethhdr *new_eth,
const struct ethhdr *old_eth,
const struct iptnl_info *tnl,
__be16 h_proto)
{
memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
new_eth->h_proto = h_proto;
}
static __always_inline int handle_ipv4(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct iphdr *iph = data + sizeof(struct ethhdr);
__u16 *next_iph;
__u16 payload_len;
struct vip vip = {};
int dport;
__u32 csum = 0;
int i;
if (iph + 1 > data_end)
return XDP_DROP;
dport = get_dport(iph + 1, data_end, iph->protocol);
if (dport == -1)
return XDP_DROP;
vip.protocol = iph->protocol;
vip.family = AF_INET;
vip.daddr.v4 = iph->daddr;
vip.dport = dport;
payload_len = bpf_ntohs(iph->tot_len);
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v4-in-v4 */
if (!tnl || tnl->family != AF_INET)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
iph = data + sizeof(*new_eth);
old_eth = data + sizeof(*iph);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end ||
iph + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
iph->version = 4;
iph->ihl = sizeof(*iph) >> 2;
iph->frag_off = 0;
iph->protocol = IPPROTO_IPIP;
iph->check = 0;
iph->tos = 0;
iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
iph->daddr = tnl->daddr.v4;
iph->saddr = tnl->saddr.v4;
iph->ttl = 8;
next_iph = (__u16 *)iph;
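/* IP header checksum computed with a real (non-unrolled) loop;
 * with bounded-loop support the verifier can walk its 10 iterations
 */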
#pragma clang loop unroll(disable)
for (i = 0; i < sizeof(*iph) >> 1; i++)
csum += *next_iph++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
count_tx(vip.protocol);
return XDP_TX;
}
static __always_inline int handle_ipv6(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct iptnl_info *tnl;
struct ethhdr *new_eth;
struct ethhdr *old_eth;
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
__u16 payload_len;
struct vip vip = {};
int dport;
if (ip6h + 1 > data_end)
return XDP_DROP;
dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
if (dport == -1)
return XDP_DROP;
vip.protocol = ip6h->nexthdr;
vip.family = AF_INET6;
memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
vip.dport = dport;
payload_len = ip6h->payload_len;
tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
/* It only does v6-in-v6 */
if (!tnl || tnl->family != AF_INET6)
return XDP_PASS;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
return XDP_DROP;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
ip6h = data + sizeof(*new_eth);
old_eth = data + sizeof(*ip6h);
if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
ip6h + 1 > data_end)
return XDP_DROP;
set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
ip6h->version = 6;
ip6h->priority = 0;
memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
ip6h->nexthdr = IPPROTO_IPV6;
ip6h->hop_limit = 8;
memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
count_tx(vip.protocol);
return XDP_TX;
}
SEC("xdp_tx_iptunnel")
int _xdp_tx_iptunnel(struct xdp_md *xdp)
{
void *data_end = (void *)(long)xdp->data_end;
void *data = (void *)(long)xdp->data;
struct ethhdr *eth = data;
__u16 h_proto;
if (eth + 1 > data_end)
return XDP_DROP;
h_proto = eth->h_proto;
if (h_proto == bpf_htons(ETH_P_IP))
return handle_ipv4(xdp);
else if (h_proto == bpf_htons(ETH_P_IPV6))
return handle_ipv6(xdp);
else
return XDP_DROP;
}
char _license[] SEC("license") = "GPL";
@@ -237,10 +237,10 @@ static void bpf_fill_scale1(struct bpf_test *self)
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % 64 + 1));
}
-/* every jump adds 1 step to insn_processed, so to stay exactly
- * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
+/* is_state_visited() doesn't allocate state for pruning for every jump.
+ * Hence multiply jmps by 4 to accommodate that heuristic
 */
-while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
@@ -269,10 +269,7 @@ static void bpf_fill_scale2(struct bpf_test *self)
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, -8 * (k % (64 - 4 * FUNC_NEST) + 1));
}
-/* every jump adds 1 step to insn_processed, so to stay exactly
- * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
- */
-while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
@@ -215,9 +215,11 @@
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
},
-.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-.errstr = "back-edge from insn",
-.result = REJECT,
+.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+.errstr_unpriv = "back-edge from insn",
+.result_unpriv = REJECT,
+.result = ACCEPT,
+.retval = 1,
},
{
"calls: conditional call 4",
@@ -250,22 +252,24 @@
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
-.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-.errstr = "back-edge from insn",
-.result = REJECT,
+.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+.result = ACCEPT,
+.retval = 1,
},
{
"calls: conditional call 6",
.insns = {
+BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
-.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-.errstr = "back-edge from insn",
+.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+.errstr = "infinite loop detected",
.result = REJECT,
},
{
@@ -41,7 +41,8 @@
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
-.errstr = "back-edge",
+.errstr = "unreachable insn 1",
+.errstr_unpriv = "back-edge",
.result = REJECT,
},
{
@@ -53,18 +54,20 @@
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
-.errstr = "back-edge",
+.errstr = "unreachable insn 4",
+.errstr_unpriv = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
-BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
-.errstr = "back-edge",
+.errstr = "infinite loop detected",
+.errstr_unpriv = "back-edge",
.result = REJECT,
},
@@ -511,7 +511,8 @@
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
-BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+offsetof(struct __sk_buff, mark)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
@@ -29,9 +29,9 @@
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -46,9 +46,9 @@
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
@@ -122,9 +122,9 @@
{
"helper access to variable memory: stack, JMP, bounds + offset",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
@@ -143,9 +143,9 @@
{
"helper access to variable memory: stack, JMP, wrong max",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
@@ -163,9 +163,9 @@
{
"helper access to variable memory: stack, JMP, no max check",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_MOV64_IMM(BPF_REG_4, 0),
@@ -183,9 +183,9 @@
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
@@ -201,9 +201,9 @@
{
"helper access to variable memory: stack, JMP (signed), no min check",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
-BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
@@ -244,6 +244,7 @@
{
"helper access to variable memory: map, JMP, wrong max",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -251,7 +252,7 @@
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
-BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
@@ -262,7 +263,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
-.fixup_map_hash_48b = { 3 },
+.fixup_map_hash_48b = { 4 },
.errstr = "invalid access to map value, value_size=48 off=0 size=49",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -296,6 +297,7 @@
{
"helper access to variable memory: map adjusted, JMP, wrong max",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -304,7 +306,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
-BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
@@ -315,7 +317,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
-.fixup_map_hash_48b = { 3 },
+.fixup_map_hash_48b = { 4 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -337,8 +339,8 @@
{
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
+BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_1, 0),
-BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -562,6 +564,7 @@
{
"helper access to variable memory: 8 bytes leak",
.insns = {
+BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -572,7 +575,6 @@
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
{
"bounded loop, count to 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.retval = 4,
},
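For readability, the first test above is the assembly form of the following C loop (illustrative translation, not part of the test suite):

/* Equivalent of "bounded loop, count to 4": start at 0, add 1,
 * loop back while r0 < 4, then exit with r0 == 4.
 */
int count_to_4(void)
{
	int r0 = 0;

	do {
		r0 += 1;	/* BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1) */
	} while (r0 < 4);	/* BPF_JMP_IMM(BPF_JLT, ..., -2) back-edge */
	return r0;		/* retval = 4 */
}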
{
"bounded loop, count to 20",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bounded loop, count from positive unknown to 4",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.retval = 4,
},
{
"bounded loop, count from totally unknown to 4",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bounded loop, count to 4 with equality",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bounded loop, start in the middle",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "back-edge",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.retval = 4,
},
{
"bounded loop containing a forward jump",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.retval = 4,
},
{
"bounded loop that jumps out rather than in",
.insns = {
BPF_MOV64_IMM(BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_A(-4),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"infinite loop after a conditional jump",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 5),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_JMP_A(-2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "program is too large",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"bounded recursion",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "back-edge",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"infinite loop in two jumps",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_A(0),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "loop detected",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"infinite loop: three-jump trick",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "loop detected",
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},