Commit 9a5a908c authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2020-06-02

The following pull-request contains BPF fixes only for your *net-next*
tree.

We've added 10 non-merge commits during the last 1 day(s) which contain
a total of 15 files changed, 229 insertions(+), 74 deletions(-).

The main changes are:

1) Several fixes to the s390 BPF JIT, e.g. fixing a kernel panic when the
   BPF stack is not 8-byte aligned, from Ilya Leoshkevich.

2) Fix the bpf_skb_adjust_room() helper's CHECKSUM_UNNECESSARY handling,
   which wrongly bypassed TCP checksum verification, from Daniel Borkmann.

3) Fix the tools/bpf/ build under MAKEFLAGS=rR, which causes built-in
   variables such as CXX to be undefined, also from Ilya Leoshkevich.

4) Fix the BPF ringbuf selftest's shared sample_cnt variable, marking it
   volatile to avoid compiler optimizations on it, from Andrii Nakryiko.

5) Fix up the test_verifier selftest due to the addition of rx_queue_mapping
   to the bpf_sock structure, from Alexei Starovoitov.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a25c1df e7ad28e6
@@ -503,7 +503,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 	} else {
 		/* j tail_call_start: NOP if no tail calls are used */
 		EMIT4_PCREL(0xa7f40000, 6);
-		_EMIT2(0);
+		/* bcr 0,%0 */
+		EMIT2(0x0700, 0, REG_0);
 	}
 	/* Tail calls have to skip above initialization */
 	jit->tail_call_start = jit->prg;
@@ -594,7 +595,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
  * stack space for the large switch statement.
  */
 static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
-				 int i, bool extra_pass)
+				 int i, bool extra_pass, u32 stack_depth)
 {
 	struct bpf_insn *insn = &fp->insnsi[i];
 	u32 dst_reg = insn->dst_reg;
@@ -1207,7 +1208,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	 */
 	if (jit->seen & SEEN_STACK)
-		off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
+		off = STK_OFF_TCCNT + STK_OFF + stack_depth;
 	else
 		off = STK_OFF_TCCNT;
 	/* lhi %w0,1 */
@@ -1249,7 +1250,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
 	/*
 	 * Restore registers before calling function
 	 */
-	save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);
+	save_restore_regs(jit, REGS_RESTORE, stack_depth);
 	/*
 	 * goto *(prog->bpf_func + tail_call_start);
@@ -1519,7 +1520,7 @@ static int bpf_set_addr(struct bpf_jit *jit, int i)
  * Compile eBPF program into s390x code
  */
 static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
-			bool extra_pass)
+			bool extra_pass, u32 stack_depth)
 {
 	int i, insn_count, lit32_size, lit64_size;
@@ -1527,18 +1528,18 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
 	jit->lit64 = jit->lit64_start;
 	jit->prg = 0;
-	bpf_jit_prologue(jit, fp->aux->stack_depth);
+	bpf_jit_prologue(jit, stack_depth);
 	if (bpf_set_addr(jit, 0) < 0)
 		return -1;
 	for (i = 0; i < fp->len; i += insn_count) {
-		insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
+		insn_count = bpf_jit_insn(jit, fp, i, extra_pass, stack_depth);
 		if (insn_count < 0)
 			return -1;
 		/* Next instruction address */
 		if (bpf_set_addr(jit, i + insn_count) < 0)
 			return -1;
 	}
-	bpf_jit_epilogue(jit, fp->aux->stack_depth);
+	bpf_jit_epilogue(jit, stack_depth);
 	lit32_size = jit->lit32 - jit->lit32_start;
 	lit64_size = jit->lit64 - jit->lit64_start;
@@ -1569,6 +1570,7 @@ struct s390_jit_data {
  */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
+	u32 stack_depth = round_up(fp->aux->stack_depth, 8);
 	struct bpf_prog *tmp, *orig_fp = fp;
 	struct bpf_binary_header *header;
 	struct s390_jit_data *jit_data;
@@ -1621,7 +1623,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	 * - 3: Calculate program size and addrs array
 	 */
 	for (pass = 1; pass <= 3; pass++) {
-		if (bpf_jit_prog(&jit, fp, extra_pass)) {
+		if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
 			fp = orig_fp;
 			goto free_addrs;
 		}
@@ -1635,7 +1637,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		goto free_addrs;
 	}
 skip_init_ctx:
-	if (bpf_jit_prog(&jit, fp, extra_pass)) {
+	if (bpf_jit_prog(&jit, fp, extra_pass, stack_depth)) {
 		bpf_jit_binary_free(header);
 		fp = orig_fp;
 		goto free_addrs;
......
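The heart of the s390 change above is that bpf_int_jit_compile() now rounds the verifier-provided stack depth up to an 8-byte boundary once and threads that value through prologue, instruction and epilogue generation, so the JITed frame stays aligned. Below is a minimal stand-alone sketch of the rounding arithmetic; the simplified round_up here mirrors the kernel macro for power-of-two alignments and is an illustration, not the kernel's exact definition:

#include <assert.h>

/* Simplified round_up for power-of-two y, in the spirit of the kernel macro. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	/* A verifier-reported stack depth of e.g. 12 bytes would leave the
	 * s390 frame misaligned; rounding keeps it 8-byte aligned. */
	assert(round_up(12, 8) == 16);
	assert(round_up(16, 8) == 16);
	assert(round_up(1, 8) == 8);
	return 0;
}

With, say, a 12-byte stack depth the JIT now reserves 16 bytes, preserving the 8-byte alignment the ABI expects instead of panicking on a misaligned frame.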
@@ -3919,6 +3919,14 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
 	}
 }
 
+static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_level = 0;
+	}
+}
+
 /* Check if we need to perform checksum complete validation.
  *
  * Returns true if checksum complete is needed, false otherwise
......
@@ -1635,6 +1635,13 @@ union bpf_attr {
  *		Grow or shrink the room for data in the packet associated to
  *		*skb* by *len_diff*, and according to the selected *mode*.
  *
+ *		By default, the helper will reset any offloaded checksum
+ *		indicator of the skb to CHECKSUM_NONE. This can be avoided
+ *		by the following flag:
+ *
+ *		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
+ *		  checksum data of the skb to CHECKSUM_NONE.
+ *
  *		There are two supported modes at this time:
  *
  *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
@@ -3213,6 +3220,38 @@ union bpf_attr {
  *		calculation.
  *	Return
  *		Requested value, or 0, if flags are not recognized.
+ *
+ * int bpf_csum_level(struct sk_buff *skb, u64 level)
+ *	Description
+ *		Change the skb's checksum level by one layer up or down, or
+ *		reset it entirely to none in order to have the stack perform
+ *		checksum validation. The level is applicable to the following
+ *		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ *		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ *		through the **bpf_skb_adjust_room**\ () helper with the
+ *		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag passed in would require
+ *		one call to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC**
+ *		since the UDP header is removed. Similarly, an encap of the
+ *		latter into the former could be accompanied by a helper call
+ *		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ *		skb is still intended to be processed in higher layers of the
+ *		stack instead of just egressing at tc.
+ *
+ *		There are four supported level settings at this time:
+ *
+ *		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ *		  sets CHECKSUM_NONE to force checksum validation by the stack.
+ *		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ *		  skb->csum_level.
+ *	Return
+ *		0 on success, or a negative error in case of failure. In the
+ *		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ *		is returned or the error code -EACCES in case the skb is not
+ *		subject to CHECKSUM_UNNECESSARY.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3349,7 +3388,8 @@ union bpf_attr {
 	FN(ringbuf_reserve),		\
 	FN(ringbuf_submit),		\
 	FN(ringbuf_discard),		\
-	FN(ringbuf_query),
+	FN(ringbuf_query),		\
+	FN(csum_level),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -3426,6 +3466,14 @@ enum {
 	BPF_F_CURRENT_NETNS		= (-1L),
 };
 
+/* BPF_FUNC_csum_level level values. */
+enum {
+	BPF_CSUM_LEVEL_QUERY,
+	BPF_CSUM_LEVEL_INC,
+	BPF_CSUM_LEVEL_DEC,
+	BPF_CSUM_LEVEL_RESET,
+};
+
 /* BPF_FUNC_skb_adjust_room flags. */
 enum {
 	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
@@ -3433,6 +3481,7 @@ enum {
 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
 	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
 };
 
 enum {
......
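Taken together, BPF_F_ADJ_ROOM_NO_CSUM_RESET and bpf_csum_level() let a tc program adjust packet room without silently keeping a stale CHECKSUM_UNNECESSARY verdict. Below is a minimal sketch of the decap pattern from the helper documentation above, assuming a libbpf build where the new helper is already exposed via bpf_helpers.h; the section name, program name and encap_overhead value are illustrative (the test_cls_redirect.c change further below follows the same pattern):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int decap_keep_csum(struct __sk_buff *skb)
{
	/* Illustrative value: bytes of encapsulation to strip. */
	const __s32 encap_overhead = 24;

	/* Shrink room but keep the device's CHECKSUM_UNNECESSARY state... */
	if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_FIXED_GSO |
				BPF_F_ADJ_ROOM_NO_CSUM_RESET))
		return TC_ACT_SHOT;

	/* ...then account for the removed UDP layer explicitly. */
	if (bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

Without the flag, bpf_skb_adjust_room() now falls back to __skb_reset_checksum_unnecessary(), so the stack re-validates the inner checksum instead of trusting a verdict that no longer matches the packet layout.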
@@ -2015,6 +2015,40 @@ static const struct bpf_func_proto bpf_csum_update_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
+{
+	/* The interface is to be used in combination with bpf_skb_adjust_room()
+	 * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET
+	 * is passed as flags, for example.
+	 */
+	switch (level) {
+	case BPF_CSUM_LEVEL_INC:
+		__skb_incr_checksum_unnecessary(skb);
+		break;
+	case BPF_CSUM_LEVEL_DEC:
+		__skb_decr_checksum_unnecessary(skb);
+		break;
+	case BPF_CSUM_LEVEL_RESET:
+		__skb_reset_checksum_unnecessary(skb);
+		break;
+	case BPF_CSUM_LEVEL_QUERY:
+		return skb->ip_summed == CHECKSUM_UNNECESSARY ?
+		       skb->csum_level : -EACCES;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_csum_level_proto = {
+	.func		= bpf_csum_level,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+};
+
 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	return dev_forward_skb(dev, skb);
@@ -3113,7 +3147,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
 {
 	int ret;
 
-	if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
+	if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
+			       BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
 		return -EINVAL;
 
 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
@@ -3163,7 +3198,8 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
 	u32 off;
 	int ret;
 
-	if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
+	if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
+			       BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
 		return -EINVAL;
 	if (unlikely(len_diff_abs > 0xfffU))
 		return -EFAULT;
@@ -3191,6 +3227,8 @@ BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
 	ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
 		       bpf_skb_net_grow(skb, off, len_diff_abs, flags);
+	if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
+		__skb_reset_checksum_unnecessary(skb);
 
 	bpf_compute_data_pointers(skb);
 	return ret;
@@ -6276,6 +6314,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_csum_diff_proto;
 	case BPF_FUNC_csum_update:
 		return &bpf_csum_update_proto;
+	case BPF_FUNC_csum_level:
+		return &bpf_csum_level_proto;
 	case BPF_FUNC_l3_csum_replace:
 		return &bpf_l3_csum_replace_proto;
 	case BPF_FUNC_l4_csum_replace:
@@ -6609,6 +6649,8 @@ lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_skb_store_bytes_proto;
 	case BPF_FUNC_csum_update:
 		return &bpf_csum_update_proto;
+	case BPF_FUNC_csum_level:
+		return &bpf_csum_level_proto;
 	case BPF_FUNC_l3_csum_replace:
 		return &bpf_l3_csum_replace_proto;
 	case BPF_FUNC_l4_csum_replace:
......
@@ -64,12 +64,12 @@ $(OUTPUT)%.lex.c: $(srctree)/tools/bpf/%.l
 	$(QUIET_FLEX)$(LEX) -o $@ $<
 
 $(OUTPUT)%.o: $(srctree)/tools/bpf/%.c
-	$(QUIET_CC)$(COMPILE.c) -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
 
 $(OUTPUT)%.yacc.o: $(OUTPUT)%.yacc.c
-	$(QUIET_CC)$(COMPILE.c) -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
 
 $(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c
-	$(QUIET_CC)$(COMPILE.c) -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
 
 PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm
......
@@ -126,7 +126,7 @@ else
 endif
 
 $(OUTPUT)_prog.o: prog.c
-	$(QUIET_CC)$(COMPILE.c) -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
 
 $(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
@@ -141,10 +141,10 @@ profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
 	$(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
 
 $(OUTPUT)prog.o: prog.c profiler.skel.h
-	$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
 
 $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
-	$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
 
 $(OUTPUT)feature.o: | zdep
@@ -152,7 +152,7 @@ $(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
 	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
 
 $(OUTPUT)%.o: %.c
-	$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+	$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
 
 clean: $(LIBBPF)-clean
 	$(call QUIET_CLEAN, bpftool)
......
@@ -1635,6 +1635,13 @@ union bpf_attr {
  *		Grow or shrink the room for data in the packet associated to
  *		*skb* by *len_diff*, and according to the selected *mode*.
  *
+ *		By default, the helper will reset any offloaded checksum
+ *		indicator of the skb to CHECKSUM_NONE. This can be avoided
+ *		by the following flag:
+ *
+ *		* **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
+ *		  checksum data of the skb to CHECKSUM_NONE.
+ *
  *		There are two supported modes at this time:
  *
  *		* **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
@@ -3213,6 +3220,38 @@ union bpf_attr {
  *		calculation.
  *	Return
  *		Requested value, or 0, if flags are not recognized.
+ *
+ * int bpf_csum_level(struct sk_buff *skb, u64 level)
+ *	Description
+ *		Change the skb's checksum level by one layer up or down, or
+ *		reset it entirely to none in order to have the stack perform
+ *		checksum validation. The level is applicable to the following
+ *		protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ *		| ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ *		through the **bpf_skb_adjust_room**\ () helper with the
+ *		**BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag passed in would require
+ *		one call to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC**
+ *		since the UDP header is removed. Similarly, an encap of the
+ *		latter into the former could be accompanied by a helper call
+ *		to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ *		skb is still intended to be processed in higher layers of the
+ *		stack instead of just egressing at tc.
+ *
+ *		There are four supported level settings at this time:
+ *
+ *		* **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ *		  with CHECKSUM_UNNECESSARY.
+ *		* **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ *		  sets CHECKSUM_NONE to force checksum validation by the stack.
+ *		* **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ *		  skb->csum_level.
+ *	Return
+ *		0 on success, or a negative error in case of failure. In the
+ *		case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ *		is returned or the error code -EACCES in case the skb is not
+ *		subject to CHECKSUM_UNNECESSARY.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3349,7 +3388,8 @@ union bpf_attr {
 	FN(ringbuf_reserve),		\
 	FN(ringbuf_submit),		\
 	FN(ringbuf_discard),		\
-	FN(ringbuf_query),
+	FN(ringbuf_query),		\
+	FN(csum_level),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -3426,6 +3466,14 @@ enum {
 	BPF_F_CURRENT_NETNS		= (-1L),
 };
 
+/* BPF_FUNC_csum_level level values. */
+enum {
+	BPF_CSUM_LEVEL_QUERY,
+	BPF_CSUM_LEVEL_INC,
+	BPF_CSUM_LEVEL_DEC,
+	BPF_CSUM_LEVEL_RESET,
+};
+
 /* BPF_FUNC_skb_adjust_room flags. */
 enum {
 	BPF_F_ADJ_ROOM_FIXED_GSO	= (1ULL << 0),
@@ -3433,6 +3481,7 @@ enum {
 	BPF_F_ADJ_ROOM_ENCAP_L3_IPV6	= (1ULL << 2),
 	BPF_F_ADJ_ROOM_ENCAP_L4_GRE	= (1ULL << 3),
 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
 };
 
 enum {
......
@@ -2,6 +2,8 @@
 include ../../../../scripts/Kbuild.include
 include ../../../scripts/Makefile.arch
 
+CXX ?= $(CROSS_COMPILE)g++
+
 CURDIR := $(abspath .)
 TOOLSDIR := $(abspath ../../..)
 LIBDIR := $(TOOLSDIR)/lib
......
@@ -25,7 +25,7 @@ struct sample {
 	char comm[16];
 };
 
-static int sample_cnt;
+static volatile int sample_cnt;
 
 static int process_sample(void *ctx, void *data, size_t len)
 {
......
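The one-word ringbuf fix above addresses a genuine compiler hazard: sample_cnt is updated from the ring buffer callback while the test loop waits on it, and without volatile the compiler is free to hoist the load out of the loop and spin on a stale register value. Below is a stand-alone sketch of the same hazard, using a signal handler in place of the ringbuf callback; the names are illustrative and this deliberately simplifies the selftest's actual control flow:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Without volatile the compiler may cache the flag in a register and
 * never observe the handler's store. */
static volatile sig_atomic_t sample_cnt;

static void on_alarm(int sig)
{
	(void)sig;
	sample_cnt = 1;	/* stand-in for the ringbuf callback bumping the counter */
}

int main(void)
{
	signal(SIGALRM, on_alarm);
	alarm(1);
	while (sample_cnt == 0)	/* volatile forces a fresh load each iteration */
		;
	printf("got sample\n");
	return 0;
}

sig_atomic_t plus volatile is the portable flavor of the same idea; the selftest itself only needed volatile int to stop the compiler from optimizing accesses to the shared counter.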
@@ -380,9 +380,10 @@ static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
 	}
 
 	if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC,
-				BPF_F_ADJ_ROOM_FIXED_GSO)) {
+				BPF_F_ADJ_ROOM_FIXED_GSO |
+				BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+	    bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC))
 		return TC_ACT_SHOT;
-	}
 
 	return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
 }
@@ -472,7 +473,9 @@ static ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
 	}
 
 	if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET,
-				BPF_F_ADJ_ROOM_FIXED_GSO)) {
+				BPF_F_ADJ_ROOM_FIXED_GSO |
+				BPF_F_ADJ_ROOM_NO_CSUM_RESET) ||
+	    bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) {
 		metrics->errors_total_encap_adjust_failed++;
 		return TC_ACT_SHOT;
 	}
......
@@ -6,7 +6,7 @@
 	BPF_MOV64_IMM(BPF_REG_2, 34),
 	BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .result = ACCEPT,
@@ -20,7 +20,7 @@
 	BPF_MOV64_IMM(BPF_REG_2, 34),
 	BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .errstr = "invalid stack type R1 off=-48 access_size=58",
@@ -36,7 +36,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 13),
 	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .result = ACCEPT,
@@ -51,7 +51,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 24),
 	BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .errstr = "invalid stack type R1 off=-48 access_size=58",
......
@@ -19,7 +19,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -36,7 +36,7 @@
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .errstr = "invalid indirect read from stack off -64+0 size 64",
@@ -55,7 +55,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -84,7 +84,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -112,7 +112,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -132,7 +132,7 @@
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -152,7 +152,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -171,7 +171,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -190,7 +190,7 @@
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -208,7 +208,7 @@
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
 	BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -233,7 +233,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -259,7 +259,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -286,7 +286,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -313,7 +313,7 @@
 	BPF_MOV64_IMM(BPF_REG_4, 0),
 	BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
@@ -468,7 +468,7 @@
 	BPF_MOV64_IMM(BPF_REG_1, 0),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .errstr = "R1 type=inv expected=fp",
@@ -481,7 +481,7 @@
 	BPF_MOV64_IMM(BPF_REG_1, 0),
 	BPF_MOV64_IMM(BPF_REG_2, 1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .errstr = "R1 type=inv expected=fp",
@@ -495,7 +495,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .result = ACCEPT,
@@ -513,7 +513,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_8b = { 3 },
@@ -534,7 +534,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_8b = { 3 },
@@ -554,7 +554,7 @@
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_8b = { 3 },
@@ -580,7 +580,7 @@
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_EXIT_INSN(),
 },
@@ -607,7 +607,7 @@
 	BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
 	BPF_EXIT_INSN(),
 },
......
@@ -10,7 +10,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -29,7 +29,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -67,7 +67,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -87,7 +87,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -109,7 +109,7 @@
 	BPF_MOV64_IMM(BPF_REG_2,
 		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -129,7 +129,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_2, 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -170,7 +170,7 @@
 	BPF_MOV64_IMM(BPF_REG_2,
 		      sizeof(struct test_val) - offsetof(struct test_val, foo) + 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -191,7 +191,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_2, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -212,7 +212,7 @@
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -235,7 +235,7 @@
 	BPF_MOV64_IMM(BPF_REG_2,
 		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -256,7 +256,7 @@
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
 	BPF_MOV64_IMM(BPF_REG_2, 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -300,7 +300,7 @@
 		      sizeof(struct test_val) -
 		      offsetof(struct test_val, foo) + 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -322,7 +322,7 @@
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
 	BPF_MOV64_IMM(BPF_REG_2, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -344,7 +344,7 @@
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
 	BPF_MOV64_IMM(BPF_REG_2, -1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -368,7 +368,7 @@
 	BPF_MOV64_IMM(BPF_REG_2,
 		      sizeof(struct test_val) - offsetof(struct test_val, foo)),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -390,7 +390,7 @@
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
 	BPF_MOV64_IMM(BPF_REG_2, 8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -433,7 +433,7 @@
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
 	BPF_MOV64_IMM(BPF_REG_2, 1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
@@ -458,7 +458,7 @@
 		      sizeof(struct test_val) -
 		      offsetof(struct test_val, foo) + 1),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .fixup_map_hash_48b = { 3 },
......
@@ -31,14 +31,14 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 .fixup_map_array_48b = { 1 },
 .result = VERBOSE_ACCEPT,
 .errstr =
-	"26: (85) call bpf_probe_read#4\
+	"26: (85) call bpf_probe_read_kernel#113\
 	last_idx 26 first_idx 20\
 	regs=4 stack=0 before 25\
 	regs=4 stack=0 before 24\
@@ -91,7 +91,7 @@
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
-	BPF_EMIT_CALL(BPF_FUNC_probe_read),
+	BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
 	BPF_EXIT_INSN(),
 },
 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -99,7 +99,7 @@
 .result = VERBOSE_ACCEPT,
 .flags = BPF_F_TEST_STATE_FREQ,
 .errstr =
-	"26: (85) call bpf_probe_read#4\
+	"26: (85) call bpf_probe_read_kernel#113\
 	last_idx 26 first_idx 22\
 	regs=4 stack=0 before 25\
 	regs=4 stack=0 before 24\
......
@@ -222,7 +222,7 @@
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
-	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, state)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, rx_queue_mapping)),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 },
......