Commit c3ad3eca authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2019-03-09

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a crash in AF_XDP's xsk_diag_put_ring() which was passing
   wrong queue argument, from Eric.

2) Fix a regression due to wrong test for TCP GSO packets used in
   various BPF helpers like NAT64, from Willem.

3) Fix a sk_msg strparser warning which asserts that strparser must
   be stopped first, from Jakub.

4) Fix rejection of invalid options/bind flags in AF_XDP, from Björn.

5) Fix GSO in bpf_lwt_push_ip_encap() which must properly set inner
   headers and inner protocol, from Peter.

6) Fix a libbpf leak when kernel does not support BTF, from Nikita.

7) Various BPF selftest and libbpf build fixes to make out-of-tree
   compilation work and to properly resolve dependencies via fixdep
   target, from Stanislav.

8) Fix rejection of invalid ldimm64 imm field, from Daniel.

9) Fix bpf stats sysctl compile warning of unused helper function
   proc_dointvec_minmax_bpf_stats() under some configs, from Arnd.

10) Fix couple of warnings about using plain integer as NULL, from Bo.

11) Fix some BPF sample spelling mistakes, from Colin.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9d3e1368 71b91a50
...@@ -292,7 +292,7 @@ static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, ...@@ -292,7 +292,7 @@ static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
struct bpf_map *map) {} struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free( static inline void bpf_cgroup_storage_free(
struct bpf_cgroup_storage *storage) {} struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
......
...@@ -4286,10 +4286,10 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) ...@@ -4286,10 +4286,10 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
} }
/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb) static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{ {
return skb_is_gso(skb) && return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
} }
static inline void skb_gso_reset(struct sk_buff *skb) static inline void skb_gso_reset(struct sk_buff *skb)
......
...@@ -6678,17 +6678,17 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) ...@@ -6678,17 +6678,17 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
/* valid generic load 64-bit imm */ /* valid generic load 64-bit imm */
goto next_insn; goto next_insn;
if (insn->src_reg != BPF_PSEUDO_MAP_FD) { if (insn[0].src_reg != BPF_PSEUDO_MAP_FD ||
verbose(env, insn[1].imm != 0) {
"unrecognized bpf_ld_imm64 insn\n"); verbose(env, "unrecognized bpf_ld_imm64 insn\n");
return -EINVAL; return -EINVAL;
} }
f = fdget(insn->imm); f = fdget(insn[0].imm);
map = __bpf_map_get(f); map = __bpf_map_get(f);
if (IS_ERR(map)) { if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n", verbose(env, "fd %d is not pointing to valid bpf_map\n",
insn->imm); insn[0].imm);
return PTR_ERR(map); return PTR_ERR(map);
} }
......
...@@ -3274,7 +3274,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, ...@@ -3274,7 +3274,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
#endif /* CONFIG_PROC_SYSCTL */ #endif /* CONFIG_PROC_SYSCTL */
#ifdef CONFIG_BPF_SYSCALL #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write, static int proc_dointvec_minmax_bpf_stats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, void __user *buffer, size_t *lenp,
loff_t *ppos) loff_t *ppos)
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
u32 *retval, u32 *time) u32 *retval, u32 *time)
{ {
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
enum bpf_cgroup_storage_type stype; enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0; u64 time_start, time_spent = 0;
int ret = 0; int ret = 0;
......
...@@ -2804,7 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) ...@@ -2804,7 +2804,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb); u32 off = skb_mac_header_len(skb);
int ret; int ret;
if (!skb_is_gso_tcp(skb)) if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP; return -ENOTSUPP;
ret = skb_cow(skb, len_diff); ret = skb_cow(skb, len_diff);
...@@ -2845,7 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) ...@@ -2845,7 +2845,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb); u32 off = skb_mac_header_len(skb);
int ret; int ret;
if (!skb_is_gso_tcp(skb)) if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP; return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC); ret = skb_unclone(skb, GFP_ATOMIC);
...@@ -2970,7 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) ...@@ -2970,7 +2970,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret; int ret;
if (!skb_is_gso_tcp(skb)) if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP; return -ENOTSUPP;
ret = skb_cow(skb, len_diff); ret = skb_cow(skb, len_diff);
...@@ -2999,7 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) ...@@ -2999,7 +2999,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret; int ret;
if (!skb_is_gso_tcp(skb)) if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
return -ENOTSUPP; return -ENOTSUPP;
ret = skb_unclone(skb, GFP_ATOMIC); ret = skb_unclone(skb, GFP_ATOMIC);
......
...@@ -625,6 +625,8 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress) ...@@ -625,6 +625,8 @@ int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
/* push the encap headers and fix pointers */ /* push the encap headers and fix pointers */
skb_reset_inner_headers(skb); skb_reset_inner_headers(skb);
skb_reset_inner_mac_header(skb); /* mac header is not yet set */
skb_set_inner_protocol(skb, skb->protocol);
skb->encapsulation = 1; skb->encapsulation = 1;
skb_push(skb, len); skb_push(skb, len);
if (ingress) if (ingress)
......
...@@ -554,6 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) ...@@ -554,6 +554,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc); struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
/* No sk_callback_lock since already detached. */ /* No sk_callback_lock since already detached. */
strp_stop(&psock->parser.strp);
strp_done(&psock->parser.strp); strp_done(&psock->parser.strp);
cancel_work_sync(&psock->work); cancel_work_sync(&psock->work);
......
...@@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) ...@@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (sxdp->sxdp_family != AF_XDP) if (sxdp->sxdp_family != AF_XDP)
return -EINVAL; return -EINVAL;
flags = sxdp->sxdp_flags;
if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
return -EINVAL;
mutex_lock(&xs->mutex); mutex_lock(&xs->mutex);
if (xs->dev) { if (xs->dev) {
err = -EBUSY; err = -EBUSY;
...@@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) ...@@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
} }
qid = sxdp->sxdp_queue_id; qid = sxdp->sxdp_queue_id;
flags = sxdp->sxdp_flags;
if (flags & XDP_SHARED_UMEM) { if (flags & XDP_SHARED_UMEM) {
struct xdp_sock *umem_xs; struct xdp_sock *umem_xs;
......
...@@ -68,9 +68,9 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) ...@@ -68,9 +68,9 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du); err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
if (!err && umem->fq) if (!err && umem->fq)
err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_FILL_RING, nlskb); err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
if (!err && umem->cq) { if (!err && umem->cq) {
err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_COMPLETION_RING, err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
nlskb); nlskb);
} }
return err; return err;
......
...@@ -174,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d) ...@@ -174,8 +174,8 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
if (!xskq_is_valid_addr(q, d->addr)) if (!xskq_is_valid_addr(q, d->addr))
return false; return false;
if (((d->addr + d->len) & q->chunk_mask) != if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
(d->addr & q->chunk_mask)) { d->options) {
q->invalid_descs++; q->invalid_descs++;
return false; return false;
} }
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
* Default is /hbm1 * Default is /hbm1
* -r <rate> Rate limit in Mbps * -r <rate> Rate limit in Mbps
* -s Get HBM stats (marked, dropped, etc.) * -s Get HBM stats (marked, dropped, etc.)
* -t <time> Exit after specified seconds (deault is 0) * -t <time> Exit after specified seconds (default is 0)
* -w Work conserving flag. cgroup can increase its bandwidth * -w Work conserving flag. cgroup can increase its bandwidth
* beyond the rate limit specified while there is available * beyond the rate limit specified while there is available
* bandwidth. Current implementation assumes there is only * bandwidth. Current implementation assumes there is only
...@@ -376,7 +376,7 @@ static void Usage(void) ...@@ -376,7 +376,7 @@ static void Usage(void)
" Default is /hbm1\n" " Default is /hbm1\n"
" -r <rate> Rate in Mbps\n" " -r <rate> Rate in Mbps\n"
" -s Update HBM stats\n" " -s Update HBM stats\n"
" -t <time> Exit after specified seconds (deault is 0)\n" " -t <time> Exit after specified seconds (default is 0)\n"
" -w Work conserving flag. cgroup can increase\n" " -w Work conserving flag. cgroup can increase\n"
" bandwidth beyond the rate limit specified\n" " bandwidth beyond the rate limit specified\n"
" while there is available bandwidth. Current\n" " while there is available bandwidth. Current\n"
......
...@@ -147,7 +147,8 @@ endif ...@@ -147,7 +147,8 @@ endif
TARGETS = $(CMD_TARGETS) TARGETS = $(CMD_TARGETS)
all: fixdep all_cmd all: fixdep
$(Q)$(MAKE) all_cmd
all_cmd: $(CMD_TARGETS) check all_cmd: $(CMD_TARGETS) check
......
...@@ -838,6 +838,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) ...@@ -838,6 +838,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
if (IS_ERR(obj->btf) || btf__load(obj->btf)) { if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_ELF_SEC, PTR_ERR(obj->btf)); BTF_ELF_SEC, PTR_ERR(obj->btf));
if (!IS_ERR(obj->btf))
btf__free(obj->btf);
obj->btf = NULL; obj->btf = NULL;
} }
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
......
...@@ -153,6 +153,9 @@ endif ...@@ -153,6 +153,9 @@ endif
endif endif
endif endif
TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
ifneq ($(SUBREG_CODEGEN),) ifneq ($(SUBREG_CODEGEN),)
ALU32_BUILD_DIR = $(OUTPUT)/alu32 ALU32_BUILD_DIR = $(OUTPUT)/alu32
TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32 TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
...@@ -162,13 +165,15 @@ $(ALU32_BUILD_DIR): ...@@ -162,13 +165,15 @@ $(ALU32_BUILD_DIR):
$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read $(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
cp $< $@ cp $< $@
$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
$(ALU32_BUILD_DIR) \
$(ALU32_BUILD_DIR)/urandom_read $(ALU32_BUILD_DIR)/urandom_read
$(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \ $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
trace_helpers.c prog_tests/*.c $(OUTPUT)/libbpf.a $(LDLIBS) -o $(ALU32_BUILD_DIR)/test_progs_32 \
test_progs.c trace_helpers.c prog_tests/*.c \
$(OUTPUT)/libbpf.a $(LDLIBS)
$(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H) $(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
$(ALU32_BUILD_DIR)/test_progs_32: CFLAGS += -I$(OUTPUT)
$(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c $(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \ $(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
...@@ -202,12 +207,16 @@ endif ...@@ -202,12 +207,16 @@ endif
PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
$(OUTPUT)/test_progs: $(PROG_TESTS_H) $(OUTPUT)/test_progs: $(PROG_TESTS_H)
$(OUTPUT)/test_progs: CFLAGS += -I$(OUTPUT) $(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
$(OUTPUT)/test_progs: prog_tests/*.c $(OUTPUT)/test_progs: prog_tests/*.c
PROG_TESTS_DIR = $(OUTPUT)/prog_tests
$(PROG_TESTS_DIR):
mkdir -p $@
PROG_TESTS_FILES := $(wildcard prog_tests/*.c) PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
$(PROG_TESTS_H): $(PROG_TESTS_FILES) $(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
$(shell ( cd prog_tests/ $(shell ( cd prog_tests/; \
echo '/* Generated header, do not edit */'; \ echo '/* Generated header, do not edit */'; \
echo '#ifdef DECLARE'; \ echo '#ifdef DECLARE'; \
ls *.c 2> /dev/null | \ ls *.c 2> /dev/null | \
...@@ -221,11 +230,15 @@ $(PROG_TESTS_H): $(PROG_TESTS_FILES) ...@@ -221,11 +230,15 @@ $(PROG_TESTS_H): $(PROG_TESTS_FILES)
VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
$(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H) $(OUTPUT)/test_verifier: $(VERIFIER_TESTS_H)
$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT) $(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
$(VERIFIER_TESTS_DIR):
mkdir -p $@
VERIFIER_TEST_FILES := $(wildcard verifier/*.c) VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES) $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
$(shell ( cd verifier/ $(shell ( cd verifier/; \
echo '/* Generated header, do not edit */'; \ echo '/* Generated header, do not edit */'; \
echo '#ifdef FILL_ARRAY'; \ echo '#ifdef FILL_ARRAY'; \
ls *.c 2> /dev/null | \ ls *.c 2> /dev/null | \
......
...@@ -12,7 +12,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type) ...@@ -12,7 +12,7 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
struct itimerval timeo = { struct itimerval timeo = {
.it_value.tv_usec = 100000, /* 100ms */ .it_value.tv_usec = 100000, /* 100ms */
}; };
__u32 duration, retval; __u32 duration = 0, retval;
int prog_fd; int prog_fd;
int err; int err;
int i; int i;
......
...@@ -78,6 +78,8 @@ TEST_STATUS=0 ...@@ -78,6 +78,8 @@ TEST_STATUS=0
TESTS_SUCCEEDED=0 TESTS_SUCCEEDED=0
TESTS_FAILED=0 TESTS_FAILED=0
TMPFILE=""
process_test_results() process_test_results()
{ {
if [[ "${TEST_STATUS}" -eq 0 ]] ; then if [[ "${TEST_STATUS}" -eq 0 ]] ; then
...@@ -147,7 +149,6 @@ setup() ...@@ -147,7 +149,6 @@ setup()
ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7 ip -netns ${NS2} -6 addr add ${IPv6_7}/128 nodad dev veth7
ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8 ip -netns ${NS3} -6 addr add ${IPv6_8}/128 nodad dev veth8
ip -netns ${NS1} link set dev veth1 up ip -netns ${NS1} link set dev veth1 up
ip -netns ${NS2} link set dev veth2 up ip -netns ${NS2} link set dev veth2 up
ip -netns ${NS2} link set dev veth3 up ip -netns ${NS2} link set dev veth3 up
...@@ -205,7 +206,7 @@ setup() ...@@ -205,7 +206,7 @@ setup()
# configure IPv4 GRE device in NS3, and a route to it via the "bottom" route # configure IPv4 GRE device in NS3, and a route to it via the "bottom" route
ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255 ip -netns ${NS3} tunnel add gre_dev mode gre remote ${IPv4_1} local ${IPv4_GRE} ttl 255
ip -netns ${NS3} link set gre_dev up ip -netns ${NS3} link set gre_dev up
ip -netns ${NS3} addr add ${IPv4_GRE} nodad dev gre_dev ip -netns ${NS3} addr add ${IPv4_GRE} dev gre_dev
ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6} ip -netns ${NS1} route add ${IPv4_GRE}/32 dev veth5 via ${IPv4_6}
ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8} ip -netns ${NS2} route add ${IPv4_GRE}/32 dev veth7 via ${IPv4_8}
...@@ -222,12 +223,18 @@ setup() ...@@ -222,12 +223,18 @@ setup()
ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0 ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0 ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
sleep 1 # reduce flakiness sleep 1 # reduce flakiness
set +e set +e
} }
cleanup() cleanup()
{ {
if [ -f ${TMPFILE} ] ; then
rm ${TMPFILE}
fi
ip netns del ${NS1} 2> /dev/null ip netns del ${NS1} 2> /dev/null
ip netns del ${NS2} 2> /dev/null ip netns del ${NS2} 2> /dev/null
ip netns del ${NS3} 2> /dev/null ip netns del ${NS3} 2> /dev/null
...@@ -278,6 +285,46 @@ test_ping() ...@@ -278,6 +285,46 @@ test_ping()
fi fi
} }
test_gso()
{
local readonly PROTO=$1
local readonly PKT_SZ=5000
local IP_DST=""
: > ${TMPFILE} # trim the capture file
# check that nc is present
command -v nc >/dev/null 2>&1 || \
{ echo >&2 "nc is not available: skipping TSO tests"; return; }
# listen on IPv*_DST, capture TCP into $TMPFILE
if [ "${PROTO}" == "IPv4" ] ; then
IP_DST=${IPv4_DST}
ip netns exec ${NS3} bash -c \
"nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
elif [ "${PROTO}" == "IPv6" ] ; then
IP_DST=${IPv6_DST}
ip netns exec ${NS3} bash -c \
"nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
RET=$?
else
echo " test_gso: unknown PROTO: ${PROTO}"
TEST_STATUS=1
fi
sleep 1 # let nc start listening
# send a packet larger than MTU
ip netns exec ${NS1} bash -c \
"dd if=/dev/zero bs=$PKT_SZ count=1 > /dev/tcp/${IP_DST}/9000 2>/dev/null"
sleep 2 # let the packet get delivered
# verify we received all expected bytes
SZ=$(stat -c %s ${TMPFILE})
if [ "$SZ" != "$PKT_SZ" ] ; then
echo " test_gso failed: ${PROTO}"
TEST_STATUS=1
fi
}
test_egress() test_egress()
{ {
local readonly ENCAP=$1 local readonly ENCAP=$1
...@@ -307,6 +354,8 @@ test_egress() ...@@ -307,6 +354,8 @@ test_egress()
fi fi
test_ping IPv4 0 test_ping IPv4 0
test_ping IPv6 0 test_ping IPv6 0
test_gso IPv4
test_gso IPv6
# a negative test: remove routes to GRE devices: ping fails # a negative test: remove routes to GRE devices: ping fails
remove_routes_to_gredev remove_routes_to_gredev
...@@ -350,6 +399,7 @@ test_ingress() ...@@ -350,6 +399,7 @@ test_ingress()
ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2 ip -netns ${NS2} -6 route add ${IPv6_DST} encap bpf in obj test_lwt_ip_encap.o sec encap_gre6 dev veth2
else else
echo "FAIL: unknown encap ${ENCAP}" echo "FAIL: unknown encap ${ENCAP}"
TEST_STATUS=1
fi fi
test_ping IPv4 0 test_ping IPv4 0
test_ping IPv6 0 test_ping IPv6 0
......
...@@ -122,7 +122,7 @@ ...@@ -122,7 +122,7 @@
.insns = { .insns = {
BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
BPF_RAW_INSN(0, 0, 0, 0, 1), BPF_RAW_INSN(0, 0, 0, 0, 0),
BPF_EXIT_INSN(), BPF_EXIT_INSN(),
}, },
.errstr = "not pointing to valid bpf_map", .errstr = "not pointing to valid bpf_map",
...@@ -139,3 +139,16 @@ ...@@ -139,3 +139,16 @@
.errstr = "invalid bpf_ld_imm64 insn", .errstr = "invalid bpf_ld_imm64 insn",
.result = REJECT, .result = REJECT,
}, },
{
"test14 ld_imm64: reject 2nd imm != 0",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_1,
BPF_PSEUDO_MAP_FD, 0, 0),
BPF_RAW_INSN(0, 0, 0, 0, 0xfefefe),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 1 },
.errstr = "unrecognized bpf_ld_imm64 insn",
.result = REJECT,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment