Commit 0954df70 authored by Stefano Brivio, committed by Pablo Neira Ayuso

selftests: nft_concat_range: Add test for reported add/flush/add issue

Add a specific test for the crash reported by Phil Sutter and addressed
in the previous patch. The test cases that, in my intention, should
have covered this, that is, the ones from the 'concurrency' section,
don't run these sequences tightly enough and spectacularly failed to
catch it.

While at it, define a convenient way to add this kind of test, by
adding a "reported issues" test section.

It's more convenient, for this particular test, to perform the set
setup in its own function. However, future test cases like this one
might still need to call the existing setup functions, and will
typically need no tools other than nft, so allow for that in
check_tools().

The original form of the reproducer used here was provided by Phil.
Reported-by: Phil Sutter <phil@nwl.cc>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 212d58c1
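
For context, the sequence exercised by the new test can be reproduced with
plain nft calls, outside the selftest harness. A minimal sketch, assuming nft
is available and run as root in a disposable network namespace; the table and
set names (t, s) mirror the ones used in the test below:

    # Reproducer sketch for the reported add/flush/add crash; run as
    # root, e.g. under "unshare -n" to avoid touching the host ruleset.
    nft add table t '{ set s { type ipv4_addr . inet_service; flags interval; }; }'

    # Add two concatenated interval elements, flush the set, then
    # re-add the same elements in the opposite order.
    nft add element t s '{ 10.0.0.1 . 22-25, 10.0.0.1 . 10-20 }'
    nft flush set t s
    nft add element t s '{ 10.0.0.1 . 10-20, 10.0.0.1 . 22-25 }'

    nft flush ruleset

On an unpatched kernel a single pass might not trigger the crash, which is
presumably why the test below repeats the sequence 100 times.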
@@ -13,11 +13,12 @@
 KSELFTEST_SKIP=4
 
 # Available test groups:
+# - reported_issues: check for issues that were reported in the past
 # - correctness: check that packets match given entries, and only those
 # - concurrency: attempt races between insertion, deletion and lookup
 # - timeout: check that packets match entries until they expire
 # - performance: estimate matching rate, compare with rbtree and hash baselines
-TESTS="correctness concurrency timeout"
+TESTS="reported_issues correctness concurrency timeout"
 [ "${quicktest}" != "1" ] && TESTS="${TESTS} performance"
 
 # Set types, defined by TYPE_ variables below
@@ -25,6 +26,9 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
 	net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
 	net_port_mac_proto_net"
 
+# Reported bugs, also described by TYPE_ variables below
+BUGS="flush_remove_add"
+
 # List of possible paths to pktgen script from kernel tree for performance tests
 PKTGEN_SCRIPT_PATHS="
 	../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -327,6 +331,12 @@ flood_spec ip daddr . tcp dport . meta l4proto . ip saddr
 perf_duration	0
 "
 
+# Definition of tests for bugs reported in the past:
+# display	display text for test report
+TYPE_flush_remove_add="
+display	Add two elements, flush, re-add
+"
+
 # Set template for all tests, types and rules are filled in depending on test
 set_template='
 flush ruleset
@@ -440,6 +450,8 @@ setup_set() {
 
 # Check that at least one of the needed tools is available
 check_tools() {
+	[ -z "${tools}" ] && return 0
+
 	__tools=
 	for tool in ${tools}; do
 		if [ "${tool}" = "nc" ] && [ "${proto}" = "udp6" ] && \
@@ -1430,6 +1442,23 @@ test_performance() {
 	kill "${perf_pid}"
 }
 
+test_bug_flush_remove_add() {
+	set_cmd='{ set s { type ipv4_addr . inet_service; flags interval; }; }'
+	elem1='{ 10.0.0.1 . 22-25, 10.0.0.1 . 10-20 }'
+	elem2='{ 10.0.0.1 . 10-20, 10.0.0.1 . 22-25 }'
+	for i in `seq 1 100`; do
+		nft add table t ${set_cmd} || return ${KSELFTEST_SKIP}
+		nft add element t s ${elem1} 2>/dev/null || return 1
+		nft flush set t s 2>/dev/null || return 1
+		nft add element t s ${elem2} 2>/dev/null || return 1
+	done
+	nft flush ruleset
+}
+
+test_reported_issues() {
+	eval test_bug_"${subtest}"
+}
+
 # Run everything in a separate network namespace
 [ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
 tmp="$(mktemp)"
@@ -1438,9 +1467,15 @@ trap cleanup EXIT
 # Entry point for test runs
 passed=0
 for name in ${TESTS}; do
-	printf "TEST: %s\n" "${name}"
-	for type in ${TYPES}; do
-		eval desc=\$TYPE_"${type}"
+	printf "TEST: %s\n" "$(echo ${name} | tr '_' ' ')"
+	if [ "${name}" = "reported_issues" ]; then
+		SUBTESTS="${BUGS}"
+	else
+		SUBTESTS="${TYPES}"
+	fi
+
+	for subtest in ${SUBTESTS}; do
+		eval desc=\$TYPE_"${subtest}"
 		IFS='
 '
 		for __line in ${desc}; do
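
A future test for another reported bug would follow the pattern this patch
introduces: add a name to BUGS, a TYPE_ description used for the report
output, and a matching test_bug_*() function, which test_reported_issues()
then dispatches to via eval. A hypothetical sketch, where the name "foo_bar"
and the commands in the function body are illustrative placeholders only,
not part of this patch:

    # Hypothetical example of extending the "reported issues" section
    BUGS="flush_remove_add foo_bar"

    TYPE_foo_bar="
    display	Short description shown in the test report
    "

    test_bug_foo_bar() {
    	# Return KSELFTEST_SKIP if the environment can't run the test,
    	# 1 on failure, 0 on success; clean up the ruleset when done
    	nft add table t || return ${KSELFTEST_SKIP}
    	# ... commands reproducing the reported issue ...
    	nft flush ruleset
    }

Since such tests typically need no tools other than nft, check_tools() now
returns early when ${tools} is empty, and the whole script still re-executes
itself under "unshare -n", so each run gets a fresh network namespace.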