Commit f54a2a13 authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2023-08-16

We've added 17 non-merge commits during the last 6 day(s) which contain
a total of 20 files changed, 1179 insertions(+), 37 deletions(-).

The main changes are:

1) Add a BPF hook in sys_socket() to change the protocol ID
   from IPPROTO_TCP to IPPROTO_MPTCP to cover migration for legacy
   applications (see the sketch after this list), from Geliang Tang.

2) Follow-up fix for the SO_REUSEPORT + bpf_sk_assign work, addressing
   a splat on non-fullsock sks in inet[6]_steal_sock, from Lorenz Bauer.

3) Improvements to struct_ops links to avoid forcing presence of
   update/validate callbacks. Also add bpf_struct_ops fields documentation,
   from David Vernet.

4) Ensure libbpf sets close-on-exec flag on gzopen, from Marco Vedovati.

5) Several new tcx selftest additions and bpftool link show support for
   tcx and xdp links, from Daniel Borkmann.

6) Fix a smatch warning on uninitialized symbol in
   bpf_perf_link_fill_kprobe, from Yafang Shao.

7) BPF selftest fixes, e.g. a misplaced break in the kfunc_call test,
   from Yipeng Zou.

8) Small cleanup to remove unused declaration bpf_link_new_file,
   from Yue Haibing.

9) Small typo fix to bpftool's perf help message, from Daniel T. Lee.
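
The update_socket_protocol() hook from 1) is meant to be overridden by an
fmod_ret BPF program. A minimal sketch, mirroring the mptcpify selftest
program added later in this series (bpf_tracing_net.h comes from the BPF
selftests tree and provides the AF_INET/AF_INET6 defines):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"

char _license[] SEC("license") = "GPL";

SEC("fmod_ret/update_socket_protocol")
int BPF_PROG(mptcpify, int family, int type, int protocol)
{
	/* Rewrite plain TCP stream sockets to MPTCP; leave the rest alone. */
	if ((family == AF_INET || family == AF_INET6) &&
	    type == SOCK_STREAM &&
	    (!protocol || protocol == IPPROTO_TCP))
		return IPPROTO_MPTCP;

	return protocol;
}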

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next:
  selftests/bpf: Add mptcpify test
  selftests/bpf: Fix error checks of mptcp open_and_load
  selftests/bpf: Add two mptcp netns helpers
  bpf: Add update_socket_protocol hook
  bpftool: Implement link show support for xdp
  bpftool: Implement link show support for tcx
  selftests/bpf: Add selftest for fill_link_info
  bpf: Fix uninitialized symbol in bpf_perf_link_fill_kprobe()
  net: Fix slab-out-of-bounds in inet[6]_steal_sock
  bpf: Document struct bpf_struct_ops fields
  bpf: Support default .validate() and .update() behavior for struct_ops links
  selftests/bpf: Add various more tcx test cases
  selftests/bpf: Clean up fmod_ret in bench_rename test script
  selftests/bpf: Fix repeat option when kfunc_call verification fails
  libbpf: Set close-on-exec flag on gzopen
  bpftool: fix perf help message
  bpf: Remove unused declaration bpf_link_new_file()
====================

Link: https://lore.kernel.org/r/20230816212840.1539-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 42b118c9 de405373
......@@ -1550,6 +1550,53 @@ struct bpf_struct_ops_value;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
/**
* struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
* define a BPF_MAP_TYPE_STRUCT_OPS map type composed
* of BPF_PROG_TYPE_STRUCT_OPS progs.
* @verifier_ops: A structure of callbacks that are invoked by the verifier
* when determining whether the struct_ops progs in the
* struct_ops map are valid.
* @init: A callback that is invoked a single time, and before any other
* callback, to initialize the structure. A nonzero return value means
* the subsystem could not be initialized.
* @check_member: When defined, a callback invoked by the verifier to allow
* the subsystem to determine if an entry in the struct_ops map
* is valid. A nonzero return value means that the map is
* invalid and should be rejected by the verifier.
* @init_member: A callback that is invoked for each member of the struct_ops
* map to allow the subsystem to initialize the member. A nonzero
* value means the member could not be initialized. This callback
* is exclusive with the @type, @type_id, @value_type, and
* @value_id fields.
* @reg: A callback that is invoked when the struct_ops map has been
* initialized and is being attached to. Zero means the struct_ops map
* has been successfully registered and is live. A nonzero return value
* means the struct_ops map could not be registered.
* @unreg: A callback that is invoked when the struct_ops map should be
* unregistered.
* @update: A callback that is invoked when the live struct_ops map is being
* updated to contain new values. This callback is only invoked when
* the struct_ops map is loaded with BPF_F_LINK. If not defined,
* it is assumed that the struct_ops map cannot be updated.
* @validate: A callback that is invoked after all of the members have been
* initialized. This callback should perform static checks on the
* map, meaning that it should either fail or succeed
* deterministically. A struct_ops map that has been validated may
* not necessarily succeed in being registered if the call to @reg
* fails. For example, a valid struct_ops map may be loaded, but
* then fail to be registered because the subsystem already has
* another struct_ops map active on the system. For this
* reason, if this callback is not defined, the check is skipped as
* the struct_ops map will have final verification performed in
* @reg.
* @type: BTF type.
* @value_type: Value type.
* @name: The name of the struct bpf_struct_ops object.
* @func_models: Func models
* @type_id: BTF type id.
* @value_id: BTF value id.
*/
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
......@@ -2120,7 +2167,6 @@ void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
......
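
To make the callbacks documented above concrete, a minimal subsystem-side
sketch follows (hypothetical; all my_* identifiers are invented for
illustration and are not part of this series). It relies on the relaxed
behavior added in bpf_struct_ops.c below: .validate and .update may be
left out, in which case validation is deferred to .reg() and link-based
map updates fail with -EOPNOTSUPP.

static const struct bpf_verifier_ops my_verifier_ops = {
	/* .get_func_proto, .is_valid_access, ... elided for brevity */
};

static int my_ops_init(struct btf *btf)
{
	return 0;			/* one-time setup */
}

static int my_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;			/* initialize a single member */
}

static int my_ops_reg(void *kdata)
{
	/* Final checks happen here when .validate is not provided. */
	return 0;
}

static void my_ops_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_my_ops = {
	.verifier_ops	= &my_verifier_ops,
	.init		= my_ops_init,
	.init_member	= my_ops_init_member,
	.reg		= my_ops_reg,
	.unreg		= my_ops_unreg,
	/* .validate/.update intentionally omitted: the map can still be
	 * loaded with BPF_F_LINK; link-based updates then return
	 * -EOPNOTSUPP and validation is left to .reg().
	 */
	.name		= "my_ops",
};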
......@@ -116,7 +116,7 @@ struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff,
if (!sk)
return NULL;
if (!prefetched)
if (!prefetched || !sk_fullsock(sk))
return sk;
if (sk->sk_protocol == IPPROTO_TCP) {
......
......@@ -462,7 +462,7 @@ struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
if (!sk)
return NULL;
if (!prefetched)
if (!prefetched || !sk_fullsock(sk))
return sk;
if (sk->sk_protocol == IPPROTO_TCP) {
......
......@@ -509,9 +509,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
}
if (st_map->map.map_flags & BPF_F_LINK) {
err = st_ops->validate(kdata);
if (err)
goto reset_unlock;
err = 0;
if (st_ops->validate) {
err = st_ops->validate(kdata);
if (err)
goto reset_unlock;
}
set_memory_rox((long)st_map->image, 1);
/* Let bpf_link handle registration & unregistration.
*
......@@ -663,9 +666,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
if (attr->value_size != vt->size)
return ERR_PTR(-EINVAL);
if (attr->map_flags & BPF_F_LINK && (!st_ops->validate || !st_ops->update))
return ERR_PTR(-EOPNOTSUPP);
t = st_ops->type;
st_map_size = sizeof(*st_map) +
......@@ -823,6 +823,9 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map
if (!bpf_struct_ops_valid_to_reg(new_map))
return -EINVAL;
if (!st_map->st_ops->update)
return -EOPNOTSUPP;
mutex_lock(&update_mutex);
old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
......
......@@ -3378,14 +3378,13 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
if (!ulen ^ !uname)
return -EINVAL;
if (!uname)
return 0;
err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
probe_offset, probe_addr);
if (err)
return err;
if (!uname)
return 0;
if (buf) {
len = strlen(buf);
err = bpf_copy_to_user(uname, buf, ulen, len);
......
......@@ -19,3 +19,18 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
return NULL;
}
BTF_SET8_START(bpf_mptcp_fmodret_ids)
BTF_ID_FLAGS(func, update_socket_protocol)
BTF_SET8_END(bpf_mptcp_fmodret_ids)
static const struct btf_kfunc_id_set bpf_mptcp_fmodret_set = {
.owner = THIS_MODULE,
.set = &bpf_mptcp_fmodret_ids,
};
static int __init bpf_mptcp_kfunc_init(void)
{
return register_btf_fmodret_id_set(&bpf_mptcp_fmodret_set);
}
late_initcall(bpf_mptcp_kfunc_init);
......@@ -1657,12 +1657,36 @@ struct file *__sys_socket_file(int family, int type, int protocol)
return sock_alloc_file(sock, flags, NULL);
}
/* A hook for bpf progs to attach to and update socket protocol.
*
* A static noinline declaration here could cause the compiler to
* optimize away the function. A global noinline declaration will
* keep the definition, but may optimize away the callsite.
* Therefore, __weak is needed to ensure that the call is still
* emitted, by telling the compiler that we don't know what the
* function might eventually be.
*
* __diag_* below are needed to dismiss the missing prototype warning.
*/
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
"A fmod_ret entry point for BPF programs");
__weak noinline int update_socket_protocol(int family, int type, int protocol)
{
return protocol;
}
__diag_pop();
int __sys_socket(int family, int type, int protocol)
{
struct socket *sock;
int flags;
sock = __sys_socket_create(family, type, protocol);
sock = __sys_socket_create(family, type,
update_socket_protocol(family, type, protocol));
if (IS_ERR(sock))
return PTR_ERR(sock);
......
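
With the hook wired into __sys_socket() as above and the fmod_ret program
from the earlier sketch attached, a legacy application's plain TCP socket()
call transparently yields an MPTCP socket. A minimal userspace check
(sketch; it mirrors the verify_mptcpify() logic in the selftest added later
in this series):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);	/* legacy TCP request */
	int protocol;
	socklen_t optlen = sizeof(protocol);

	if (fd < 0)
		return 1;
	if (getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen))
		return 1;

	/* With mptcpify attached this prints 262 (IPPROTO_MPTCP). */
	printf("SO_PROTOCOL: %d\n", protocol);
	return 0;
}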
......@@ -150,6 +150,18 @@ static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
jsonw_uint_field(wtr, "attach_type", attach_type);
}
static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
{
char devname[IF_NAMESIZE] = "(unknown)";
if (ifindex)
if_indextoname(ifindex, devname);
else
snprintf(devname, sizeof(devname), "(detached)");
jsonw_string_field(wtr, "devname", devname);
jsonw_uint_field(wtr, "ifindex", ifindex);
}
static bool is_iter_map_target(const char *target_name)
{
return strcmp(target_name, "bpf_map_elem") == 0 ||
......@@ -433,6 +445,13 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_json(info, json_wtr);
break;
case BPF_LINK_TYPE_TCX:
show_link_ifindex_json(info->tcx.ifindex, json_wtr);
show_link_attach_type_json(info->tcx.attach_type, json_wtr);
break;
case BPF_LINK_TYPE_XDP:
show_link_ifindex_json(info->xdp.ifindex, json_wtr);
break;
case BPF_LINK_TYPE_STRUCT_OPS:
jsonw_uint_field(json_wtr, "map_id",
info->struct_ops.map_id);
......@@ -509,6 +528,22 @@ static void show_link_attach_type_plain(__u32 attach_type)
printf("attach_type %u ", attach_type);
}
static void show_link_ifindex_plain(__u32 ifindex)
{
char devname[IF_NAMESIZE * 2] = "(unknown)";
char tmpname[IF_NAMESIZE];
char *ret = NULL;
if (ifindex)
ret = if_indextoname(ifindex, tmpname);
else
snprintf(devname, sizeof(devname), "(detached)");
if (ret)
snprintf(devname, sizeof(devname), "%s(%d)",
tmpname, ifindex);
printf("ifindex %s ", devname);
}
static void show_iter_plain(struct bpf_link_info *info)
{
const char *target_name = u64_to_ptr(info->iter.target_name);
......@@ -745,6 +780,15 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_plain(info);
break;
case BPF_LINK_TYPE_TCX:
printf("\n\t");
show_link_ifindex_plain(info->tcx.ifindex);
show_link_attach_type_plain(info->tcx.attach_type);
break;
case BPF_LINK_TYPE_XDP:
printf("\n\t");
show_link_ifindex_plain(info->xdp.ifindex);
break;
case BPF_LINK_TYPE_KPROBE_MULTI:
show_kprobe_multi_plain(info);
break;
......
......@@ -236,7 +236,7 @@ static int do_help(int argc, char **argv)
{
fprintf(stderr,
"Usage: %1$s %2$s { show | list }\n"
" %1$s %2$s help }\n"
" %1$s %2$s help\n"
"\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
......
......@@ -1978,9 +1978,9 @@ static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
return -ENAMETOOLONG;
/* gzopen also accepts uncompressed files. */
file = gzopen(buf, "r");
file = gzopen(buf, "re");
if (!file)
file = gzopen("/proc/config.gz", "r");
file = gzopen("/proc/config.gz", "re");
if (!file) {
pr_warn("failed to open system Kconfig\n");
......
......@@ -12,3 +12,6 @@ kprobe_multi_test/skel_api # libbpf: failed to load BPF sk
module_attach # prog 'kprobe_multi': failed to auto-attach: -95
fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524
fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
......@@ -2,7 +2,7 @@
set -eufo pipefail
for i in base kprobe kretprobe rawtp fentry fexit fmodret
for i in base kprobe kretprobe rawtp fentry fexit
do
summary=$(sudo ./bench -w2 -d5 -a rename-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-)
printf "%-10s: %s\n" $i "$summary"
......
......
......@@ -173,8 +173,8 @@ static void verify_fail(struct kfunc_test_params *param)
case tc_test:
topts.data_in = &pkt_v4;
topts.data_size_in = sizeof(pkt_v4);
break;
topts.repeat = 1;
break;
}
skel = kfunc_call_fail__open_opts(&opts);
......
......@@ -2,17 +2,59 @@
/* Copyright (c) 2020, Tessares SA. */
/* Copyright (c) 2022, SUSE. */
#include <linux/const.h>
#include <netinet/in.h>
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_sock.skel.h"
#include "mptcpify.skel.h"
#define NS_TEST "mptcp_ns"
#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif
#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif
#ifndef MPTCP_INFO
#define MPTCP_INFO 1
#endif
#ifndef MPTCP_INFO_FLAG_FALLBACK
#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
#endif
#ifndef MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
#endif
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
#endif
struct __mptcp_info {
__u8 mptcpi_subflows;
__u8 mptcpi_add_addr_signal;
__u8 mptcpi_add_addr_accepted;
__u8 mptcpi_subflows_max;
__u8 mptcpi_add_addr_signal_max;
__u8 mptcpi_add_addr_accepted_max;
__u32 mptcpi_flags;
__u32 mptcpi_token;
__u64 mptcpi_write_seq;
__u64 mptcpi_snd_una;
__u64 mptcpi_rcv_nxt;
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
__u8 mptcpi_csum_enabled;
__u32 mptcpi_retransmits;
__u64 mptcpi_bytes_retrans;
__u64 mptcpi_bytes_sent;
__u64 mptcpi_bytes_received;
__u64 mptcpi_bytes_acked;
};
struct mptcp_storage {
__u32 invoked;
__u32 is_mptcp;
......@@ -22,6 +64,24 @@ struct mptcp_storage {
char ca_name[TCP_CA_NAME_MAX];
};
static struct nstoken *create_netns(void)
{
SYS(fail, "ip netns add %s", NS_TEST);
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
return open_netns(NS_TEST);
fail:
return NULL;
}
static void cleanup_netns(struct nstoken *nstoken)
{
if (nstoken)
close_netns(nstoken);
SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST);
}
static int verify_tsk(int map_fd, int client_fd)
{
int err, cfd = client_fd;
......@@ -100,24 +160,14 @@ static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
sock_skel = mptcp_sock__open_and_load();
if (!ASSERT_OK_PTR(sock_skel, "skel_open_load"))
return -EIO;
return libbpf_get_error(sock_skel);
err = mptcp_sock__attach(sock_skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
prog_fd = bpf_program__fd(sock_skel->progs._sockops);
if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) {
err = -EIO;
goto out;
}
map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map);
if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) {
err = -EIO;
goto out;
}
err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
if (!ASSERT_OK(err, "bpf_prog_attach"))
goto out;
......@@ -147,11 +197,8 @@ static void test_base(void)
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
SYS(fail, "ip netns add %s", NS_TEST);
SYS(fail, "ip -net %s link set dev lo up", NS_TEST);
nstoken = open_netns(NS_TEST);
if (!ASSERT_OK_PTR(nstoken, "open_netns"))
nstoken = create_netns();
if (!ASSERT_OK_PTR(nstoken, "create_netns"))
goto fail;
/* without MPTCP */
......@@ -174,11 +221,104 @@ static void test_base(void)
close(server_fd);
fail:
if (nstoken)
close_netns(nstoken);
cleanup_netns(nstoken);
SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null");
close(cgroup_fd);
}
static void send_byte(int fd)
{
char b = 0x55;
ASSERT_EQ(write(fd, &b, sizeof(b)), 1, "send single byte");
}
static int verify_mptcpify(int server_fd, int client_fd)
{
struct __mptcp_info info;
socklen_t optlen;
int protocol;
int err = 0;
optlen = sizeof(protocol);
if (!ASSERT_OK(getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen),
"getsockopt(SOL_PROTOCOL)"))
return -1;
if (!ASSERT_EQ(protocol, IPPROTO_MPTCP, "protocol isn't MPTCP"))
err++;
optlen = sizeof(info);
if (!ASSERT_OK(getsockopt(client_fd, SOL_MPTCP, MPTCP_INFO, &info, &optlen),
"getsockopt(MPTCP_INFO)"))
return -1;
if (!ASSERT_GE(info.mptcpi_flags, 0, "unexpected mptcpi_flags"))
err++;
if (!ASSERT_FALSE(info.mptcpi_flags & MPTCP_INFO_FLAG_FALLBACK,
"MPTCP fallback"))
err++;
if (!ASSERT_TRUE(info.mptcpi_flags & MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED,
"no remote key received"))
err++;
return err;
}
static int run_mptcpify(int cgroup_fd)
{
int server_fd, client_fd, err = 0;
struct mptcpify *mptcpify_skel;
mptcpify_skel = mptcpify__open_and_load();
if (!ASSERT_OK_PTR(mptcpify_skel, "skel_open_load"))
return libbpf_get_error(mptcpify_skel);
err = mptcpify__attach(mptcpify_skel);
if (!ASSERT_OK(err, "skel_attach"))
goto out;
/* without MPTCP */
server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
if (!ASSERT_GE(server_fd, 0, "start_server")) {
err = -EIO;
goto out;
}
client_fd = connect_to_fd(server_fd, 0);
if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
err = -EIO;
goto close_server;
}
send_byte(client_fd);
err = verify_mptcpify(server_fd, client_fd);
close(client_fd);
close_server:
close(server_fd);
out:
mptcpify__destroy(mptcpify_skel);
return err;
}
static void test_mptcpify(void)
{
struct nstoken *nstoken = NULL;
int cgroup_fd;
cgroup_fd = test__join_cgroup("/mptcpify");
if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
return;
nstoken = create_netns();
if (!ASSERT_OK_PTR(nstoken, "create_netns"))
goto fail;
ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify");
fail:
cleanup_netns(nstoken);
close(cgroup_fd);
}
......@@ -186,4 +326,6 @@ void test_mptcp(void)
{
if (test__start_subtest("base"))
test_base();
if (test__start_subtest("mptcpify"))
test_mptcpify();
}
......@@ -2268,3 +2268,113 @@ void serial_test_tc_opts_delete_empty(void)
test_tc_opts_delete_empty(BPF_TCX_INGRESS, true);
test_tc_opts_delete_empty(BPF_TCX_EGRESS, true);
}
static void test_tc_chain_mixed(int target)
{
LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
LIBBPF_OPTS(bpf_tc_hook, tc_hook, .ifindex = loopback);
LIBBPF_OPTS(bpf_prog_attach_opts, opta);
LIBBPF_OPTS(bpf_prog_detach_opts, optd);
__u32 fd1, fd2, fd3, id1, id2, id3;
struct test_tc_link *skel;
int err, detach_fd;
skel = test_tc_link__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
fd1 = bpf_program__fd(skel->progs.tc4);
fd2 = bpf_program__fd(skel->progs.tc5);
fd3 = bpf_program__fd(skel->progs.tc6);
id1 = id_from_prog_fd(fd1);
id2 = id_from_prog_fd(fd2);
id3 = id_from_prog_fd(fd3);
ASSERT_NEQ(id1, id2, "prog_ids_1_2");
ASSERT_NEQ(id2, id3, "prog_ids_2_3");
assert_mprog_count(target, 0);
tc_hook.attach_point = target == BPF_TCX_INGRESS ?
BPF_TC_INGRESS : BPF_TC_EGRESS;
err = bpf_tc_hook_create(&tc_hook);
err = err == -EEXIST ? 0 : err;
if (!ASSERT_OK(err, "bpf_tc_hook_create"))
goto cleanup;
tc_opts.prog_fd = fd2;
err = bpf_tc_attach(&tc_hook, &tc_opts);
if (!ASSERT_OK(err, "bpf_tc_attach"))
goto cleanup_hook;
err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_filter;
detach_fd = fd3;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
LIBBPF_OPTS_RESET(opta,
.flags = BPF_F_REPLACE,
.replace_prog_fd = fd3,
);
err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
if (!ASSERT_EQ(err, 0, "prog_attach"))
goto cleanup_opts;
detach_fd = fd1;
assert_mprog_count(target, 1);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
skel->bss->seen_tc4 = false;
skel->bss->seen_tc5 = false;
skel->bss->seen_tc6 = false;
cleanup_opts:
err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
ASSERT_OK(err, "prog_detach");
__assert_mprog_count(target, 0, true, loopback);
ASSERT_OK(system(ping_cmd), ping_cmd);
ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
cleanup_filter:
tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
err = bpf_tc_detach(&tc_hook, &tc_opts);
ASSERT_OK(err, "bpf_tc_detach");
cleanup_hook:
tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&tc_hook);
cleanup:
test_tc_link__destroy(skel);
}
void serial_test_tc_opts_chain_mixed(void)
{
test_tc_chain_mixed(BPF_TCX_INGRESS);
test_tc_chain_mixed(BPF_TCX_EGRESS);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023, SUSE. */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
char _license[] SEC("license") = "GPL";
SEC("fmod_ret/update_socket_protocol")
int BPF_PROG(mptcpify, int family, int type, int protocol)
{
if ((family == AF_INET || family == AF_INET6) &&
type == SOCK_STREAM &&
(!protocol || protocol == IPPROTO_TCP)) {
return IPPROTO_MPTCP;
}
return protocol;
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Yafang Shao <laoar.shao@gmail.com> */
#include "vmlinux.h"
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
/* This function is here to have CONFIG_X86_KERNEL_IBT
* used and added to object BTF.
*/
int unused(void)
{
return CONFIG_X86_KERNEL_IBT ? 0 : 1;
}
SEC("kprobe")
int BPF_PROG(kprobe_run)
{
return 0;
}
SEC("uprobe")
int BPF_PROG(uprobe_run)
{
return 0;
}
SEC("tracepoint")
int BPF_PROG(tp_run)
{
return 0;
}
SEC("kprobe.multi")
int BPF_PROG(kmulti_run)
{
return 0;
}
char _license[] SEC("license") = "GPL";
......@@ -10,6 +10,8 @@ bool seen_tc1;
bool seen_tc2;
bool seen_tc3;
bool seen_tc4;
bool seen_tc5;
bool seen_tc6;
SEC("tc/ingress")
int tc1(struct __sk_buff *skb)
......@@ -38,3 +40,17 @@ int tc4(struct __sk_buff *skb)
seen_tc4 = true;
return TCX_NEXT;
}
SEC("tc/egress")
int tc5(struct __sk_buff *skb)
{
seen_tc5 = true;
return TCX_PASS;
}
SEC("tc/egress")
int tc6(struct __sk_buff *skb)
{
seen_tc6 = true;
return TCX_PASS;
}