Commit 04f8cb6d authored by Alexei Starovoitov

Merge branch 'Get ingress_ifindex in BPF_SK_LOOKUP prog type'

Mark Pashmfouroush says:

====================

BPF_SK_LOOKUP users may want to have access to the ifindex of the skb
which triggered the socket lookup. This may be useful for selectively
applying programmable socket lookup logic to packets that arrive on a
specific interface, or excluding packets from an interface.

v3:
- Rename ifindex field to ingress_ifindex for consistency. (Yonghong)

v2:
- Fix inaccurate comment (Alexei)
- Add more details to commit messages (John)
====================
Reviewed-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 1a8b597d 8b4fd2bf
...@@ -1374,6 +1374,7 @@ struct bpf_sk_lookup_kern { ...@@ -1374,6 +1374,7 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *daddr; const struct in6_addr *daddr;
} v6; } v6;
struct sock *selected_sk; struct sock *selected_sk;
u32 ingress_ifindex;
bool no_reuseport; bool no_reuseport;
}; };
...@@ -1436,7 +1437,7 @@ extern struct static_key_false bpf_sk_lookup_enabled; ...@@ -1436,7 +1437,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
const __be32 saddr, const __be16 sport, const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport, const __be32 daddr, const u16 dport,
struct sock **psk) const int ifindex, struct sock **psk)
{ {
struct bpf_prog_array *run_array; struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL; struct sock *selected_sk = NULL;
...@@ -1452,6 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, ...@@ -1452,6 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
.v4.daddr = daddr, .v4.daddr = daddr,
.sport = sport, .sport = sport,
.dport = dport, .dport = dport,
.ingress_ifindex = ifindex,
}; };
u32 act; u32 act;
...@@ -1474,7 +1476,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, ...@@ -1474,7 +1476,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
const __be16 sport, const __be16 sport,
const struct in6_addr *daddr, const struct in6_addr *daddr,
const u16 dport, const u16 dport,
struct sock **psk) const int ifindex, struct sock **psk)
{ {
struct bpf_prog_array *run_array; struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL; struct sock *selected_sk = NULL;
...@@ -1490,6 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, ...@@ -1490,6 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
.v6.daddr = daddr, .v6.daddr = daddr,
.sport = sport, .sport = sport,
.dport = dport, .dport = dport,
.ingress_ifindex = ifindex,
}; };
u32 act; u32 act;
......
...@@ -6316,6 +6316,7 @@ struct bpf_sk_lookup { ...@@ -6316,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */ __u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */ __u32 local_port; /* Host byte order */
__u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
}; };
/* /*
......
...@@ -10491,6 +10491,7 @@ static bool sk_lookup_is_valid_access(int off, int size, ...@@ -10491,6 +10491,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
case bpf_ctx_range(struct bpf_sk_lookup, remote_port): case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
case bpf_ctx_range(struct bpf_sk_lookup, local_port): case bpf_ctx_range(struct bpf_sk_lookup, local_port):
case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
bpf_ctx_record_field_size(info, sizeof(__u32)); bpf_ctx_record_field_size(info, sizeof(__u32));
return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
...@@ -10580,6 +10581,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, ...@@ -10580,6 +10581,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct bpf_sk_lookup_kern, bpf_target_off(struct bpf_sk_lookup_kern,
dport, 2, target_size)); dport, 2, target_size));
break; break;
case offsetof(struct bpf_sk_lookup, ingress_ifindex):
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
bpf_target_off(struct bpf_sk_lookup_kern,
ingress_ifindex, 4, target_size));
break;
} }
return insn - insn_buf; return insn - insn_buf;
......
...@@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, ...@@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
struct inet_hashinfo *hashinfo, struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff, struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport, __be32 saddr, __be16 sport,
__be32 daddr, u16 hnum) __be32 daddr, u16 hnum, const int dif)
{ {
struct sock *sk, *reuse_sk; struct sock *sk, *reuse_sk;
bool no_reuseport; bool no_reuseport;
...@@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net, ...@@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo) if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */ return NULL; /* only TCP is supported */
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
saddr, sport, daddr, hnum, &sk); daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk)) if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk; return sk;
...@@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net, ...@@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net,
/* Lookup redirect from BPF */ /* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet_lookup_run_bpf(net, hashinfo, skb, doff, result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
saddr, sport, daddr, hnum); saddr, sport, daddr, hnum, dif);
if (result) if (result)
goto done; goto done;
} }
......
...@@ -460,7 +460,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, ...@@ -460,7 +460,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
struct udp_table *udptable, struct udp_table *udptable,
struct sk_buff *skb, struct sk_buff *skb,
__be32 saddr, __be16 sport, __be32 saddr, __be16 sport,
__be32 daddr, u16 hnum) __be32 daddr, u16 hnum, const int dif)
{ {
struct sock *sk, *reuse_sk; struct sock *sk, *reuse_sk;
bool no_reuseport; bool no_reuseport;
...@@ -468,8 +468,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, ...@@ -468,8 +468,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
if (udptable != &udp_table) if (udptable != &udp_table)
return NULL; /* only UDP is supported */ return NULL; /* only UDP is supported */
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
saddr, sport, daddr, hnum, &sk); daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk)) if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk; return sk;
...@@ -505,7 +505,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, ...@@ -505,7 +505,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
/* Lookup redirect from BPF */ /* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp4_lookup_run_bpf(net, udptable, skb, sk = udp4_lookup_run_bpf(net, udptable, skb,
saddr, sport, daddr, hnum); saddr, sport, daddr, hnum, dif);
if (sk) { if (sk) {
result = sk; result = sk;
goto done; goto done;
......
...@@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, ...@@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr, const struct in6_addr *saddr,
const __be16 sport, const __be16 sport,
const struct in6_addr *daddr, const struct in6_addr *daddr,
const u16 hnum) const u16 hnum, const int dif)
{ {
struct sock *sk, *reuse_sk; struct sock *sk, *reuse_sk;
bool no_reuseport; bool no_reuseport;
...@@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net, ...@@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo) if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */ return NULL; /* only TCP is supported */
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
saddr, sport, daddr, hnum, &sk); daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk)) if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk; return sk;
...@@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net, ...@@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net,
/* Lookup redirect from BPF */ /* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
saddr, sport, daddr, hnum); saddr, sport, daddr, hnum, dif);
if (result) if (result)
goto done; goto done;
} }
......
...@@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, ...@@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr, const struct in6_addr *saddr,
__be16 sport, __be16 sport,
const struct in6_addr *daddr, const struct in6_addr *daddr,
u16 hnum) u16 hnum, const int dif)
{ {
struct sock *sk, *reuse_sk; struct sock *sk, *reuse_sk;
bool no_reuseport; bool no_reuseport;
...@@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, ...@@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
if (udptable != &udp_table) if (udptable != &udp_table)
return NULL; /* only UDP is supported */ return NULL; /* only UDP is supported */
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
saddr, sport, daddr, hnum, &sk); daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk)) if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk; return sk;
...@@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net, ...@@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
/* Lookup redirect from BPF */ /* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp6_lookup_run_bpf(net, udptable, skb, sk = udp6_lookup_run_bpf(net, udptable, skb,
saddr, sport, daddr, hnum); saddr, sport, daddr, hnum, dif);
if (sk) { if (sk) {
result = sk; result = sk;
goto done; goto done;
......
...@@ -6316,6 +6316,7 @@ struct bpf_sk_lookup { ...@@ -6316,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */ __u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */ __u32 local_port; /* Host byte order */
__u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
}; };
/* /*
......
...@@ -937,6 +937,37 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel) ...@@ -937,6 +937,37 @@ static void test_drop_on_lookup(struct test_sk_lookup *skel)
.connect_to = { EXT_IP6, EXT_PORT }, .connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, INT_PORT }, .listen_at = { EXT_IP6, INT_PORT },
}, },
/* The program will drop on success, meaning that the ifindex
* was 1.
*/
{
.desc = "TCP IPv4 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "TCP IPv6 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_STREAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, EXT_PORT },
},
{
.desc = "UDP IPv4 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP4, EXT_PORT },
.listen_at = { EXT_IP4, EXT_PORT },
},
{
.desc = "UDP IPv6 drop on valid ifindex",
.lookup_prog = skel->progs.check_ifindex,
.sotype = SOCK_DGRAM,
.connect_to = { EXT_IP6, EXT_PORT },
.listen_at = { EXT_IP6, EXT_PORT },
},
}; };
const struct test *t; const struct test *t;
......
...@@ -84,6 +84,14 @@ int lookup_drop(struct bpf_sk_lookup *ctx) ...@@ -84,6 +84,14 @@ int lookup_drop(struct bpf_sk_lookup *ctx)
return SK_DROP; return SK_DROP;
} }
SEC("sk_lookup")
int check_ifindex(struct bpf_sk_lookup *ctx)
{
if (ctx->ingress_ifindex == 1)
return SK_DROP;
return SK_PASS;
}
SEC("sk_reuseport") SEC("sk_reuseport")
int reuseport_pass(struct sk_reuseport_md *ctx) int reuseport_pass(struct sk_reuseport_md *ctx)
{ {
......
...@@ -229,6 +229,24 @@ ...@@ -229,6 +229,24 @@
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, local_port)), offsetof(struct bpf_sk_lookup, local_port)),
/* 1-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 3),
/* 2-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex) + 2),
/* 4-byte read from ingress_ifindex field */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
/* 8-byte read from sk field */ /* 8-byte read from sk field */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, sk)), offsetof(struct bpf_sk_lookup, sk)),
...@@ -351,6 +369,20 @@ ...@@ -351,6 +369,20 @@
.expected_attach_type = BPF_SK_LOOKUP, .expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
}, },
{
"invalid 8-byte read from bpf_sk_lookup ingress_ifindex field",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct bpf_sk_lookup, ingress_ifindex)),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_LOOKUP,
.expected_attach_type = BPF_SK_LOOKUP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */ /* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */
{ {
"invalid 4-byte read from bpf_sk_lookup sk field", "invalid 4-byte read from bpf_sk_lookup sk field",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment