Commit faef26fa authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf, selftests: Use bpf_tail_call_static where appropriate

For those locations where we use an immediate tail call map index, use the
newly added bpf_tail_call_static() helper.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/3cfb2b799a62d22c6e7ae5897c23940bdcc24cbc.1601477936.git.daniel@iogearbox.net
parent 0e9f6841
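
For context: bpf_tail_call_static() was introduced in the parent commit (0e9f6841) in libbpf's bpf_helpers.h. The sketch below approximates that definition from recollection rather than from this diff; the essential points are the __builtin_constant_p() guard and the inline assembly that pins the map pointer and the constant slot into fixed registers, so the verifier can track the constant key and the x86-64 JIT can patch the tail call into a direct jump:

static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	/* Reject any call site where the slot is not a compile-time
	 * constant; __bpf_unreachable() breaks the build in that case.
	 */
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();

	/* Emit the tail call (helper ID 12) via inline asm so LLVM cannot
	 * merge differently-keyed call sites into a single call insn,
	 * which would defeat the JIT's direct-jump patching. All caller-
	 * saved registers are clobbered per the BPF calling convention.
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}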
@@ -31,28 +31,30 @@ struct {
 #define PARSE_IP 3
 #define PARSE_IPV6 4
 
-/* protocol dispatch routine.
- * It tail-calls next BPF program depending on eth proto
- * Note, we could have used:
- * bpf_tail_call(skb, &jmp_table, proto);
- * but it would need large prog_array
+/* Protocol dispatch routine. It tail-calls next BPF program depending
+ * on eth proto. Note, we could have used ...
+ *
+ *   bpf_tail_call(skb, &jmp_table, proto);
+ *
+ * ... but it would need large prog_array and cannot be optimised given
+ * the map key is not static.
  */
 static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
 {
 	switch (proto) {
 	case ETH_P_8021Q:
 	case ETH_P_8021AD:
-		bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+		bpf_tail_call_static(skb, &jmp_table, PARSE_VLAN);
 		break;
 	case ETH_P_MPLS_UC:
 	case ETH_P_MPLS_MC:
-		bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+		bpf_tail_call_static(skb, &jmp_table, PARSE_MPLS);
 		break;
 	case ETH_P_IP:
-		bpf_tail_call(skb, &jmp_table, PARSE_IP);
+		bpf_tail_call_static(skb, &jmp_table, PARSE_IP);
 		break;
 	case ETH_P_IPV6:
-		bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+		bpf_tail_call_static(skb, &jmp_table, PARSE_IPV6);
 		break;
 	}
 }
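
The updated comment above explains why the dispatch cannot simply pass proto through: the optimisation requires a static map key. A hypothetical counter-example where the helper does not apply (not part of this commit; parse_eth_proto_dyn and MAX_SLOTS are illustrative names):

/* A slot derived from packet data is only known at runtime, so it
 * would fail bpf_tail_call_static()'s constant check; such a site
 * must keep plain bpf_tail_call() and forgo direct-jump patching.
 */
static __always_inline void parse_eth_proto_dyn(struct __sk_buff *skb,
						u32 proto)
{
	u32 slot = proto % MAX_SLOTS;	/* runtime value, not a constant */

	bpf_tail_call(skb, &jmp_table, slot);
}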
@@ -118,18 +118,18 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 	switch (proto) {
 	case bpf_htons(ETH_P_IP):
-		bpf_tail_call(skb, &jmp_table, IP);
+		bpf_tail_call_static(skb, &jmp_table, IP);
 		break;
 	case bpf_htons(ETH_P_IPV6):
-		bpf_tail_call(skb, &jmp_table, IPV6);
+		bpf_tail_call_static(skb, &jmp_table, IPV6);
 		break;
 	case bpf_htons(ETH_P_MPLS_MC):
 	case bpf_htons(ETH_P_MPLS_UC):
-		bpf_tail_call(skb, &jmp_table, MPLS);
+		bpf_tail_call_static(skb, &jmp_table, MPLS);
 		break;
 	case bpf_htons(ETH_P_8021Q):
 	case bpf_htons(ETH_P_8021AD):
-		bpf_tail_call(skb, &jmp_table, VLAN);
+		bpf_tail_call_static(skb, &jmp_table, VLAN);
 		break;
 	default:
 		/* Protocol not supported */
@@ -246,10 +246,10 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
 	switch (nexthdr) {
 	case IPPROTO_HOPOPTS:
 	case IPPROTO_DSTOPTS:
-		bpf_tail_call(skb, &jmp_table, IPV6OP);
+		bpf_tail_call_static(skb, &jmp_table, IPV6OP);
 		break;
 	case IPPROTO_FRAGMENT:
-		bpf_tail_call(skb, &jmp_table, IPV6FR);
+		bpf_tail_call_static(skb, &jmp_table, IPV6FR);
 		break;
 	default:
 		return parse_ip_proto(skb, nexthdr);
@@ -26,20 +26,20 @@ int entry(struct __sk_buff *skb)
 	/* Multiple locations to make sure we patch
 	 * all of them.
 	 */
-	bpf_tail_call(skb, &jmp_table, 0);
-	bpf_tail_call(skb, &jmp_table, 0);
-	bpf_tail_call(skb, &jmp_table, 0);
-	bpf_tail_call(skb, &jmp_table, 0);
-	bpf_tail_call(skb, &jmp_table, 1);
-	bpf_tail_call(skb, &jmp_table, 1);
-	bpf_tail_call(skb, &jmp_table, 1);
-	bpf_tail_call(skb, &jmp_table, 1);
-	bpf_tail_call(skb, &jmp_table, 2);
-	bpf_tail_call(skb, &jmp_table, 2);
-	bpf_tail_call(skb, &jmp_table, 2);
-	bpf_tail_call(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
 	return 3;
 }
@@ -13,14 +13,14 @@ struct {
 SEC("classifier/0")
 int bpf_func_0(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
 	return 0;
 }
 
 SEC("classifier/1")
 int bpf_func_1(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
 	return 1;
 }
 
@@ -33,25 +33,25 @@ int bpf_func_2(struct __sk_buff *skb)
 SEC("classifier/3")
 int bpf_func_3(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 4);
+	bpf_tail_call_static(skb, &jmp_table, 4);
 	return 3;
 }
 
 SEC("classifier/4")
 int bpf_func_4(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 3);
+	bpf_tail_call_static(skb, &jmp_table, 3);
 	return 4;
 }
 
 SEC("classifier")
 int entry(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	/* Check multi-prog update. */
-	bpf_tail_call(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
 	/* Check tail call limit. */
-	bpf_tail_call(skb, &jmp_table, 3);
+	bpf_tail_call_static(skb, &jmp_table, 3);
 	return 3;
 }
@@ -16,14 +16,14 @@ SEC("classifier/0")
 int bpf_func_0(struct __sk_buff *skb)
 {
 	count++;
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return 1;
 }
 
 SEC("classifier")
 int entry(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return 0;
 }
@@ -21,7 +21,7 @@ TAIL_FUNC(1)
 static __noinline
 int subprog_tail(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return skb->len * 2;
 }
 
@@ -29,7 +29,7 @@ int subprog_tail(struct __sk_buff *skb)
 SEC("classifier")
 int entry(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
 	return subprog_tail(skb);
 }
@@ -14,9 +14,9 @@ static __noinline
 int subprog_tail(struct __sk_buff *skb)
 {
 	if (load_byte(skb, 0))
-		bpf_tail_call(skb, &jmp_table, 1);
+		bpf_tail_call_static(skb, &jmp_table, 1);
 	else
-		bpf_tail_call(skb, &jmp_table, 0);
+		bpf_tail_call_static(skb, &jmp_table, 0);
 	return 1;
 }
 
@@ -32,7 +32,7 @@ int bpf_func_0(struct __sk_buff *skb)
 SEC("classifier")
 int entry(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return 0;
 }
@@ -16,9 +16,9 @@ int subprog_tail2(struct __sk_buff *skb)
 	volatile char arr[64] = {};
 
 	if (load_word(skb, 0) || load_half(skb, 0))
-		bpf_tail_call(skb, &jmp_table, 10);
+		bpf_tail_call_static(skb, &jmp_table, 10);
 	else
-		bpf_tail_call(skb, &jmp_table, 1);
+		bpf_tail_call_static(skb, &jmp_table, 1);
 	return skb->len;
 }
 
@@ -28,7 +28,7 @@ int subprog_tail(struct __sk_buff *skb)
 {
 	volatile char arr[64] = {};
 
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return skb->len * 2;
 }
@@ -14,21 +14,21 @@ static volatile int count;
 __noinline
 int subprog_tail_2(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 2);
+	bpf_tail_call_static(skb, &jmp_table, 2);
 	return skb->len * 3;
 }
 
 __noinline
 int subprog_tail_1(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 1);
+	bpf_tail_call_static(skb, &jmp_table, 1);
 	return skb->len * 2;
 }
 
 __noinline
 int subprog_tail(struct __sk_buff *skb)
 {
-	bpf_tail_call(skb, &jmp_table, 0);
+	bpf_tail_call_static(skb, &jmp_table, 0);
 	return skb->len;
 }
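
Every call site in this patch targets a prog_array named jmp_table. For completeness, such a map is commonly declared with libbpf's BTF-defined map syntax along the following lines; the entry count here is illustrative, not taken from the diff:

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);	/* array of BPF programs */
	__uint(max_entries, 8);			/* illustrative size */
	__uint(key_size, sizeof(__u32));	/* constant slot index */
	__uint(value_size, sizeof(__u32));	/* program fd, set at runtime */
} jmp_table SEC(".maps");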