Commit a166151c authored by Alexei Starovoitov's avatar Alexei Starovoitov Committed by David S. Miller

bpf: fix bpf helpers to use skb->mac_header relative offsets

For the short-term solution, let's fix bpf helper functions to use
skb->mac_header relative offsets instead of skb->data in order to
get the same eBPF programs with cls_bpf and act_bpf work on ingress
and egress qdisc path. We need to ensure that mac_header is set
before calling into programs. This is effectively the first option
from below referenced discussion.

More long term solution for LD_ABS|LD_IND instructions will be more
intrusive but also more beneficial than this, and implemented later
as it's too risky at this point in time.

I.e., we plan to look into the option of moving skb_pull() out of
eth_type_trans() and into netif_receive_skb() as has been suggested
as second option. Meanwhile, this solution ensures ingress can be
used with eBPF, too, and that we won't run into ABI troubles later.
For dealing with negative offsets inside eBPF helper functions,
we've implemented bpf_skb_clone_unwritable() to test for unwritable
headers.

Reference: http://thread.gmane.org/gmane.linux.network/359129/focus=359694
Fixes: 608cd71a ("tc: bpf: generalize pedit action")
Fixes: 91bc4822 ("tc: bpf: add checksum helpers")
Signed-off-by: default avatarAlexei Starovoitov <ast@plumgrid.com>
Signed-off-by: default avatarDaniel Borkmann <daniel@iogearbox.net>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 51b5df88
...@@ -177,7 +177,7 @@ enum bpf_func_id { ...@@ -177,7 +177,7 @@ enum bpf_func_id {
/** /**
* skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
* @skb: pointer to skb * @skb: pointer to skb
* @offset: offset within packet from skb->data * @offset: offset within packet from skb->mac_header
* @from: pointer where to copy bytes from * @from: pointer where to copy bytes from
* @len: number of bytes to store into packet * @len: number of bytes to store into packet
* @flags: bit 0 - if true, recompute skb->csum * @flags: bit 0 - if true, recompute skb->csum
......
...@@ -79,8 +79,11 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ ...@@ -79,8 +79,11 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_RANDOM 56 #define SKF_AD_RANDOM 56
#define SKF_AD_VLAN_TPID 60 #define SKF_AD_VLAN_TPID 60
#define SKF_AD_MAX 64 #define SKF_AD_MAX 64
#define SKF_NET_OFF (-0x100000) #define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000) #define SKF_LL_OFF (-0x200000)
#define BPF_NET_OFF SKF_NET_OFF
#define BPF_LL_OFF SKF_LL_OFF
#endif /* _UAPI__LINUX_FILTER_H__ */ #endif /* _UAPI__LINUX_FILTER_H__ */
...@@ -1175,12 +1175,27 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) ...@@ -1175,12 +1175,27 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
return 0; return 0;
} }
/**
* bpf_skb_clone_not_writable - is the header of a clone not writable
* @skb: buffer to check
* @len: length up to which to write, can be negative
*
* Returns true if modifying the header part of the cloned buffer
* does require the data to be copied. I.e. this version works with
* negative lengths needed for eBPF case!
*/
static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
{
return skb_header_cloned(skb) ||
(int) skb_headroom(skb) + len > skb->hdr_len;
}
#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1)
static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
{ {
struct sk_buff *skb = (struct sk_buff *) (long) r1; struct sk_buff *skb = (struct sk_buff *) (long) r1;
unsigned int offset = (unsigned int) r2; int offset = (int) r2;
void *from = (void *) (long) r3; void *from = (void *) (long) r3;
unsigned int len = (unsigned int) r4; unsigned int len = (unsigned int) r4;
char buf[16]; char buf[16];
...@@ -1194,10 +1209,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) ...@@ -1194,10 +1209,12 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
* *
* so check for invalid 'offset' and too large 'len' * so check for invalid 'offset' and too large 'len'
*/ */
if (unlikely(offset > 0xffff || len > sizeof(buf))) if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
return -EFAULT; return -EFAULT;
if (skb_cloned(skb) && !skb_clone_writable(skb, offset + len)) offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
bpf_skb_clone_unwritable(skb, offset + len)))
return -EFAULT; return -EFAULT;
ptr = skb_header_pointer(skb, offset, len, buf); ptr = skb_header_pointer(skb, offset, len, buf);
...@@ -1232,15 +1249,18 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = { ...@@ -1232,15 +1249,18 @@ const struct bpf_func_proto bpf_skb_store_bytes_proto = {
#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) #define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f)
#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) #define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10)
static u64 bpf_l3_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{ {
struct sk_buff *skb = (struct sk_buff *) (long) r1; struct sk_buff *skb = (struct sk_buff *) (long) r1;
int offset = (int) r2;
__sum16 sum, *ptr; __sum16 sum, *ptr;
if (unlikely(offset > 0xffff)) if (unlikely((u32) offset > 0xffff))
return -EFAULT; return -EFAULT;
if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
return -EFAULT; return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
...@@ -1276,16 +1296,19 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = { ...@@ -1276,16 +1296,19 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.arg5_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING,
}; };
static u64 bpf_l4_csum_replace(u64 r1, u64 offset, u64 from, u64 to, u64 flags) static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
{ {
struct sk_buff *skb = (struct sk_buff *) (long) r1; struct sk_buff *skb = (struct sk_buff *) (long) r1;
u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
int offset = (int) r2;
__sum16 sum, *ptr; __sum16 sum, *ptr;
if (unlikely(offset > 0xffff)) if (unlikely((u32) offset > 0xffff))
return -EFAULT; return -EFAULT;
if (skb_cloned(skb) && !skb_clone_writable(skb, offset + sizeof(sum))) offset -= skb->data - skb_mac_header(skb);
if (unlikely(skb_cloned(skb) &&
bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
return -EFAULT; return -EFAULT;
ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
......
...@@ -38,6 +38,9 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, ...@@ -38,6 +38,9 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_bpf *prog = act->priv; struct tcf_bpf *prog = act->priv;
int action, filter_res; int action, filter_res;
if (unlikely(!skb_mac_header_was_set(skb)))
return TC_ACT_UNSPEC;
spin_lock(&prog->tcf_lock); spin_lock(&prog->tcf_lock);
prog->tcf_tm.lastuse = jiffies; prog->tcf_tm.lastuse = jiffies;
......
...@@ -66,6 +66,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, ...@@ -66,6 +66,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog; struct cls_bpf_prog *prog;
int ret = -1; int ret = -1;
if (unlikely(!skb_mac_header_was_set(skb)))
return -1;
/* Needed here for accessing maps. */ /* Needed here for accessing maps. */
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) { list_for_each_entry_rcu(prog, &head->plist, link) {
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#include <uapi/linux/ip.h> #include <uapi/linux/ip.h>
#include <uapi/linux/in.h> #include <uapi/linux/in.h>
#include <uapi/linux/tcp.h> #include <uapi/linux/tcp.h>
#include <uapi/linux/filter.h>
#include "bpf_helpers.h" #include "bpf_helpers.h"
/* compiler workaround */ /* compiler workaround */
...@@ -14,18 +16,12 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac) ...@@ -14,18 +16,12 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1); bpf_skb_store_bytes(skb, 0, mac, ETH_ALEN, 1);
} }
/* use 1 below for ingress qdisc and 0 for egress */
#if 0
#undef ETH_HLEN
#define ETH_HLEN 0
#endif
#define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check)) #define IP_CSUM_OFF (ETH_HLEN + offsetof(struct iphdr, check))
#define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos)) #define TOS_OFF (ETH_HLEN + offsetof(struct iphdr, tos))
static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
{ {
__u8 old_tos = load_byte(skb, TOS_OFF); __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);
bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
...@@ -38,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) ...@@ -38,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
{ {
__u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
...@@ -48,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) ...@@ -48,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
#define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
{ {
__u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF));
bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
...@@ -57,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) ...@@ -57,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
SEC("classifier") SEC("classifier")
int bpf_prog1(struct __sk_buff *skb) int bpf_prog1(struct __sk_buff *skb)
{ {
__u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol));
long *value; long *value;
if (proto == IPPROTO_TCP) { if (proto == IPPROTO_TCP) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment