Commit 9f06f87f authored by Jakub Kicinski, committed by David S. Miller

net: skbuff: generalize the skb->decrypted bit

The ->decrypted bit can be reused for other crypto protocols.
Remove the direct dependency on TLS, add helpers to clean up
the ifdefs leaking out everywhere.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0d875bb4
...@@ -992,7 +992,7 @@ struct sk_buff { ...@@ -992,7 +992,7 @@ struct sk_buff {
#ifdef CONFIG_NETFILTER_SKIP_EGRESS #ifdef CONFIG_NETFILTER_SKIP_EGRESS
__u8 nf_skip_egress:1; __u8 nf_skip_egress:1;
#endif #endif
#ifdef CONFIG_TLS_DEVICE #ifdef CONFIG_SKB_DECRYPTED
__u8 decrypted:1; __u8 decrypted:1;
#endif #endif
__u8 slow_gro:1; __u8 slow_gro:1;
...@@ -1615,17 +1615,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) ...@@ -1615,17 +1615,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
static inline int skb_cmp_decrypted(const struct sk_buff *skb1, static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
const struct sk_buff *skb2) const struct sk_buff *skb2)
{ {
#ifdef CONFIG_TLS_DEVICE #ifdef CONFIG_SKB_DECRYPTED
return skb2->decrypted - skb1->decrypted; return skb2->decrypted - skb1->decrypted;
#else #else
return 0; return 0;
#endif #endif
} }
/* skb_is_decrypted - report whether the skb carries already-decrypted payload.
 *
 * Wraps the skb->decrypted bit so callers need no CONFIG_SKB_DECRYPTED
 * ifdefs of their own; when the option is compiled out the bit does not
 * exist in struct sk_buff, so this compiles to a constant false.
 */
static inline bool skb_is_decrypted(const struct sk_buff *skb)
{
#ifdef CONFIG_SKB_DECRYPTED
	return skb->decrypted;
#else
	return false;
#endif
}
static inline void skb_copy_decrypted(struct sk_buff *to, static inline void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from) const struct sk_buff *from)
{ {
#ifdef CONFIG_TLS_DEVICE #ifdef CONFIG_SKB_DECRYPTED
to->decrypted = from->decrypted; to->decrypted = from->decrypted;
#endif #endif
} }
......
...@@ -2835,12 +2835,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, ...@@ -2835,12 +2835,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
skb = sk->sk_validate_xmit_skb(sk, dev, skb); skb = sk->sk_validate_xmit_skb(sk, dev, skb);
#ifdef CONFIG_TLS_DEVICE } else if (unlikely(skb_is_decrypted(skb))) {
} else if (unlikely(skb->decrypted)) {
pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
kfree_skb(skb); kfree_skb(skb);
skb = NULL; skb = NULL;
#endif
} }
#endif #endif
......
...@@ -60,6 +60,9 @@ config NET_XGRESS ...@@ -60,6 +60,9 @@ config NET_XGRESS
config NET_REDIRECT config NET_REDIRECT
bool bool
config SKB_DECRYPTED
bool
config SKB_EXTENSIONS config SKB_EXTENSIONS
bool bool
......
...@@ -2526,13 +2526,12 @@ EXPORT_SYMBOL(skb_set_owner_w); ...@@ -2526,13 +2526,12 @@ EXPORT_SYMBOL(skb_set_owner_w);
static bool can_skb_orphan_partial(const struct sk_buff *skb) static bool can_skb_orphan_partial(const struct sk_buff *skb)
{ {
#ifdef CONFIG_TLS_DEVICE
/* Drivers depend on in-order delivery for crypto offload, /* Drivers depend on in-order delivery for crypto offload,
* partial orphan breaks out-of-order-OK logic. * partial orphan breaks out-of-order-OK logic.
*/ */
if (skb->decrypted) if (skb_is_decrypted(skb))
return false; return false;
#endif
return (skb->destructor == sock_wfree || return (skb->destructor == sock_wfree ||
(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
} }
......
...@@ -4805,10 +4805,8 @@ static bool tcp_try_coalesce(struct sock *sk, ...@@ -4805,10 +4805,8 @@ static bool tcp_try_coalesce(struct sock *sk,
if (!mptcp_skb_can_collapse(to, from)) if (!mptcp_skb_can_collapse(to, from))
return false; return false;
#ifdef CONFIG_TLS_DEVICE if (skb_cmp_decrypted(from, to))
if (from->decrypted != to->decrypted)
return false; return false;
#endif
if (!skb_try_coalesce(to, from, fragstolen, &delta)) if (!skb_try_coalesce(to, from, fragstolen, &delta))
return false; return false;
...@@ -5377,9 +5375,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, ...@@ -5377,9 +5375,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
break; break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
#ifdef CONFIG_TLS_DEVICE skb_copy_decrypted(nskb, skb);
nskb->decrypted = skb->decrypted;
#endif
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
if (list) if (list)
__skb_queue_before(list, skb, nskb); __skb_queue_before(list, skb, nskb);
...@@ -5409,10 +5405,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, ...@@ -5409,10 +5405,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
!mptcp_skb_can_collapse(nskb, skb) || !mptcp_skb_can_collapse(nskb, skb) ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
goto end; goto end;
#ifdef CONFIG_TLS_DEVICE if (skb_cmp_decrypted(skb, nskb))
if (skb->decrypted != nskb->decrypted)
goto end; goto end;
#endif
} }
} }
} }
......
...@@ -2044,10 +2044,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, ...@@ -2044,10 +2044,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) || TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
((TCP_SKB_CB(tail)->tcp_flags ^ ((TCP_SKB_CB(tail)->tcp_flags ^
TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) || TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
tail->decrypted != skb->decrypted ||
#endif
!mptcp_skb_can_collapse(tail, skb) || !mptcp_skb_can_collapse(tail, skb) ||
skb_cmp_decrypted(tail, skb) ||
thtail->doff != th->doff || thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce; goto no_coalesce;
......
...@@ -265,9 +265,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) ...@@ -265,9 +265,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
flush |= (len - 1) >= mss; flush |= (len - 1) >= mss;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE flush |= skb_cmp_decrypted(p, skb);
flush |= p->decrypted ^ skb->decrypted;
#endif
if (flush || skb_gro_receive(p, skb)) { if (flush || skb_gro_receive(p, skb)) {
mss = 1; mss = 1;
......
...@@ -20,6 +20,7 @@ config TLS ...@@ -20,6 +20,7 @@ config TLS
config TLS_DEVICE config TLS_DEVICE
bool "Transport Layer Security HW offload" bool "Transport Layer Security HW offload"
depends on TLS depends on TLS
select SKB_DECRYPTED
select SOCK_VALIDATE_XMIT select SOCK_VALIDATE_XMIT
select SOCK_RX_QUEUE_MAPPING select SOCK_RX_QUEUE_MAPPING
default n default n
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment