Commit 1be68a87 authored by Jakub Kicinski, committed by Paolo Abeni

tcp: add a helper for setting EOR on tail skb

TLS uses (and hopefully PSP soon will use) EOR to prevent skbs
with different decrypted state from getting merged, without
adding new checks to the skb handling code. In both cases, once
the connection switches to an "encrypted" state, all subsequent
skbs will be encrypted, so a single "EOR fence" is sufficient
to prevent mixing.

Add a helper for setting the EOR bit, to make this arrangement
more explicit.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 07111530
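
For context, the fence works because the existing collapse check already refuses to merge onto an skb carrying the EOR bit. The helper below already lives in include/net/tcp.h and is shown here only for reference; it is what the new fence relies on:

/* Collapsing onto an skb marked EOR is refused, so setting eor = 1 on the
 * current write-queue tail acts as a one-shot fence for everything queued
 * afterwards.
 */
static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}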
include/net/tcp.h

@@ -1066,6 +1066,7 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
 					const struct sk_buff *from)
 {
+	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
 	return likely(tcp_skb_can_collapse_to(to) &&
 		      mptcp_skb_can_collapse(to, from) &&
 		      skb_pure_zcopy_same(to, from));
@@ -2102,6 +2103,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
 	tcp_wmem_free_skb(sk, skb);
 }
 
+static inline void tcp_write_collapse_fence(struct sock *sk)
+{
+	struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+	if (skb)
+		TCP_SKB_CB(skb)->eor = 1;
+}
+
 static inline void tcp_push_pending_frames(struct sock *sk)
 {
 	if (tcp_send_head(sk)) {
...
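
As an illustration of the intended usage pattern, here is a minimal, hypothetical caller sketch (the function name is made up for this example and is not part of this commit; the real in-tree users are the TLS changes below). Once a socket's TX path switches to producing encrypted data, it fences off whatever plaintext is already queued:

/* Hypothetical example: a protocol flipping its TX path to an "encrypted"
 * state fences the current write-queue tail so later encrypted skbs can
 * never collapse into earlier plaintext ones.
 */
static void example_switch_to_encrypted_tx(struct sock *sk)
{
	/* One-shot fence: everything queued after this point is encrypted,
	 * so no per-skb decrypted-state comparison is needed on collapse.
	 */
	tcp_write_collapse_fence(sk);
}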
net/tls/tls_device.c

@@ -231,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
 				 u32 seq)
 {
 	struct net_device *netdev;
-	struct sk_buff *skb;
 	int err = 0;
 	u8 *rcd_sn;
 
-	skb = tcp_write_queue_tail(sk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
-
+	tcp_write_collapse_fence(sk);
 	rcd_sn = tls_ctx->tx.rec_seq;
 
 	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
@@ -1067,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk)
 	struct tls_prot_info *prot;
 	struct net_device *netdev;
 	struct tls_context *ctx;
-	struct sk_buff *skb;
 	char *iv, *rec_seq;
 	int rc;
 
@@ -1138,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk)
 	 * SKBs where only part of the payload needs to be encrypted.
 	 * So mark the last skb in the write queue as end of record.
 	 */
-	skb = tcp_write_queue_tail(sk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
+	tcp_write_collapse_fence(sk);
 
 	/* Avoid offloading if the device is down
 	 * We don't want to offload new flows after
...