Commit cd0057ad authored by Paolo Abeni

Merge branch 'tcp-refactor-skb_cmp_decrypted-checks'

Jakub Kicinski says:

====================
tcp: refactor skb_cmp_decrypted() checks

Refactor the input path coalescing checks and wrap the "EOR forcing"
logic into a helper. This will hopefully make the code easier to
follow. While at it, throw some DEBUG_NET checks into skb_shift().
====================

Link: https://lore.kernel.org/r/20240530233616.85897-1-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 2589d668 99b8add0
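
For readers skimming the diff below: the rx-path change replaces pairs of open-coded checks (mptcp_skb_can_collapse() plus skb_cmp_decrypted()) with a single tcp_skb_can_collapse_rx() predicate. The following is a minimal, self-contained userspace sketch of that consolidation pattern; the mock_skb type and its fields are invented for illustration and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for struct sk_buff, carrying just enough state
 * for the two predicates the real helper wraps. */
struct mock_skb {
        bool mptcp_compatible;  /* stand-in for the MPTCP mapping checks */
        bool decrypted;         /* stand-in for TLS-offload decrypt state */
};

static bool mptcp_skb_can_collapse(const struct mock_skb *to,
                                   const struct mock_skb *from)
{
        return to->mptcp_compatible == from->mptcp_compatible;
}

/* Like skb_cmp_decrypted(): nonzero when decryption state differs. */
static int skb_cmp_decrypted(const struct mock_skb *a,
                             const struct mock_skb *b)
{
        return a->decrypted != b->decrypted;
}

/* The consolidated predicate: each input-path coalescing site
 * (tcp_try_coalesce, tcp_collapse, tcp_add_backlog) now asks one
 * question instead of open-coding both checks. */
static bool tcp_skb_can_collapse_rx(const struct mock_skb *to,
                                    const struct mock_skb *from)
{
        return mptcp_skb_can_collapse(to, from) &&
               !skb_cmp_decrypted(to, from);
}

int main(void)
{
        struct mock_skb plain = { .mptcp_compatible = true, .decrypted = false };
        struct mock_skb decrypted = { .mptcp_compatible = true, .decrypted = true };

        /* Same decrypt state: collapsing allowed. */
        printf("plain + plain: %d\n", tcp_skb_can_collapse_rx(&plain, &plain));
        /* Mixed decrypt state: collapsing refused. */
        printf("plain + decrypted: %d\n", tcp_skb_can_collapse_rx(&plain, &decrypted));
        return 0;
}

Centralizing the predicate means a future coalescing site cannot forget the decryption-state comparison, which is exactly the class of bug the skb_cmp_decrypted() checks were scattered around to prevent.
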
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1066,11 +1066,19 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
                                         const struct sk_buff *from)
 {
+        /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
         return likely(tcp_skb_can_collapse_to(to) &&
                       mptcp_skb_can_collapse(to, from) &&
                       skb_pure_zcopy_same(to, from));
 }
 
+static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
+                                           const struct sk_buff *from)
+{
+        return likely(mptcp_skb_can_collapse(to, from) &&
+                      !skb_cmp_decrypted(to, from));
+}
+
 /* Events passed to congestion control interface */
 enum tcp_ca_event {
         CA_EVENT_TX_START,      /* first transmit when no packets in flight */
@@ -2095,6 +2103,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc
         tcp_wmem_free_skb(sk, skb);
 }
 
+static inline void tcp_write_collapse_fence(struct sock *sk)
+{
+        struct sk_buff *skb = tcp_write_queue_tail(sk);
+
+        if (skb)
+                TCP_SKB_CB(skb)->eor = 1;
+}
+
 static inline void tcp_push_pending_frames(struct sock *sk)
 {
         if (tcp_send_head(sk)) {
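The comment added to tcp_skb_can_collapse() above states the key invariant: the write path never needs a decryption-state check, because callers that change crypto state fence the queue first. Setting eor on the current tail means tcp_skb_can_collapse_to() will refuse to append to it, so later data can never merge into an skb with different state. A toy sketch of that interplay (mock types again, invented for illustration, not kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_skb {
        bool eor;       /* end-of-record: fences off later collapsing */
};

/* Toy write queue: just a pointer to the tail skb (may be NULL). */
struct mock_sock {
        struct mock_skb *write_queue_tail;
};

static struct mock_skb *tcp_write_queue_tail(struct mock_sock *sk)
{
        return sk->write_queue_tail;
}

/* Mirrors the new helper: mark the tail, if any, as end of record. */
static void tcp_write_collapse_fence(struct mock_sock *sk)
{
        struct mock_skb *skb = tcp_write_queue_tail(sk);

        if (skb)
                skb->eor = true;
}

/* Simplified tx-side check: an skb with eor set never accepts more
 * data, which is why no decrypt-state comparison is needed there. */
static bool tcp_skb_can_collapse_to(const struct mock_skb *skb)
{
        return !skb->eor;
}

int main(void)
{
        struct mock_skb tail = { .eor = false };
        struct mock_sock sk = { .write_queue_tail = &tail };

        printf("before fence: %d\n", tcp_skb_can_collapse_to(&tail));
        tcp_write_collapse_fence(&sk);
        printf("after fence: %d\n", tcp_skb_can_collapse_to(&tail));
        return 0;
}
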
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4139,6 +4139,9 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
         if (skb_zcopy(tgt) || skb_zcopy(skb))
                 return 0;
 
+        DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle);
+        DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb));
+
         todo = shiftlen;
         from = 0;
         to = skb_shinfo(tgt)->nr_frags;
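The two DEBUG_NET_WARN_ON_ONCE() calls added to skb_shift() above are assertions gated on CONFIG_DEBUG_NET: debug builds warn if frags with mismatched page-pool recycling or decryption state are ever shifted, while production kernels pay nothing. A rough userspace analogue of the warn-once pattern (the real kernel macro is defined differently; this only illustrates the behavior):

#include <stdio.h>

/* Flip to 0 to mimic a kernel built without CONFIG_DEBUG_NET. */
#define DEBUG_NET 1

#if DEBUG_NET
/* Warn at most once per call site, the first time cond holds. */
#define DEBUG_NET_WARN_ON_ONCE(cond)                                    \
        do {                                                            \
                static int warned;                                      \
                if ((cond) && !warned) {                                \
                        warned = 1;                                     \
                        fprintf(stderr, "WARN %s:%d: %s\n",             \
                                __FILE__, __LINE__, #cond);             \
                }                                                       \
        } while (0)
#else
/* Compiles away on production builds. */
#define DEBUG_NET_WARN_ON_ONCE(cond) do { } while (0)
#endif

int main(void)
{
        int tgt_pp_recycle = 0, skb_pp_recycle = 1;
        int i;

        /* One call site, hit three times: warns only on the first hit. */
        for (i = 0; i < 3; i++)
                DEBUG_NET_WARN_ON_ONCE(tgt_pp_recycle != skb_pp_recycle);
        return 0;
}
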
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4813,10 +4813,7 @@ static bool tcp_try_coalesce(struct sock *sk,
         if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
                 return false;
 
-        if (!mptcp_skb_can_collapse(to, from))
-                return false;
-
-        if (skb_cmp_decrypted(from, to))
+        if (!tcp_skb_can_collapse_rx(to, from))
                 return false;
 
         if (!skb_try_coalesce(to, from, fragstolen, &delta))
@@ -5372,7 +5369,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                         break;
                 }
 
-                if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
+                if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) &&
                     TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
                         end_of_skbs = false;
                         break;
@@ -5423,11 +5420,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
                                 skb = tcp_collapse_one(sk, skb, list, root);
                                 if (!skb ||
                                     skb == tail ||
-                                    !mptcp_skb_can_collapse(nskb, skb) ||
+                                    !tcp_skb_can_collapse_rx(nskb, skb) ||
                                     (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
                                         goto end;
-                                if (skb_cmp_decrypted(skb, nskb))
-                                        goto end;
                         }
                 }
         }
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2044,8 +2044,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
              TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
             ((TCP_SKB_CB(tail)->tcp_flags ^
               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
-            !mptcp_skb_can_collapse(tail, skb) ||
-            skb_cmp_decrypted(tail, skb) ||
+            !tcp_skb_can_collapse_rx(tail, skb) ||
             thtail->doff != th->doff ||
             memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
                 goto no_coalesce;
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -231,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
                                  u32 seq)
 {
         struct net_device *netdev;
-        struct sk_buff *skb;
         int err = 0;
         u8 *rcd_sn;
 
-        skb = tcp_write_queue_tail(sk);
-        if (skb)
-                TCP_SKB_CB(skb)->eor = 1;
-
+        tcp_write_collapse_fence(sk);
         rcd_sn = tls_ctx->tx.rec_seq;
 
         trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
@@ -1067,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk)
         struct tls_prot_info *prot;
         struct net_device *netdev;
         struct tls_context *ctx;
-        struct sk_buff *skb;
         char *iv, *rec_seq;
         int rc;
 
@@ -1138,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk)
          * SKBs where only part of the payload needs to be encrypted.
          * So mark the last skb in the write queue as end of record.
          */
-        skb = tcp_write_queue_tail(sk);
-        if (skb)
-                TCP_SKB_CB(skb)->eor = 1;
+        tcp_write_collapse_fence(sk);
 
         /* Avoid offloading if the device is down
          * We don't want to offload new flows after