Commit d5b38a71 authored by Eric Dumazet, committed by Jakub Kicinski

tcp: call tcp_set_skb_tso_segs() from tcp_write_xmit()

tcp_write_xmit() calls tcp_init_tso_segs()
to set gso_size and gso_segs on the packet.
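
For context, gso_segs is just the payload length divided by the MSS,
rounded up, and gso_size is the MSS itself (zero for a packet that fits
in a single segment), as the hunk below shows. A standalone userspace
sketch of that arithmetic, with div_round_up() as a stand-in for the
kernel's DIV_ROUND_UP macro and made-up example values:

#include <stdio.h>

/* Round-up integer division, as DIV_ROUND_UP does in the kernel. */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        unsigned int mss = 1448;                     /* example MSS */
        unsigned int lens[] = { 1000, 1448, 4000 };  /* example payload sizes */

        for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned int len = lens[i];
                unsigned int gso_segs = (len <= mss) ? 1 : div_round_up(len, mss);
                unsigned int gso_size = (len <= mss) ? 0 : mss;

                printf("len=%u -> gso_segs=%u gso_size=%u\n",
                       len, gso_segs, gso_size);
        }
        return 0;
}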

tcp_init_tso_segs() requires the stack to maintain
an up-to-date tcp_skb_pcount(). This makes sense
for packets in the rtx queue, but not so much for
packets still in the write queue.
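
To illustrate that dependency, here is a standalone sketch (simplified
stand-in struct and helpers, not the kernel types) of the check
tcp_init_tso_segs() performs: a cached, non-zero count computed for the
same MSS is trusted as-is, so it goes stale if the skb's payload later
grows.

#include <stdio.h>

/* Simplified stand-ins for the skb fields involved; not the kernel types. */
struct fake_skb {
        unsigned int len;       /* payload length */
        unsigned int gso_size;  /* MSS used for the last segment count */
        unsigned int gso_segs;  /* cached segment count (tcp_skb_pcount) */
};

/* Unconditional recompute, like tcp_set_skb_tso_segs(). */
static unsigned int set_tso_segs(struct fake_skb *skb, unsigned int mss)
{
        if (skb->len <= mss) {
                skb->gso_size = 0;
                skb->gso_segs = 1;
                return 1;
        }
        skb->gso_size = mss;
        skb->gso_segs = (skb->len + mss - 1) / mss;
        return skb->gso_segs;
}

/* Like tcp_init_tso_segs(): trust the cached count unless it is missing
 * or was computed for a different MSS.
 */
static unsigned int init_tso_segs(struct fake_skb *skb, unsigned int mss)
{
        if (!skb->gso_segs || (skb->gso_segs > 1 && skb->gso_size != mss))
                return set_tso_segs(skb, mss);

        return skb->gso_segs;
}

int main(void)
{
        struct fake_skb skb = { .len = 3000 };
        unsigned int mss = 1448;

        init_tso_segs(&skb, mss);   /* caches gso_segs = 3 */
        skb.len += 2000;            /* payload grows; the cache is now stale */

        unsigned int cached = init_tso_segs(&skb, mss);  /* still 3: looks valid */
        unsigned int fresh = set_tso_segs(&skb, mss);    /* 4: recomputed */

        printf("cached=%u recomputed=%u\n", cached, fresh);
        return 0;
}

This is why tcp_write_xmit() now calls tcp_set_skb_tso_segs() directly
instead of relying on the cached count via tcp_init_tso_segs().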

In the following patch, we don't want to deal with
tcp_skb_pcount() when moving payload from the 2nd
skb to the 1st skb in the write queue.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240418214600.1291486-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 22555032
net/ipv4/tcp_output.c

@@ -1502,18 +1502,22 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
+static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
+	int tso_segs;
+
 	if (skb->len <= mss_now) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
-		tcp_skb_pcount_set(skb, 1);
 		TCP_SKB_CB(skb)->tcp_gso_size = 0;
-	} else {
-		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
-		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
+		tcp_skb_pcount_set(skb, 1);
+		return 1;
 	}
+	TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
+	tso_segs = DIV_ROUND_UP(skb->len, mss_now);
+	tcp_skb_pcount_set(skb, tso_segs);
+	return tso_segs;
 }
 
 /* Pcount in the middle of the write queue got changed, we need to do various
@@ -2097,10 +2101,9 @@ static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);
 
-	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
-		tcp_set_skb_tso_segs(skb, mss_now);
-		tso_segs = tcp_skb_pcount(skb);
-	}
+	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now))
+		return tcp_set_skb_tso_segs(skb, mss_now);
+
 	return tso_segs;
 }
 
@@ -2733,9 +2736,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (tcp_pacing_check(sk))
 			break;
 
-		tso_segs = tcp_init_tso_segs(skb, mss_now);
-		BUG_ON(!tso_segs);
-
 		cwnd_quota = tcp_cwnd_test(tp);
 		if (!cwnd_quota) {
 			if (push_one == 2)
@@ -2745,6 +2745,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			break;
 		}
 
+		tso_segs = tcp_set_skb_tso_segs(skb, mss_now);
+
 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
 			is_rwnd_limited = true;
 			break;