Commit d907a077 authored by Eric Dumazet, committed by Greg Kroah-Hartman

tcp: limit payload size of sacked skbs

commit 3b4929f6 upstream.

Jonathan Looney reported that TCP can trigger the following crash
in tcp_shifted_skb() :

	BUG_ON(tcp_skb_pcount(skb) < pcount);

This can happen if the remote peer has advertised the smallest
MSS that Linux TCP accepts: 48

An skb can hold 17 fragments, and each fragment can hold 32KB
on x86, or 64KB on PowerPC.

This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
can overflow.

Note that tcp_sendmsg() builds skbs with less than 64KB
of payload, so this problem needs SACK to be enabled.
SACK blocks allow TCP to coalesce multiple skbs in the retransmit
queue, thus filling the 17 fragments to maximal capacity.
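
For illustration only (not part of the original commit message), a minimal
userspace C sketch of the overflow arithmetic, assuming the x86 figures
quoted above (17 fragments of 32KB each, MSS 48 leaving 8 bytes of payload
after 40 bytes of TCP options):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int payload = 17 * 32 * 1024;	/* max payload of one skb on x86: 557056 bytes */
		unsigned int per_seg = 48 - 40;		/* MSS 48 minus full TCP option space */
		unsigned int segs = payload / per_seg;	/* 69632 segments needed */
		uint16_t gso_segs = (uint16_t)segs;	/* the 16-bit field truncates this to 4096 */

		printf("segments: %u, stored in u16: %u\n", segs, gso_segs);
		return 0;
	}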

CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs

Fixes: 832d11c5 ("tcp: Try to restore large SKBs while SACK processing")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Looney <jtl@netflix.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Bruce Curtis <brucec@netflix.com>
Cc: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7e1bdd68
include/linux/tcp.h
@@ -488,4 +488,8 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
 
 	return (user_mss && user_mss < mss) ? user_mss : mss;
 }
+
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
+		  int shiftlen);
+
 #endif	/* _LINUX_TCP_H */
include/net/tcp.h
@@ -55,6 +55,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE	40
+#define TCP_MIN_SND_MSS		48
+#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
 
 /*
  * Never offer a window over 32767 without using window scaling. Some
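Note (derived from the two definitions above, not part of the diff):
TCP_MIN_GSO_SIZE evaluates to 48 - 40 = 8, the smallest payload a segment
can carry once the full 40 bytes of TCP option space is reserved.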
net/ipv4/tcp.c
@@ -3889,6 +3889,7 @@ void __init tcp_init(void)
 	unsigned long limit;
 	unsigned int i;
 
+	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
 		     FIELD_SIZEOF(struct sk_buff, cb));
net/ipv4/tcp_input.c
@@ -1296,7 +1296,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
 	TCP_SKB_CB(skb)->seq += shifted;
 
 	tcp_skb_pcount_add(prev, pcount);
-	BUG_ON(tcp_skb_pcount(skb) < pcount);
+	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
 	tcp_skb_pcount_add(skb, -pcount);
 
 	/* When we're adding to gso_segs == 1, gso_size will be zero,
@@ -1362,6 +1362,21 @@ static int skb_can_shift(const struct sk_buff *skb)
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
 
+int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
+		  int pcount, int shiftlen)
+{
+	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
+	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
+	 * to make sure not storing more than 65535 * 8 bytes per skb,
+	 * even if current MSS is bigger.
+	 */
+	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
+		return 0;
+	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
+		return 0;
+	return skb_shift(to, from, shiftlen);
+}
+
 /* Try collapsing SACK blocks spanning across multiple skbs to a single
  * skb.
  */
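Note (derived arithmetic, not part of the diff): the first check limits a
coalesced skb to 65535 * TCP_MIN_GSO_SIZE = 65535 * 8 = 524280 bytes of
payload, so even if every segment were shrunk to the 8-byte minimum the
u16 tcp_gso_segs field cannot overflow; the second check bounds the
segment count directly before calling skb_shift().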
@@ -1467,7 +1482,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
 		goto fallback;
 
-	if (!skb_shift(prev, skb, len))
+	if (!tcp_skb_shift(prev, skb, pcount, len))
 		goto fallback;
 	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
 		goto out;
@@ -1485,11 +1500,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 		goto out;
 
 	len = skb->len;
-	if (skb_shift(prev, skb, len)) {
-		pcount += tcp_skb_pcount(skb);
-		tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
+	pcount = tcp_skb_pcount(skb);
+	if (tcp_skb_shift(prev, skb, pcount, len))
+		tcp_shifted_skb(sk, prev, skb, state, pcount,
 				len, mss, 0);
-	}
 
 out:
 	return prev;
net/ipv4/tcp_output.c
@@ -1457,8 +1457,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	mss_now -= icsk->icsk_ext_hdr_len;
 
 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	if (mss_now < 48)
-		mss_now = 48;
+	if (mss_now < TCP_MIN_SND_MSS)
+		mss_now = TCP_MIN_SND_MSS;
 	return mss_now;
 }
@@ -2750,7 +2750,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 		if (next_skb_size <= skb_availroom(skb))
 			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
 				      next_skb_size);
-		else if (!skb_shift(skb, next_skb, next_skb_size))
+		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
 			return false;
 	}
 	tcp_highest_sack_replace(sk, next_skb, skb);