Commit 6d123b81 authored by Jakub Kicinski, committed by David S. Miller

net: ip: avoid OOM kills with large UDP sends over loopback

Dave observed a number of machines hitting OOM on the UDP send
path. The workload seems to be sending large UDP packets over
loopback. Since loopback has an MTU of 64k, the kernel will try to
allocate an skb with up to 64k of head space. This has a good
chance of failing under memory pressure. What's worse, if
the message length is <32k the allocation may trigger the
OOM killer.
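
For reference, a minimal userspace sketch of the kind of workload described
above (hypothetical, not taken from the report): one large UDP datagram sent
to loopback, which before this change requires a contiguous skb head close
to the 64k loopback MTU for every send.

/* Hypothetical reproducer sketch (not from the report): one large UDP
 * datagram to loopback; each send previously needed a ~60k contiguous
 * skb head allocation.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	static char payload[60000];	/* close to the 64k loopback MTU */
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),	/* discard port, no listener needed */
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	/* Each sendto() maps to one __ip_append_data() call in the kernel. */
	if (sendto(fd, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}

With the fix, the same send is built from a small linear head plus page
frags (when the device supports SG), so it no longer depends on a
high-order kmalloc succeeding.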

This is entirely avoidable; we can use an skb with page frags.

af_unix solves a similar problem by limiting the head
length to SKB_MAX_ALLOC. This seems like a good and simple
approach. It means that UDP messages > 16kB will now
use fragments if the underlying device supports SG. If extra
allocator pressure causes regressions in real workloads,
we can switch to trying the large allocation first and
falling back.

v4: pre-calculate all the additions to alloclen so
    we can be sure it won't go over order-2
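
The ~16kB cut-off and the order-2 bound above both come from SKB_MAX_ALLOC.
For reference, the relevant macros, approximately as found in
include/linux/skbuff.h around this time (quoted for context, not part of
this patch):

/* include/linux/skbuff.h (approximate excerpt, for reference) */
#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

With 4k pages, PAGE_SIZE << 2 is an order-2 (16kB) allocation minus the
skb_shared_info overhead, so keeping fraglen + alloc_extra under
SKB_MAX_ALLOC keeps the linear head within an order-2 kmalloc.
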
Reported-by: Dave Jones <dsj@fb.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ae24bab2
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1054,7 +1054,7 @@ static int __ip_append_data(struct sock *sk,
 			unsigned int datalen;
 			unsigned int fraglen;
 			unsigned int fraggap;
-			unsigned int alloclen;
+			unsigned int alloclen, alloc_extra;
 			unsigned int pagedlen;
 			struct sk_buff *skb_prev;
 alloc_new_skb:
@@ -1074,35 +1074,39 @@ static int __ip_append_data(struct sock *sk,
 			fraglen = datalen + fragheaderlen;
 			pagedlen = 0;
 
+			alloc_extra = hh_len + 15;
+			alloc_extra += exthdrlen;
+
+			/* The last fragment gets additional space at tail.
+			 * Note, with MSG_MORE we overallocate on fragments,
+			 * because we have no idea what fragment will be
+			 * the last.
+			 */
+			if (datalen == length + fraggap)
+				alloc_extra += rt->dst.trailer_len;
+
 			if ((flags & MSG_MORE) &&
 			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
-			else if (!paged)
+			else if (!paged &&
+				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+				  !(rt->dst.dev->features & NETIF_F_SG)))
 				alloclen = fraglen;
 			else {
 				alloclen = min_t(int, fraglen, MAX_HEADER);
 				pagedlen = fraglen - alloclen;
 			}
 
-			alloclen += exthdrlen;
-
-			/* The last fragment gets additional space at tail.
-			 * Note, with MSG_MORE we overallocate on fragments,
-			 * because we have no idea what fragment will be
-			 * the last.
-			 */
-			if (datalen == length + fraggap)
-				alloclen += rt->dst.trailer_len;
+			alloclen += alloc_extra;
 
 			if (transhdrlen) {
-				skb = sock_alloc_send_skb(sk,
-						alloclen + hh_len + 15,
+				skb = sock_alloc_send_skb(sk, alloclen,
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
 				    2 * sk->sk_sndbuf)
-					skb = alloc_skb(alloclen + hh_len + 15,
+					skb = alloc_skb(alloclen,
 							sk->sk_allocation);
 				if (unlikely(!skb))
 					err = -ENOBUFS;
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1555,7 +1555,7 @@ static int __ip6_append_data(struct sock *sk,
 			unsigned int datalen;
 			unsigned int fraglen;
 			unsigned int fraggap;
-			unsigned int alloclen;
+			unsigned int alloclen, alloc_extra;
 			unsigned int pagedlen;
 alloc_new_skb:
 			/* There's no room in the current skb */
@@ -1582,17 +1582,28 @@ static int __ip6_append_data(struct sock *sk,
 			fraglen = datalen + fragheaderlen;
 			pagedlen = 0;
 
+			alloc_extra = hh_len;
+			alloc_extra += dst_exthdrlen;
+			alloc_extra += rt->dst.trailer_len;
+
+			/* We just reserve space for fragment header.
+			 * Note: this may be overallocation if the message
+			 * (without MSG_MORE) fits into the MTU.
+			 */
+			alloc_extra += sizeof(struct frag_hdr);
+
 			if ((flags & MSG_MORE) &&
 			    !(rt->dst.dev->features&NETIF_F_SG))
 				alloclen = mtu;
-			else if (!paged)
+			else if (!paged &&
+				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
+				  !(rt->dst.dev->features & NETIF_F_SG)))
 				alloclen = fraglen;
 			else {
 				alloclen = min_t(int, fraglen, MAX_HEADER);
 				pagedlen = fraglen - alloclen;
 			}
-
-			alloclen += dst_exthdrlen;
+			alloclen += alloc_extra;
 
 			if (datalen != length + fraggap) {
 				/*
@@ -1602,30 +1613,21 @@ static int __ip6_append_data(struct sock *sk,
 				datalen += rt->dst.trailer_len;
 			}
 
-			alloclen += rt->dst.trailer_len;
 			fraglen = datalen + fragheaderlen;
 
-			/*
-			 * We just reserve space for fragment header.
-			 * Note: this may be overallocation if the message
-			 * (without MSG_MORE) fits into the MTU.
-			 */
-			alloclen += sizeof(struct frag_hdr);
-
 			copy = datalen - transhdrlen - fraggap - pagedlen;
 			if (copy < 0) {
 				err = -EINVAL;
 				goto error;
 			}
 			if (transhdrlen) {
-				skb = sock_alloc_send_skb(sk,
-						alloclen + hh_len,
+				skb = sock_alloc_send_skb(sk, alloclen,
 						(flags & MSG_DONTWAIT), &err);
 			} else {
 				skb = NULL;
 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
 				    2 * sk->sk_sndbuf)
-					skb = alloc_skb(alloclen + hh_len,
+					skb = alloc_skb(alloclen,
 							sk->sk_allocation);
 				if (unlikely(!skb))
 					err = -ENOBUFS;
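
To make the effect concrete, a rough walk-through of the new IPv4 branch for
the reported workload (illustrative numbers only, assuming a ~60kB datagram
over loopback with NETIF_F_SG set; not part of the patch):

/* Illustrative walk-through, not part of the patch:
 *
 *   fraglen       ~ 60k   (UDP payload + headers, single fragment)
 *   alloc_extra   =  hh_len + 15 + exthdrlen (+ trailer_len on last frag)
 *   SKB_MAX_ALLOC ~ 16k   (order-2 bound)
 *
 * fraglen + alloc_extra >= SKB_MAX_ALLOC and the device has SG, so the
 * new condition falls through to the paged branch:
 *
 *   alloclen = min_t(int, fraglen, MAX_HEADER);   // small linear head
 *   pagedlen = fraglen - alloclen;                // rest goes to page frags
 *
 * instead of a single ~60k contiguous head allocation as before.
 */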