Commit 72c1a284 authored by Jakub Kicinski

Merge branch 'net-extend-alloc_skb_with_frags-max-size'

Eric Dumazet says:

====================
net: extend alloc_skb_with_frags() max size

alloc_skb_with_frags(), while being able to use high order allocations,
limits the payload size to PAGE_SIZE * MAX_SKB_FRAGS.

Reviewing Tahsin Erdogan's patch [1], it was clear to me that we need
to remove this limitation.

[1] https://lore.kernel.org/netdev/20230731230736.109216-1-trdgn@amazon.com/

v2: Addressed Willem's feedback on the first patch.
====================

Link: https://lore.kernel.org/r/20230801205254.400094-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 49c467dc 37dfe5b8
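For context, the arithmetic behind the cover letter: with 4 KiB pages, MAX_SKB_FRAGS == 17, and PAGE_ALLOC_COSTLY_ORDER == 3 (common x86-64 defaults; assumed here, not stated in the commit), the old cap of PAGE_SIZE * MAX_SKB_FRAGS is about 68 KiB, while allowing one order-3 compound page per frag raises it to about 544 KiB. A minimal user-space sketch of that math:

#include <stdio.h>

/* Assumed defaults for illustration only; the kernel derives these
 * from the architecture and Kconfig (e.g. CONFIG_MAX_SKB_FRAGS).
 */
#define PAGE_SIZE               4096UL
#define MAX_SKB_FRAGS           17UL
#define PAGE_ALLOC_COSTLY_ORDER 3

int main(void)
{
        /* Old limit: at most one order-0 page per frag slot. */
        unsigned long old_cap = MAX_SKB_FRAGS * PAGE_SIZE;
        /* New limit: each frag slot may hold one order-3 compound page. */
        unsigned long new_cap = MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);

        printf("old cap: %lu bytes (%lu KiB)\n", old_cap, old_cap >> 10);
        printf("new cap: %lu bytes (%lu KiB)\n", new_cap, new_cap >> 10);
        return 0;
}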
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -614,8 +614,10 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;
 
+       if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  err, 0);
+                                  err, PAGE_ALLOC_COSTLY_ORDER);
        if (!skb)
                return NULL;
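The same two-line clamp recurs in tun.c and af_packet.c below: any part of len that cannot fit within MAX_SKB_FRAGS frags of order PAGE_ALLOC_COSTLY_ORDER is moved into the linear (head) area instead. A standalone restatement of that rule, reusing the assumed constants from the sketch above (illustration, not kernel code):

#define PAGE_SIZE               4096UL
#define MAX_SKB_FRAGS           17UL
#define PAGE_ALLOC_COSTLY_ORDER 3

static unsigned long clamp_linear(unsigned long len, unsigned long linear)
{
        unsigned long frag_budget =
                MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);

        /* If the paged part (len - linear) would exceed what the frag
         * array can carry, grow the linear part to absorb the excess,
         * so the allocator is never asked for too much frag data.
         */
        if (len - linear > frag_budget)
                linear = len - frag_budget;
        return linear;
}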
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1526,8 +1526,10 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;
 
+       if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  &err, 0);
+                                  &err, PAGE_ALLOC_COSTLY_ORDER);
        if (!skb)
                return ERR_PTR(err);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6204,7 +6204,7 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
  *
  * @header_len: size of linear part
  * @data_len: needed length in frags
- * @max_page_order: max page order desired.
+ * @order: max page order desired.
  * @errcode: pointer to error code if any
  * @gfp_mask: allocation mask
  *
@@ -6212,21 +6212,17 @@ EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
  */
 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                                      unsigned long data_len,
-                                     int max_page_order,
+                                     int order,
                                      int *errcode,
                                      gfp_t gfp_mask)
 {
-       int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        unsigned long chunk;
        struct sk_buff *skb;
        struct page *page;
-       int i;
+       int nr_frags = 0;
 
        *errcode = -EMSGSIZE;
-       /* Note this test could be relaxed, if we succeed to allocate
-        * high order pages...
-        */
-       if (npages > MAX_SKB_FRAGS)
+       if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
                return NULL;
 
        *errcode = -ENOBUFS;
@@ -6234,34 +6230,32 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
        if (!skb)
                return NULL;
 
-       skb->truesize += npages << PAGE_SHIFT;
-
-       for (i = 0; npages > 0; i++) {
-               int order = max_page_order;
-
-               while (order) {
-                       if (npages >= 1 << order) {
-                               page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
-                                                  __GFP_COMP |
-                                                  __GFP_NOWARN,
-                                                  order);
-                               if (page)
-                                       goto fill_page;
-                               /* Do not retry other high order allocations */
-                               order = 1;
-                               max_page_order = 0;
-                       }
-                       order--;
-               }
-               page = alloc_page(gfp_mask);
-               if (!page)
-                       goto failure;
-fill_page:
+       while (data_len) {
+               if (nr_frags == MAX_SKB_FRAGS - 1)
+                       goto failure;
+               while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
+                       order--;
+
+               if (order) {
+                       page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
+                                          __GFP_COMP |
+                                          __GFP_NOWARN,
+                                          order);
+                       if (!page) {
+                               order--;
+                               continue;
+                       }
+               } else {
+                       page = alloc_page(gfp_mask);
+                       if (!page)
+                               goto failure;
+               }
                chunk = min_t(unsigned long, data_len,
                              PAGE_SIZE << order);
-               skb_fill_page_desc(skb, i, page, 0, chunk);
+               skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
+               nr_frags++;
+               skb->truesize += (PAGE_SIZE << order);
                data_len -= chunk;
-               npages -= 1 << order;
        }
        return skb;
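The rewritten loop above no longer precomputes npages. It walks data_len down, first shrinking the requested order so a high-order page is not wasted on a small tail, then stepping the order down whenever a costly allocation fails, and only failing hard once an order-0 allocation fails or the frag array fills up; truesize is now charged per page actually allocated. A rough user-space model of that control flow (an assumed simplification, not the kernel function):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096UL
#define MAX_SKB_FRAGS 17

/* Stand-in for alloc_pages(); returns NULL on failure. */
static void *fake_alloc(int order)
{
        return malloc(PAGE_SIZE << order);
}

/* Returns the number of frags used, or -1 on failure. */
static int fill_frags(unsigned long data_len, int order)
{
        int nr_frags = 0;

        while (data_len) {
                void *page;
                unsigned long chunk;

                if (nr_frags == MAX_SKB_FRAGS - 1)
                        return -1;
                /* Shrink the order so we do not over-allocate for the tail. */
                while (order &&
                       ((data_len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) <
                       (PAGE_SIZE << order))
                        order--;
                page = fake_alloc(order);
                if (!page) {
                        if (!order)
                                return -1;      /* even a single page failed */
                        order--;                /* retry at a lower order */
                        continue;
                }
                chunk = data_len < (PAGE_SIZE << order) ?
                        data_len : (PAGE_SIZE << order);
                nr_frags++;
                data_len -= chunk;
                free(page);     /* sketch only: the kernel keeps the page */
        }
        return nr_frags;
}

int main(void)
{
        /* 100000 bytes at max order 3: three 32 KiB chunks plus one tail page. */
        printf("frags used: %d\n", fill_frags(100000UL, 3));
        return 0;
}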
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2927,8 +2927,10 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;
 
+       if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  err, 0);
+                                  err, PAGE_ALLOC_COSTLY_ORDER);
        if (!skb)
                return NULL;