Commit c89cca30 authored by Jakub Kicinski, committed by David S. Miller

net: skbuff: sprinkle more __GFP_NOWARN on ingress allocs

build_skb() and frag allocations done with GFP_ATOMIC will
fail in real life, when the system is under memory pressure,
and there's nothing we can do about that. So there's no point
in printing warnings.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3608d6ac
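
For context: __GFP_NOWARN only suppresses the allocator's "page allocation failure" splat (warning plus stack dump); it does not change how the allocation itself behaves. The sketch below shows the kind of NAPI-context receive path these call sites sit on; rx_alloc_skb() is a hypothetical helper for illustration, not code from this commit.

#include <linux/skbuff.h>

/* Hypothetical NAPI-context RX helper: a sketch of the call sites this
 * patch annotates, not code from the commit itself.
 */
static struct sk_buff *rx_alloc_skb(unsigned int len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	/* napi_alloc_frag() -> __napi_alloc_frag_align(): after this patch
	 * the page frag is allocated with GFP_ATOMIC | __GFP_NOWARN, so a
	 * failure under memory pressure stays silent.
	 */
	data = napi_alloc_frag(truesize);
	if (unlikely(!data))
		return NULL;	/* caller drops the frame and moves on */

	/* build_skb() -> __build_skb(): the sk_buff itself is likewise
	 * allocated with GFP_ATOMIC | __GFP_NOWARN after this patch.
	 */
	skb = build_skb(data, truesize);
	if (unlikely(!skb)) {
		skb_free_frag(data);	/* unwind the frag, drop the packet */
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);	/* headroom for protocol headers */
	return skb;
}

Since GFP_ATOMIC cannot sleep or reclaim, failure here is expected behavior under pressure; the driver accounts the drop in its stats, and a dmesg warning would add nothing actionable.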
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -314,8 +314,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
-	data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
-				       align_mask);
+	data = __page_frag_alloc_align(&nc->page, fragsz,
+				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
 	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 	return data;
 }
@@ -330,7 +330,8 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
 
 		fragsz = SKB_DATA_ALIGN(fragsz);
-		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+		data = __page_frag_alloc_align(nc, fragsz,
+					       GFP_ATOMIC | __GFP_NOWARN,
 					       align_mask);
 	} else {
 		local_bh_disable();
@@ -349,7 +350,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 	if (unlikely(!nc->skb_count)) {
 		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
-						      GFP_ATOMIC,
+						      GFP_ATOMIC | __GFP_NOWARN,
 						      NAPI_SKB_CACHE_BULK,
 						      nc->skb_cache);
 		if (unlikely(!nc->skb_count)) {
@@ -418,7 +419,8 @@ struct sk_buff *slab_build_skb(void *data)
 	struct sk_buff *skb;
 	unsigned int size;
 
-	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+			       GFP_ATOMIC | __GFP_NOWARN);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -469,7 +471,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
	struct sk_buff *skb;
 
-	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+			       GFP_ATOMIC | __GFP_NOWARN);
 	if (unlikely(!skb))
 		return NULL;
 
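
Why the flag silences the message: the page allocator's failure report is gated on __GFP_NOWARN before anything is printed. A condensed sketch of that gate, simplified from the warn_alloc() logic in mm/page_alloc.c (not verbatim kernel code):

#include <linux/gfp.h>
#include <linux/printk.h>

/* Condensed sketch of the allocator-side gate; the real warn_alloc()
 * also ratelimits and handles a few more suppression cases.
 */
static void maybe_warn_alloc_failure(gfp_t gfp_mask, unsigned int order)
{
	/* Callers passing __GFP_NOWARN -- as the ingress paths above now
	 * do -- opt out of the failure report entirely.
	 */
	if (gfp_mask & __GFP_NOWARN)
		return;

	pr_warn("page allocation failure: order:%u, mode:%#x\n",
		order, (unsigned int)gfp_mask);
	dump_stack();
}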