Commit 7ba7aeab authored by Sebastian Andrzej Siewior, committed by David S. Miller

net: Don't disable interrupts in napi_alloc_frag()

netdev_alloc_frag() can be used from any context and is used by NAPI
and non-NAPI drivers. Non-NAPI drivers use it in interrupt context
and NAPI drivers use it during initial allocation (->ndo_open() or
->ndo_change_mtu()). Some NAPI drivers share the same function for the
initial allocation and the allocation in their NAPI callback.
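As a sketch of that shared-helper pattern (all foo_* names below are invented for illustration, not taken from any real driver): the same refill routine may run in process context from ->ndo_open() and in softirq context from the NAPI poll callback, so it has to rely on netdev_alloc_frag() working in both.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define FOO_RING_SIZE	256
#define FOO_FRAG_SIZE	2048

struct foo_ring {
	void		*buf[FOO_RING_SIZE];
	unsigned int	tail;
};

/* Called from ->ndo_open() (process context) and from the NAPI poll
 * callback (softirq context) alike.
 */
static int foo_refill_rx(struct foo_ring *ring)
{
	void *frag = netdev_alloc_frag(FOO_FRAG_SIZE);

	if (!frag)
		return -ENOMEM;
	ring->buf[ring->tail++ % FOO_RING_SIZE] = frag;
	return 0;
}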

Interrupts are disabled in order to ensure locked access to the per-CPU
`netdev_alloc_cache' from every context.

Let netdev_alloc_frag() check whether interrupts are disabled. If they
are, use `netdev_alloc_cache'; otherwise disable BH and invoke
__napi_alloc_frag() for the allocation. The IRQ check is cheaper than
disabling and re-enabling interrupts, and memory allocation with
interrupts disabled does not work on -RT.
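Put together from the hunks below, the resulting netdev_alloc_frag() reads as follows (the comments are editorial annotations, not part of the patch):

void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		/* Hard-IRQ context, or IRQs already off: the dedicated
		 * per-CPU netdev_alloc_cache can be used as-is, since
		 * nothing on this CPU can interrupt us here.
		 */
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		/* Every other context: disabling BH is enough to keep
		 * the per-CPU napi_alloc_cache consistent, because NAPI
		 * itself runs in softirq (BH) context.
		 */
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}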
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9e49fe4d
@@ -366,19 +366,21 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct page_frag_cache *nc;
-	unsigned long flags;
-	void *data;
+	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	local_irq_save(flags);
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = page_frag_alloc(nc, fragsz, gfp_mask);
-	local_irq_restore(flags);
-	return data;
+	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
+void *napi_alloc_frag(unsigned int fragsz)
+{
+	fragsz = SKB_DATA_ALIGN(fragsz);
+
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
 /**
  * netdev_alloc_frag - allocate a page fragment
  * @fragsz: fragment size
@@ -388,26 +390,21 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
-	fragsz = SKB_DATA_ALIGN(fragsz);
+	struct page_frag_cache *nc;
+	void *data;
 
-	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(netdev_alloc_frag);
-
-static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
-{
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
-}
-
-void *napi_alloc_frag(unsigned int fragsz)
-{
-	fragsz = SKB_DATA_ALIGN(fragsz);
-
-	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
+	fragsz = SKB_DATA_ALIGN(fragsz);
+	if (in_irq() || irqs_disabled()) {
+		nc = this_cpu_ptr(&netdev_alloc_cache);
+		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
+	} else {
+		local_bh_disable();
+		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
+		local_bh_enable();
+	}
+	return data;
 }
-EXPORT_SYMBOL(napi_alloc_frag);
+EXPORT_SYMBOL(netdev_alloc_frag);
 
 /**
  * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
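For contrast, a minimal sketch of the NAPI-side caller (again with invented foo_* names): a poll callback already runs in softirq context, so napi_alloc_frag() can reach the per-CPU napi_alloc_cache directly, without the IRQ check or the BH toggle that netdev_alloc_frag() now performs.

static int foo_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	while (done < budget) {
		/* Softirq context: goes straight to napi_alloc_cache. */
		void *frag = napi_alloc_frag(FOO_FRAG_SIZE);

		if (!frag)
			break;
		/* ... attach frag to an rx descriptor and pass the
		 * received packet up the stack ...
		 */
		done++;
	}
	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}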