Commit 67fd893e authored by Alexander Duyck, committed by David S. Miller

ethernet/intel: Use napi_alloc_skb

This change replaces calls to netdev_alloc_skb_ip_align with
napi_alloc_skb.  The advantage of napi_alloc_skb is currently that its
page allocation does not require any IRQ-disabling calls.

There are a few spots where I couldn't replace the calls, as the buffer
allocation routine is called as part of init, which is outside of
softirq context.

Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fd11a83d
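
The conversion is mechanical wherever a NAPI context is reachable from the
ring or adapter.  Below is a minimal sketch of the pattern, not code from
any of the drivers touched here; the foo_* names are hypothetical, and real
drivers hang the NAPI context off a queue vector or the adapter itself.
Note that napi_alloc_skb() is only safe from NAPI poll (softirq) context,
which is why the init-time allocation paths mentioned above stay on
netdev_alloc_skb_ip_align().

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical ring layout, for illustration only. */
struct foo_q_vector {
	struct napi_struct napi;
};

struct foo_ring {
	struct foo_q_vector *q_vector;
	unsigned long alloc_failed;
};

static struct sk_buff *foo_alloc_rx_skb(struct foo_ring *ring,
					unsigned int bufsz)
{
	/* before: netdev_alloc_skb_ip_align(ring->netdev, bufsz) */
	struct sk_buff *skb = napi_alloc_skb(&ring->q_vector->napi, bufsz);

	if (unlikely(!skb))
		ring->alloc_failed++;	/* count failures, as these drivers do */

	return skb;
}

Both allocators reserve NET_SKB_PAD + NET_IP_ALIGN headroom, so the IP
header stays aligned; the NAPI variant draws from a page-fragment cache
that is only touched from softirq context, letting it skip the local IRQ
disable/enable that the netdev variant needs.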
drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4100,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
 					   unsigned int bufsz)
 {
-	struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz);
+	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
 
 	if (unlikely(!skb))
 		adapter->alloc_rx_buff_failed++;
drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
-				netdev_alloc_skb_ip_align(netdev, length);
+				napi_alloc_skb(&adapter->napi, length);
 			if (new_skb) {
 				skb_copy_to_linear_data_offset(new_skb,
 							       -NET_IP_ALIGN,
drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -308,7 +308,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 #endif
 
 	/* allocate a skb to store the frags */
-	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-					FM10K_RX_HDR_LEN);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+			     FM10K_RX_HDR_LEN);
 	if (unlikely(!skb)) {
 		rx_ring->rx_stats.alloc_failed++;
drivers/net/ethernet/intel/igb/igb_main.c
@@ -6644,8 +6644,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 #endif
 
 	/* allocate a skb to store the frags */
-	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-					IGB_RX_HDR_LEN);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
 	if (unlikely(!skb)) {
 		rx_ring->rx_stats.alloc_failed++;
 		return NULL;
drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
  * this should improve performance for small packets with large amounts
  * of reassembly being done in the stack
  */
-static void ixgb_check_copybreak(struct net_device *netdev,
+static void ixgb_check_copybreak(struct napi_struct *napi,
 				 struct ixgb_buffer *buffer_info,
 				 u32 length, struct sk_buff **skb)
 {
@@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev,
 	if (length > copybreak)
 		return;
 
-	new_skb = netdev_alloc_skb_ip_align(netdev, length);
+	new_skb = napi_alloc_skb(napi, length);
 	if (!new_skb)
 		return;
 
@@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
 			goto rxdesc_done;
 		}
 
-		ixgb_check_copybreak(netdev, buffer_info, length, &skb);
+		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
 
 		/* Good Receive */
 		skb_put(skb, length);
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1913,7 +1913,7 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
 #endif
 
 	/* allocate a skb to store the frags */
-	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-					IXGBE_RX_HDR_SIZE);
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+			     IXGBE_RX_HDR_SIZE);
 	if (unlikely(!skb)) {
 		rx_ring->rx_stats.alloc_rx_buff_failed++;