Commit 271d3be1 authored by Jakub Kicinski

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
10GbE Intel Wired LAN Driver Updates 2021-12-28

Alexander Lobakin says:

napi_build_skb(), which I introduced earlier this year ([0]), aims
to reduce MM pressure and the overhead of an in-place
kmem_cache_alloc() on every Rx entry by decaching skbuff_heads
from the NAPI per-CPU cache, which is filled beforehand by
napi_consume_skb() (in effect, a direct shortcut around the
free -> mm -> alloc cycle).
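
As a minimal sketch of the pairing (the foo_* names are illustrative,
not from this series), the Rx and Tx-completion sides of a driver's
NAPI poll handler would look like:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Rx: the skbuff_head comes from the NAPI per-CPU cache instead
	 * of a fresh kmem_cache_alloc(). Safe only in softirq/NAPI
	 * context, as the cache is accessed without locking.
	 */
	static struct sk_buff *foo_build_rx_skb(void *buf,
						unsigned int truesize)
	{
		return napi_build_skb(buf, truesize);
	}

	/* Tx completion: napi_consume_skb() feeds the same per-CPU
	 * cache that napi_build_skb() later draws from.
	 */
	static void foo_free_tx_skb(struct sk_buff *skb, int budget)
	{
		napi_consume_skb(skb, budget);
	}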
Currently, no in-tree drivers use it. Switch all Intel Ethernet
drivers to it to get slight-to-medium perf boosts depending on
the frame size.

ice driver, 50 Gbps link, pktgen + XDP_PASS (local in) sample:

frame_size/nthreads   64/42   128/20   256/8   512/4   1024/2   1532/1

net-next (Kpps)       46062   34654    18248   9830    5343     2714
series (Kpps)         47438   34708    18330   9875    5435     2777
increase              2.9%    0.15%    0.45%   0.46%   1.72%    2.32%

Additionally, e1000 has been switched to napi_consume_skb(), as it
is safe to use there and works fine, and napi_build_skb() makes no
sense without a paired NAPI cache feeding point.
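
(A note on the budget argument, summarizing net/core/skbuff.c
behaviour rather than anything specific to this series: a budget of 0
tells napi_consume_skb() it is being called outside of NAPI context,
and the skb is then freed via dev_consume_skb_any() instead of being
cached. That is why the ring-cleanup and error-unwind paths in the
e1000 diff below pass 0, while the Tx completion path running under
NAPI passes a non-zero value, 64, matching the default NAPI poll
weight:

	/* under NAPI: skbuff_head may be recycled into the cache */
	napi_consume_skb(skb, budget);

	/* outside NAPI, e.g. ifdown ring cleanup: budget 0 makes this
	 * equivalent to dev_consume_skb_any()
	 */
	napi_consume_skb(skb, 0);
)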

[0] https://lore.kernel.org/all/20210213141021.87840-1-alobakin@pm.me

* '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ixgbevf: switch to napi_build_skb()
  ixgbe: switch to napi_build_skb()
  igc: switch to napi_build_skb()
  igb: switch to napi_build_skb()
  ice: switch to napi_build_skb()
  iavf: switch to napi_build_skb()
  i40e: switch to napi_build_skb()
  e1000: switch to napi_build_skb()
  e1000: switch to napi_consume_skb()
====================

Link: https://lore.kernel.org/r/20211228175815.281449-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 4c46625b c1550019

--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1953,7 +1953,8 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
 
 static void
 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
-				 struct e1000_tx_buffer *buffer_info)
+				 struct e1000_tx_buffer *buffer_info,
+				 int budget)
 {
 	if (buffer_info->dma) {
 		if (buffer_info->mapped_as_page)
@@ -1966,7 +1967,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 		buffer_info->dma = 0;
 	}
 	if (buffer_info->skb) {
-		dev_kfree_skb_any(buffer_info->skb);
+		napi_consume_skb(buffer_info->skb, budget);
 		buffer_info->skb = NULL;
 	}
 	buffer_info->time_stamp = 0;
@@ -1990,7 +1991,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 
 	for (i = 0; i < tx_ring->count; i++) {
 		buffer_info = &tx_ring->buffer_info[i];
-		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
 	}
 
 	netdev_reset_queue(adapter->netdev);
@@ -2958,7 +2959,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 			i += tx_ring->count;
 		i--;
 		buffer_info = &tx_ring->buffer_info[i];
-		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
 	}
 
 	return 0;
@@ -3856,7 +3857,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			}
 		}
 
-		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info,
+						 64);
 		tx_desc->upper.data = 0;
 
 		if (unlikely(++i == tx_ring->count))
@@ -4382,7 +4384,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		if (!skb) {
 			unsigned int frag_len = e1000_frag_len(adapter);
 
-			skb = build_skb(data - E1000_HEADROOM, frag_len);
+			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				break;

--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2204,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;
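
The conversions in the XDP-capable drivers are one-liners because
napi_build_skb() takes exactly the same arguments as build_skb(): a
buffer with headroom up front and room for struct skb_shared_info at
the tail, plus the buffer's true size. A hedged sketch of the usual
truesize computation (illustrative, not quoted from these drivers):

	#include <linux/skbuff.h>

	/* headroom + frame share the buffer with the tailing
	 * skb_shared_info; truesize must cover all of it
	 */
	static unsigned int foo_rx_truesize(unsigned int headroom,
					    unsigned int size)
	{
		return SKB_DATA_ALIGN(headroom + size) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}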

--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1366,7 +1366,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
 	net_prefetch(va);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(va - IAVF_SKB_PAD, truesize);
+	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
 	if (unlikely(!skb))
 		return NULL;

--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -948,7 +948,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	 */
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;

--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8367,7 +8367,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;

--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1729,7 +1729,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;

--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2170,7 +2170,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb to around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;

--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,7 +944,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
 	net_prefetch(xdp->data_meta);
 
 	/* build an skb around the page buffer */
-	skb = build_skb(xdp->data_hard_start, truesize);
+	skb = napi_build_skb(xdp->data_hard_start, truesize);
 	if (unlikely(!skb))
 		return NULL;