Commit 919e78a6 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: Make better use of memory allocations in one-buffer mode w/ RSC

This patch improves the memory utilization with RSC when in one-buffer
mode.  This is accomplished by making the default buffer sizes match up
with the standard memory allocation sizes minus 1K for shared info and
padding overhead.  By doing this, CPU utilization during large receives
can be reduced by as much as 8%.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 398fe4a9
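The sizing rationale in the commit message can be sanity-checked with a small standalone C sketch (not part of the patch). It mirrors the rx_buf_len ladder this change adds to ixgbe_set_rx_buffer_len() in the last hunk below; SKB_OVERHEAD_GUESS is an assumed stand-in for the roughly 1K of shared-info and padding overhead the message refers to, not a kernel constant. The point is that each of the new 3K/7K/15K sizes plus that overhead lands exactly on a 4K/8K/16K allocation instead of spilling into the next size up.

/*
 * Standalone sketch, not part of the patch: mirrors the buffer-size
 * ladder added to ixgbe_set_rx_buffer_len() and shows how each choice
 * plus an assumed ~1K of skb overhead still fits a power-of-two
 * allocation.  SKB_OVERHEAD_GUESS is an assumption, not a kernel constant.
 */
#include <stdio.h>

#define IXGBE_RXBUFFER_3K    3072
#define IXGBE_RXBUFFER_7K    7168
#define IXGBE_RXBUFFER_15K  15360
#define IXGBE_MAX_RXBUFFER  16384

#define SKB_OVERHEAD_GUESS   1024  /* assumed room for skb_shared_info + padding */

/* Round up to the next power of two, roughly what the slab allocator hands back. */
static unsigned int alloc_size(unsigned int bytes)
{
	unsigned int size = 32;

	while (size < bytes)
		size <<= 1;
	return size;
}

/* Same ladder the patch adds for one-buffer mode with RSC enabled. */
static unsigned int pick_rx_buf_len(unsigned int max_frame)
{
	if (max_frame <= IXGBE_RXBUFFER_3K)
		return IXGBE_RXBUFFER_3K;
	else if (max_frame <= IXGBE_RXBUFFER_7K)
		return IXGBE_RXBUFFER_7K;
	else if (max_frame <= IXGBE_RXBUFFER_15K)
		return IXGBE_RXBUFFER_15K;
	else
		return IXGBE_MAX_RXBUFFER;
}

int main(void)
{
	/* Example max_frame values: standard MTU, a mid-size jumbo, a 9K jumbo. */
	unsigned int frames[] = { 1522, 4022, 9022 };
	unsigned int i;

	for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		unsigned int buf = pick_rx_buf_len(frames[i]);

		printf("max_frame %5u -> rx_buf_len %5u -> %5u byte allocation\n",
		       frames[i], buf, alloc_size(buf + SKB_OVERHEAD_GUESS));
	}
	return 0;
}

Compiled with a plain cc, the sketch prints which allocation size each example max_frame ends up using (4K, 8K, and 16K respectively), which is where the claimed CPU savings come from.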
@@ -72,10 +72,13 @@
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_512    512    /* Used for packet split */
-#define IXGBE_RXBUFFER_2048  2048
-#define IXGBE_RXBUFFER_4096  4096
-#define IXGBE_RXBUFFER_8192  8192
-#define IXGBE_MAX_RXBUFFER  16384    /* largest size for a single descriptor */
+#define IXGBE_RXBUFFER_2K    2048
+#define IXGBE_RXBUFFER_3K    3072
+#define IXGBE_RXBUFFER_4K    4096
+#define IXGBE_RXBUFFER_7K    7168
+#define IXGBE_RXBUFFER_8K    8192
+#define IXGBE_RXBUFFER_15K  15360
+#define IXGBE_MAX_RXBUFFER  16384    /* largest size for a single descriptor */

 /*
  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
...
@@ -1539,7 +1539,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	rx_ring->dev = &adapter->pdev->dev;
 	rx_ring->netdev = adapter->netdev;
 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
-	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;
 	rx_ring->numa_node = adapter->node;

 	err = ixgbe_setup_rx_resources(rx_ring);
...
@@ -499,7 +499,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 				   rx_ring->rx_buf_len, true);

 				if (rx_ring->rx_buf_len
-					< IXGBE_RXBUFFER_2048)
+					< IXGBE_RXBUFFER_2K)
 					print_hex_dump(KERN_INFO, "",
 					   DUMP_PREFIX_ADDRESS, 16, 1,
 					   phys_to_virt(
@@ -2644,9 +2644,9 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
 #endif
 	} else {
-		if (rx_buf_len < IXGBE_RXBUFFER_4096)
+		if (rx_buf_len < IXGBE_RXBUFFER_4K)
 			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+		else if (rx_buf_len < IXGBE_RXBUFFER_8K)
 			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
 		else
 			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
@@ -2879,17 +2879,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82599EB)
 		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

-	/* Set the RX buffer length according to the mode */
-	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-		rx_buf_len = IXGBE_RX_HDR_SIZE;
-	} else {
-		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-		    (netdev->mtu <= ETH_DATA_LEN))
-			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-		else
-			rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
-	}
-
 #ifdef IXGBE_FCOE
 	/* adjust max frame to be able to do baby jumbo for FCoE */
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -2905,6 +2894,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 	}

+	/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
+	max_frame += VLAN_HLEN;
+
+	/* Set the RX buffer length according to the mode */
+	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+		rx_buf_len = IXGBE_RX_HDR_SIZE;
+	} else {
+		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+		    (netdev->mtu <= ETH_DATA_LEN))
+			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		/*
+		 * Make best use of allocation by using all but 1K of a
+		 * power of 2 allocation that will be used for skb->head.
+		 */
+		else if (max_frame <= IXGBE_RXBUFFER_3K)
+			rx_buf_len = IXGBE_RXBUFFER_3K;
+		else if (max_frame <= IXGBE_RXBUFFER_7K)
+			rx_buf_len = IXGBE_RXBUFFER_7K;
+		else if (max_frame <= IXGBE_RXBUFFER_15K)
+			rx_buf_len = IXGBE_RXBUFFER_15K;
+		else
+			rx_buf_len = IXGBE_MAX_RXBUFFER;
+	}
+
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
...