Commit de78d1f9 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Lock buffer size at 2K even on systems with larger pages

This change locks us in at 2K buffers even on a system that supports larger
frames.  The reason for this change is to make better use of pages and to
reduce the overall truesize of frames generated by igb.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 2e334eee
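
As context for the diff below, here is a minimal, self-contained userspace sketch (not part of the commit) of the page-reuse scheme that a fixed 2K buffer enables: on 4K pages the receive path can simply flip page_offset between the two 2K halves of a page, while on larger pages it walks the offset forward and keeps reusing the page until less than one buffer's worth remains. PAGE_SZ, BUFSZ, ALIGN_SZ and struct rx_buf are illustrative stand-ins for PAGE_SIZE, IGB_RX_BUFSZ, SKB_DATA_ALIGN and the driver's igb_rx_buffer bookkeeping; they are not the driver's actual definitions.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SZ 4096u                    /* assume 4K pages for this demo */
    #define BUFSZ   2048u                    /* fixed 2K Rx buffer, as in the patch */
    #define ALIGN_SZ(x) (((x) + 63u) & ~63u) /* stand-in for SKB_DATA_ALIGN */

    struct rx_buf {
            unsigned int page_offset;        /* where the next frame lands in the page */
    };

    /* Simplified model: advance the offset and report whether the page can be reused. */
    static bool try_reuse(struct rx_buf *b, unsigned int frame_size)
    {
    #if PAGE_SZ < 8192
            /* small pages: two 2K halves, just flip between them */
            (void)frame_size;
            b->page_offset ^= BUFSZ;
            return true;
    #else
            /* larger pages: walk forward until less than one buffer remains */
            b->page_offset += ALIGN_SZ(frame_size);
            return b->page_offset <= PAGE_SZ - BUFSZ;
    #endif
    }

    int main(void)
    {
            struct rx_buf b = { .page_offset = 0 };

            for (int i = 0; i < 4; i++) {
                    bool reuse = try_reuse(&b, 1500);
                    printf("frame %d: next offset %u, reuse=%d\n", i, b.page_offset, reuse);
            }
            return 0;
    }
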
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522

 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
+#define IGB_RXBUFFER_2048  2048
 #define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ       IGB_RXBUFFER_2048

 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE  16
...
@@ -1727,7 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for CPU read */
 		dma_sync_single_for_cpu(rx_ring->dev,
 					rx_buffer_info->dma,
-					PAGE_SIZE / 2,
+					IGB_RX_BUFSZ,
 					DMA_FROM_DEVICE);

 		/* verify contents of skb */
@@ -1737,7 +1737,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* sync Rx buffer for device write */
 		dma_sync_single_for_device(rx_ring->dev,
 					   rx_buffer_info->dma,
-					   PAGE_SIZE / 2,
+					   IGB_RX_BUFSZ,
 					   DMA_FROM_DEVICE);

 		/* unmap buffer on tx side */
...
@@ -554,7 +554,7 @@ static void igb_dump(struct igb_adapter *adapter)
					  16, 1,
					  page_address(buffer_info->page) +
						       buffer_info->page_offset,
-					  PAGE_SIZE/2, true);
+					  IGB_RX_BUFSZ, true);
			}
		}
	}
@@ -3103,11 +3103,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-	srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 #ifdef CONFIG_IGB_PTP
	if (hw->mac.type >= e1000_82580)
@@ -5855,7 +5851,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
-					 PAGE_SIZE / 2,
+					 IGB_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}
@@ -5905,18 +5901,19 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
	}

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, PAGE_SIZE / 2);
+			rx_buffer->page_offset, size, IGB_RX_BUFSZ);

	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_node_id()))
		return false;

+#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= PAGE_SIZE / 2;
+	rx_buffer->page_offset ^= IGB_RX_BUFSZ;

	/*
	 * since we are the only owner of the page and we need to
@@ -5924,6 +5921,16 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
+#else
+	/* move offset up to the next cache line */
+	rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+		return false;
+
+	/* bump ref count on page before it is given to the stack */
+	get_page(page);
+#endif

	return true;
}
@@ -5977,7 +5984,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
-				      PAGE_SIZE / 2,
+				      IGB_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
...