Commit 1ab37e12 authored by Emil Tantilov, committed by Jeff Kirsher

ixgbevf: add support for padding packet

Following the logic from commit 2de6aa3a
("ixgbe: Add support for padding packet")

Add support for providing a buffer with headroom and tailroom to allow
for shared info, NET_SKB_PAD, and NET_IP_ALIGN. Combined with the DMA
changes, this lets us start using build_skb to build frames around an
incoming Rx buffer instead of having to memcpy the headers.
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent f2d00eca
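The headroom/tailroom budget the message refers to is driven by two constants. The diff below uses them without showing their definitions, so here is a minimal sketch assuming they mirror the ixgbe versions introduced by commit 2de6aa3a; the exact values depend on kernel configuration:

#include <linux/skbuff.h>	/* NET_SKB_PAD, SKB_WITH_OVERHEAD */

/* Headroom in front of the received frame: room for the stack to push
 * headers (NET_SKB_PAD) plus IP header alignment (NET_IP_ALIGN).
 * Assumed to mirror ixgbe's IXGBE_SKB_PAD.
 */
#define IXGBEVF_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)

/* Largest frame that still fits in a 2K buffer once the headroom above
 * and the skb_shared_info tailroom (accounted by SKB_WITH_OVERHEAD) are
 * budgeted.  Assumed to mirror ixgbe's IXGBE_MAX_FRAME_BUILD_SKB;
 * IXGBEVF_RXBUFFER_2048 is the driver's existing 2048-byte buffer size.
 */
#define IXGBEVF_MAX_FRAME_BUILD_SKB \
	(SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)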
@@ -90,6 +90,7 @@ struct ixgbevf_rx_queue_stats {
 enum ixgbevf_ring_state_t {
 	__IXGBEVF_RX_3K_BUFFER,
+	__IXGBEVF_RX_BUILD_SKB_ENABLED,
 	__IXGBEVF_TX_DETECT_HANG,
 	__IXGBEVF_HANG_CHECK_ARMED,
 };
@@ -179,11 +180,21 @@ struct ixgbevf_ring {
 #define clear_ring_uses_large_buffer(ring) \
 	clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)

+#define ring_uses_build_skb(ring) \
+	test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define set_ring_build_skb_enabled(ring) \
+	set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define clear_ring_build_skb_enabled(ring) \
+	clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
 static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
 	if (ring_uses_large_buffer(ring))
 		return IXGBEVF_RXBUFFER_3072;
+
+	if (ring_uses_build_skb(ring))
+		return IXGBEVF_MAX_FRAME_BUILD_SKB;
 #endif
 	return IXGBEVF_RXBUFFER_2048;
 }
...
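With ixgbevf_rx_bufsz() above returning the padded budget, the main Rx path (in the hunks below) can eventually hand a buffer straight to build_skb(). This patch only adds the plumbing; a sketch of the eventual consumer, assuming it follows ixgbe's ixgbe_build_skb() pattern (the helper name and details here are illustrative, not part of this patch):

static struct sk_buff *ixgbevf_build_skb_sketch(void *va, unsigned int size)
{
	/* va points at the frame; the buffer itself starts IXGBEVF_SKB_PAD
	 * bytes earlier, because the allocation path below offsets
	 * page_offset by ixgbevf_rx_offset().
	 */
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size);
	struct sk_buff *skb;

	skb = build_skb(va - IXGBEVF_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, IXGBEVF_SKB_PAD);	/* skip past the headroom */
	__skb_put(skb, size);			/* claim the frame, no memcpy */

	return skb;
}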
@@ -554,6 +554,11 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
 	return true;
 }

+static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
+}
+
 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 				      struct ixgbevf_rx_buffer *bi)
 {
@@ -588,7 +593,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
-	bi->page_offset = 0;
+	bi->page_offset = ixgbevf_rx_offset(rx_ring);
 	bi->pagecnt_bias = 1;

 	rx_ring->rx_stats.alloc_rx_page++;
@@ -803,7 +808,9 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
 #endif
 	unsigned int pull_len;
@@ -1776,8 +1783,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
 	ixgbevf_configure_srrctl(adapter, ring, reg_idx);

-	/* allow any size packet since we can handle overflow */
-	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+	/* RXDCTL.RLPML does not work on 82599 */
+	if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
+		rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+			    IXGBE_RXDCTL_RLPML_EN);
+
+#if (PAGE_SIZE < 8192)
+		/* Limit the maximum frame size so we don't overrun the skb */
+		if (ring_uses_build_skb(ring) &&
+		    !ring_uses_large_buffer(ring))
+			rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
+				  IXGBE_RXDCTL_RLPML_EN;
+#endif
+	}

 	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
 	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
@@ -1793,11 +1811,14 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
 	unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

 	/* set build_skb and buffer size flags */
+	clear_ring_build_skb_enabled(rx_ring);
 	clear_ring_uses_large_buffer(rx_ring);

 	if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
 		return;

+	set_ring_build_skb_enabled(rx_ring);
+
 #if (PAGE_SIZE < 8192)
 	if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
 		return;
@@ -3890,6 +3911,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;

+	if (netif_running(netdev))
+		ixgbevf_reinit_locked(adapter);
+
 	return 0;
 }
...
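For a sense of the numbers: with typical x86_64 values the padded 2K buffer still holds a default-MTU frame, which is why ixgbevf_set_rx_buffer_len() only falls through to 3K large buffers for jumbo-sized max_frame. The figures below are illustrative only, since NET_SKB_PAD, NET_IP_ALIGN, and sizeof(struct skb_shared_info) all vary with kernel configuration:

/* Illustrative x86_64 budget for the padded 2K buffer:
 *
 *   2048   IXGBEVF_RXBUFFER_2048
 * -  ~320  SKB_DATA_ALIGN(sizeof(struct skb_shared_info))  (tailroom)
 * -    64  IXGBEVF_SKB_PAD = NET_SKB_PAD (64) + NET_IP_ALIGN (0)
 * = ~1664  IXGBEVF_MAX_FRAME_BUILD_SKB
 *
 * Default MTU: max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * which fits; a jumbo MTU does not, so the driver switches to 3K buffers.
 */

If this follows the ixgbe precedent, the whole padded path can also be disabled at runtime via the IXGBEVF_FLAGS_LEGACY_RX flag checked in ixgbevf_set_rx_buffer_len() above, presumably exposed as an ethtool private flag.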