Commit c61bcebd authored by Maciej Fijalkowski, committed by Daniel Borkmann

ice: Prepare legacy-rx for upcoming XDP multi-buffer support

The Rx path is going to be modified so that a fragmented frame is
gathered within the xdp_buff in the first place. This approach implies
that the underlying buffer has to provide tailroom for skb_shared_info.
That is currently the case when the ring uses build_skb, but not when
the legacy-rx knob is turned on. That mode configures 2k Rx buffers and
has no way to provide either headroom or tailroom - FWIW, it currently
has XDP_PACKET_HEADROOM, which is broken and is removed here. 2k Rx
buffers were used so that the driver could support a 9k MTU in this
setting, as it can chain up to 5 Rx buffers. With the offset configured,
the HW writing 2k of data crossed the half-page boundary, which broke
the assumption behind our internal page recycling tricks, as the sketch
below illustrates.
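
A back-of-the-envelope sketch of the broken layout, assuming 4k pages
split into two 2k halves for recycling and the usual 256-byte
XDP_PACKET_HEADROOM (the macro names below are illustrative, not from
the driver):

        /* Illustration only, not driver code. A 4k page is split into two
         * 2k halves so each half can be recycled independently. With a
         * 256-byte headroom in front of the payload, a full 2k HW write
         * ends at 256 + 2048 = 2304, i.e. 256 bytes into the second half.
         */
        #define ICE_HALF_PAGE 2048 /* one 2k Rx buffer (half of a 4k page) */
        #define XDP_HEADROOM   256 /* XDP_PACKET_HEADROOM */

        _Static_assert(XDP_HEADROOM + ICE_HALF_PAGE > ICE_HALF_PAGE,
                       "2k write at a 256B offset crosses the half-page boundary");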

Now, if the above were fixed but the legacy-rx path were left as is,
referring to skb_shared_info via xdp_get_shared_info_from_buff() would
corrupt the packet's contents again. Hence the Rx buffer size needs to
be lowered, and with it the supported MTU. This lets us keep a unified
data path, and legacy-rx users (if any) with an 8k MTU would still be
good to go. However, the tendency is to drop support for this code path
at some point.

Add ICE_RXBUF_1664 as vsi::rx_buf_len and ICE_MAX_FRAME_LEGACY_RX (8320)
as vsi::max_frame for legacy-rx; see the sizing sketch below. For bigger
page sizes, configure 3k Rx buffers, not 2k.
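
Where 1664 and 8320 plausibly come from, as a hedged sketch: assuming
skb_shared_info occupies about 320 bytes after SKB_DATA_ALIGN on 64-bit
builds, and that the HW wants buffer lengths in 128-byte steps:

        /* Hedged sizing sketch, not taken from the commit itself:
         *
         *   2048 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
         *        ~ 2048 - 320 = 1728      payload that still leaves tailroom
         *   rounddown(1728, 128) = 1664   -> ICE_RXBUF_1664
         *   1664 * 5 (ICE_MAX_CHAINED_RX_BUFS) = 8320
         *                                 -> ICE_MAX_FRAME_LEGACY_RX
         */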

Since headroom support is removed, disable data_meta support on
legacy-rx. When preparing the XDP buff, rely on the
ice_rx_ring::rx_offset setting to decide whether data_meta is supported.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-2-maciej.fijalkowski@intel.com
parent c1a3daf7
@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
 {
 	if (ice_ring_uses_build_skb(rx_ring))
 		return ICE_SKB_PAD;
-	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
-		return XDP_PACKET_HEADROOM;
-
 	return 0;
 }
...
@@ -1992,8 +1992,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
 void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
 {
 	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
-		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-		vsi->rx_buf_len = ICE_RXBUF_2048;
+		vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+		vsi->rx_buf_len = ICE_RXBUF_1664;
 #if (PAGE_SIZE < 8192)
 	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
 		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
@@ -2002,11 +2002,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
 #endif
 	} else {
 		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-#if (PAGE_SIZE < 8192)
 		vsi->rx_buf_len = ICE_RXBUF_3072;
-#else
-		vsi->rx_buf_len = ICE_RXBUF_2048;
-#endif
 	}
 }
...
@@ -7336,8 +7336,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
  */
 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
 {
-	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
-		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
+	if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+		return ICE_RXBUF_1664;
 	else
 		return ICE_RXBUF_3072;
 }
@@ -7370,6 +7370,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 				   frame_size - ICE_ETH_PKT_HDR_PAD);
 			return -EINVAL;
 		}
+	} else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
+		if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
+			netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
+				   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
+			return -EINVAL;
+		}
 	}

 	/* if a reset is in progress, wait for some time for it to complete */
...
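
The new ice_change_mtu() branch above caps legacy-rx MTUs against the
8320-byte frame limit. Assuming ICE_ETH_PKT_HDR_PAD expands to
ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN (the exact define lives in the
driver headers), the largest MTU the check accepts would be:

        /* Assumed: ICE_ETH_PKT_HDR_PAD = ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN
         *                              = 14 + 4 + 8 = 26 bytes, so:
         *
         *   ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD = 8320 - 26 = 8294
         */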
@@ -984,17 +984,15 @@ static struct sk_buff *
 ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		  struct xdp_buff *xdp)
 {
-	unsigned int metasize = xdp->data - xdp->data_meta;
 	unsigned int size = xdp->data_end - xdp->data;
 	unsigned int headlen;
 	struct sk_buff *skb;

 	/* prefetch first cache line of first page */
-	net_prefetch(xdp->data_meta);
+	net_prefetch(xdp->data);

 	/* allocate a skb to store the frags */
-	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-			       ICE_RX_HDR_SIZE + metasize,
+	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
 			       GFP_ATOMIC | __GFP_NOWARN);
 	if (unlikely(!skb))
 		return NULL;
@@ -1006,13 +1004,8 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

 	/* align pull length to size of long to optimize memcpy performance */
-	memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
-	       ALIGN(headlen + metasize, sizeof(long)));
-
-	if (metasize) {
-		skb_metadata_set(skb, metasize);
-		__skb_pull(skb, metasize);
-	}
+	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+							 sizeof(long)));

 	/* if we exhaust the linear part then add what is left as a frag */
 	size -= headlen;
@@ -1187,7 +1180,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
 			     offset;
-		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+		xdp_prepare_buff(&xdp, hard_start, offset, size, !!offset);
 #if (PAGE_SIZE > 4096)
 		/* At larger PAGE_SIZE, frame_sz depend on len size */
 		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
...
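
The !!offset change in the hunk above is what turns data_meta off for
legacy-rx: the last argument of the in-tree xdp_prepare_buff() helper
(include/net/xdp.h) is meta_valid, and ice_rx_offset() now returns 0 on
legacy-rx rings. A minimal sketch of the resulting behaviour, not a
verbatim excerpt:

        /* meta_valid == true  -> xdp->data_meta = xdp->data     (meta usable)
         * meta_valid == false -> xdp->data_meta = xdp->data + 1 (meta disabled)
         *
         * build_skb ring: offset = ICE_SKB_PAD -> !!offset = true  -> meta ok
         * legacy-rx ring: offset = 0           -> !!offset = false -> no meta
         */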
@@ -9,10 +9,12 @@
 #define ICE_DFLT_IRQ_WORK	256
 #define ICE_RXBUF_3072		3072
 #define ICE_RXBUF_2048		2048
+#define ICE_RXBUF_1664		1664
 #define ICE_RXBUF_1536		1536
 #define ICE_MAX_CHAINED_RX_BUFS	5
 #define ICE_MAX_BUF_TXD		8
 #define ICE_MIN_TX_LEN		17
+#define ICE_MAX_FRAME_LEGACY_RX	8320

 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
  * In order to align with the read requests we will align the value to
...
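
A hypothetical compile-time check (not part of the commit) would make
the relationship between the new defines explicit:

        /* Hypothetical sanity check: the legacy-rx frame cap is exactly a
         * full chain of five 1664-byte buffers.
         */
        static_assert(ICE_RXBUF_1664 * ICE_MAX_CHAINED_RX_BUFS ==
                      ICE_MAX_FRAME_LEGACY_RX, "1664 * 5 == 8320");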