Commit 1abeacc1 authored by Michael Chan, committed by David S. Miller

bnxt_en: Fix first buffer size calculations for XDP multi-buffer

The size of the first buffer is always the page size, and the usable
space is the page size minus the offset and the skb_shared_info size.
Make sure the SKB and XDP buffer sizes match so that skb_shared_info
is at the same offset as seen from both the SKB and the XDP buffer.

build_skb() should be passed PAGE_SIZE.  xdp_init_buff() should
be passed PAGE_SIZE as well.  xdp_get_shared_info_from_buff() will
automatically deduct the skb_shared_info size if the XDP buffer
has frags.  There is no need to keep bp->xdp_has_frags.
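
As a worked illustration of this size math, here is a standalone
userspace sketch (not driver code; the headroom and skb_shared_info
sizes below are assumed round numbers, not the kernel's exact values):

  #include <stdio.h>

  #define PAGE_SZ   4096u  /* assumed 4K page */
  #define HEADROOM   256u  /* stand-in for XDP_PACKET_HEADROOM */
  #define SHINFO_SZ  320u  /* stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

  int main(void)
  {
          /* Usable space in the first buffer: page size minus the
           * data offset and the trailing skb_shared_info.
           */
          unsigned int usable = PAGE_SZ - HEADROOM - SHINFO_SZ;

          /* build_skb() and xdp_init_buff() are both told the buffer
           * is the full page, so both compute the same shared-info
           * offset from the start of the page.
           */
          unsigned int shinfo_off = PAGE_SZ - SHINFO_SZ;

          printf("usable %u bytes, skb_shared_info at offset %u\n",
                 usable, shinfo_off);
          return 0;
  }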

Rename BNXT_PAGE_MODE_BUF_SIZE to BNXT_MAX_PAGE_MODE_MTU_SBUF,
since this constant is really an MTU: the usable single-buffer
size with the Ethernet header size subtracted.

Also fix the BNXT_MAX_PAGE_MODE_MTU macro by adding proper parentheses.
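
For illustration, the kind of misexpansion the missing parentheses
allowed (stand-in constants, not the driver's values):

  #include <stdio.h>

  #define SBUF   4096u  /* stand-in for BNXT_MAX_PAGE_MODE_MTU_SBUF */
  #define SHINFO  320u  /* stand-in for the aligned skb_shared_info size */

  #define MTU_OLD  SBUF - SHINFO    /* unparenthesized, as before the fix */
  #define MTU_NEW  (SBUF - SHINFO)  /* parenthesized, as after the fix */

  int main(void)
  {
          /* In a larger expression the unparenthesized macro binds
           * incorrectly: 2 * 4096 - 320 = 7872, while the intended
           * value is 2 * (4096 - 320) = 7552.
           */
          printf("old: %u, new: %u\n", 2 * MTU_OLD, 2 * MTU_NEW);
          return 0;
  }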

Fixes: 32861236 ("bnxt: change receive ring space parameters")
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9b3e6078
@@ -991,8 +991,7 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 	dma_addr -= bp->rx_dma_offset;
 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
-					    bp->rx_dma_offset);
+	skb = build_skb(page_address(page), PAGE_SIZE);
 	if (!skb) {
 		__free_page(page);
 		return NULL;
@@ -3969,8 +3968,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		rx_space = BNXT_PAGE_MODE_BUF_SIZE;
-		rx_size = BNXT_MAX_PAGE_MODE_MTU;
+		rx_space = PAGE_SIZE;
+		rx_size = PAGE_SIZE -
+			  ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	} else {
 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
 		rx_space = rx_size + NET_SKB_PAD +
@@ -591,12 +591,20 @@ struct nqe_cn {
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
 #define BNXT_MAX_MTU	9500
 
-#define BNXT_PAGE_MODE_BUF_SIZE \
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +-------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size | skb_shared_info |
+ * | (bp->rx_dma_offset) |                     |                 |
+ * +-------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
 	 XDP_PACKET_HEADROOM)
 #define BNXT_MAX_PAGE_MODE_MTU	\
-	BNXT_PAGE_MODE_BUF_SIZE - \
-	SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
+	(BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE	52
@@ -2134,7 +2142,6 @@ struct bnxt {
 #define BNXT_DUMP_CRASH		1
 
 	struct bpf_prog		*xdp_prog;
-	u8			xdp_has_frags;
 
 	struct bnxt_ptp_cfg	*ptp_cfg;
 	u8			ptp_all_rx_tstamp;
@@ -193,9 +193,6 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
 	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
 
-	if (bp->xdp_has_frags)
-		buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
-
 	xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
 	xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
 }
@@ -404,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
 		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
 		return -EOPNOTSUPP;
 	}
-	if (prog) {
+	if (prog)
 		tx_xdp = bp->rx_nr_rings;
-		bp->xdp_has_frags = prog->aux->xdp_has_frags;
-	}
 
 	tc = netdev_get_num_tc(dev);
 	if (!tc)