Commit aa3cc8a9 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2021-12-17

Maciej Fijalkowski says:

It seems that the previous [0] Rx fix was not enough and there are
still issues with AF_XDP Rx ZC support in the ice driver. Elza reported
that with multiple XSK sockets configured on a single netdev, some of
them were becoming dead after a while. We have spotted more things that
needed to be addressed this time; more information can be found in the
individual commit messages.

The series also carries Alexandr's patch that was sent previously and
overlaps with this set.

[0]: https://lore.kernel.org/bpf/20211129231746.2767739-1-anthony.l.nguyen@intel.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 158b515f dcbaf72a
@@ -6,6 +6,18 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+        rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+        return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+        rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+        return !!rx_ring->rx_buf;
+}
+
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
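A note on why two helpers are needed: the Rx software ring does double
duty. In struct ice_rx_ring the buffer array is, as far as I can tell, a
union of the two pointer types, so the copy-mode rx_buf array and the
zero-copy xdp_buf array overlay the same storage and must each be sized
for their own element type; reusing memory sized for struct ice_rx_buf
as an array of xdp_buff pointers is exactly the fragile sharing this
change removes. A minimal sketch of the assumed layout (not the full
struct ice_rx_ring definition):

        struct rx_ring_sketch {
                u16 count;                              /* descriptors in the ring */
                union {
                        struct ice_rx_buf *rx_buf;      /* copy-mode SW ring */
                        struct xdp_buff **xdp_buf;      /* AF_XDP ZC SW ring */
                };
        };

Only one array can be live at a time, which is why each mode now
kcalloc()s its own.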
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
         xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                          ring->q_index, ring->q_vector->napi.napi_id);
 
+        kfree(ring->rx_buf);
         ring->xsk_pool = ice_xsk_pool(ring);
         if (ring->xsk_pool) {
+                if (!ice_alloc_rx_buf_zc(ring))
+                        return -ENOMEM;
                 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
                 ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                         dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                  ring->q_index);
         } else {
+                if (!ice_alloc_rx_buf(ring))
+                        return -ENOMEM;
                 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                         /* coverity[check_return] */
                         xdp_rxq_info_reg(&ring->xdp_rxq,
...
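Taken together, the two hunks above make ice_vsi_cfg_rxq() drop
whatever SW ring the queue had and allocate the flavor matching the new
mode. Assuming the two pointers really do share a union slot, the
single kfree(ring->rx_buf) releases whichever array was installed
before. A condensed sketch of the resulting flow (hypothetical helper
name; the surrounding xdp_rxq registration steps are omitted):

        static int rxq_swap_sw_ring(struct ice_rx_ring *ring)
        {
                kfree(ring->rx_buf);    /* previous array, either flavor */
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool)
                        return ice_alloc_rx_buf_zc(ring) ? 0 : -ENOMEM;
                return ice_alloc_rx_buf(ring) ? 0 : -ENOMEM;
        }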
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
         }
 
 rx_skip_free:
-        memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+        if (rx_ring->xsk_pool)
+                memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+        else
+                memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
 
         /* Zero out the descriptor ring */
         size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
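The move from an open-coded sizeof() * count to array_size() is a small
hardening win: array_size() from <linux/overflow.h> multiplies with
overflow checking and saturates to SIZE_MAX if the product would wrap,
so a corrupted ring count fails loudly instead of producing a short
memset over live memory. Illustrative helper (hypothetical name):

        static size_t sw_ring_bytes(const struct ice_rx_ring *rx_ring)
        {
                /* returns SIZE_MAX, not a wrapped value, on overflow */
                return array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf));
        }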
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
         if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
         rx_ring->xdp_prog = NULL;
-        devm_kfree(rx_ring->dev, rx_ring->rx_buf);
-        rx_ring->rx_buf = NULL;
+        if (rx_ring->xsk_pool) {
+                kfree(rx_ring->xdp_buf);
+                rx_ring->xdp_buf = NULL;
+        } else {
+                kfree(rx_ring->rx_buf);
+                rx_ring->rx_buf = NULL;
+        }
 
         if (rx_ring->desc) {
                 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
         /* warn if we are about to overwrite the pointer */
         WARN_ON(rx_ring->rx_buf);
         rx_ring->rx_buf =
-                devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
-                             GFP_KERNEL);
+                kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
         if (!rx_ring->rx_buf)
                 return -ENOMEM;
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
         return 0;
 
 err:
-        devm_kfree(dev, rx_ring->rx_buf);
+        kfree(rx_ring->rx_buf);
         rx_ring->rx_buf = NULL;
         return -ENOMEM;
 }
...
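The devm_kcalloc()/devm_kfree() to kcalloc()/kfree() conversion is what
makes the per-mode switch in ice_vsi_cfg_rxq() legal: devm memory
belongs to the struct device and is released only at detach, so it must
never be handed to plain kfree(). Since the SW ring is now freed and
reallocated on every queue reconfigure, caller-managed allocation is
the right tool. Illustrative contrast:

        struct device *dev = rx_ring->dev;
        struct ice_rx_buf *buf;
        u16 n = rx_ring->count;

        /* device-managed: auto-freed at detach, must not be kfree()d */
        buf = devm_kcalloc(dev, n, sizeof(*buf), GFP_KERNEL);

        /* caller-managed: may be kfree()d and reallocated at will */
        buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);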
@@ -24,7 +24,6 @@
 #define ICE_MAX_DATA_PER_TXD_ALIGNED \
         (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
 
-#define ICE_RX_BUF_WRITE        16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG    128
 
 /* Attempt to maximize the headroom available for incoming frames. We use a 2K
...
@@ -12,6 +12,11 @@
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"
 
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+        return &rx_ring->xdp_buf[idx];
+}
+
 /**
  * ice_qp_reset_stats - Resets all stats for rings of given index
  * @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
         dma_addr_t dma;
 
         rx_desc = ICE_RX_DESC(rx_ring, ntu);
-        xdp = &rx_ring->xdp_buf[ntu];
+        xdp = ice_xdp_buf(rx_ring, ntu);
 
         nb_buffs = min_t(u16, count, rx_ring->count - ntu);
         nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
         }
 
         ntu += nb_buffs;
-        if (ntu == rx_ring->count) {
-                rx_desc = ICE_RX_DESC(rx_ring, 0);
-                xdp = rx_ring->xdp_buf;
+        if (ntu == rx_ring->count)
                 ntu = 0;
-        }
-
-        /* clear the status bits for the next_to_use descriptor */
-        rx_desc->wb.status_error0 = 0;
 
         ice_release_rx_desc(rx_ring, ntu);
 
         return count == nb_buffs;
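With xsk_buff_alloc_batch(), a single refill never crosses the end of
the ring: nb_buffs is clamped to rx_ring->count - ntu, so when
next_to_use reaches the end it simply resets to 0 and the following
call continues from the start. That is why the old wrap block, which
re-pointed rx_desc and xdp at index 0 and pre-cleared status bits, can
go away. Worked example of the clamp (hypothetical helper name):

        static u16 zc_batch_room(const struct ice_rx_ring *rx_ring, u16 wanted, u16 ntu)
        {
                /* e.g. count = 512, ntu = 508, wanted = 64: only 4 buffers
                 * are placed now; the next call starts again at index 0
                 */
                return min_t(u16, wanted, rx_ring->count - ntu);
        }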
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-        struct xdp_buff *xdp = *xdp_arr;
+        unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
         unsigned int metasize = xdp->data - xdp->data_meta;
         unsigned int datasize = xdp->data_end - xdp->data;
-        unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
         struct sk_buff *skb;
 
         skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
         skb_metadata_set(skb, metasize);
 
         xsk_buff_free(xdp);
-        *xdp_arr = NULL;
         return skb;
 }
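ice_construct_skb_zc() copies the frame out of the UMEM buffer into a
freshly allocated skb and immediately recycles the xsk buffer, so once
the xdp_buff is passed directly there is no SW-ring slot left to NULL
out. A simplified model of the copy-out path, error handling trimmed
(hypothetical helper name; the real function also propagates metadata
via skb_metadata_set()):

        static struct sk_buff *zc_copy_to_skb(struct napi_struct *napi,
                                              struct xdp_buff *xdp)
        {
                unsigned int hard = xdp->data_end - xdp->data_hard_start;
                unsigned int len = xdp->data_end - xdp->data;
                struct sk_buff *skb;

                skb = __napi_alloc_skb(napi, hard, GFP_ATOMIC | __GFP_NOWARN);
                if (!skb)
                        return NULL;    /* buffer stays put for a later retry */

                skb_reserve(skb, xdp->data - xdp->data_hard_start);
                memcpy(__skb_put(skb, len), xdp->data, len);
                xsk_buff_free(xdp);     /* UMEM buffer returns to the pool */
                return skb;
        }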
@@ -507,7 +505,6 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 {
         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
         struct ice_tx_ring *xdp_ring;
         unsigned int xdp_xmit = 0;
         struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
         while (likely(total_rx_packets < (unsigned int)budget)) {
                 union ice_32b_rx_flex_desc *rx_desc;
                 unsigned int size, xdp_res = 0;
-                struct xdp_buff **xdp;
+                struct xdp_buff *xdp;
                 struct sk_buff *skb;
                 u16 stat_err_bits;
                 u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                  */
                 dma_rmb();
 
+                xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
                 size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                    ICE_RX_FLX_DESC_PKT_LEN_M;
-                if (!size)
-                        break;
+                if (!size) {
+                        xdp->data = NULL;
+                        xdp->data_end = NULL;
+                        xdp->data_hard_start = NULL;
+                        xdp->data_meta = NULL;
+                        goto construct_skb;
+                }
 
-                xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-                xsk_buff_set_size(*xdp, size);
-                xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+                xsk_buff_set_size(xdp, size);
+                xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 
-                xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+                xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
                 if (xdp_res) {
                         if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                 xdp_xmit |= xdp_res;
                         else
-                                xsk_buff_free(*xdp);
+                                xsk_buff_free(xdp);
 
-                        *xdp = NULL;
                         total_rx_bytes += size;
                         total_rx_packets++;
-                        cleaned_count++;
 
                         ice_bump_ntc(rx_ring);
                         continue;
                 }
 
+construct_skb:
                 /* XDP_PASS path */
                 skb = ice_construct_skb_zc(rx_ring, xdp);
                 if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                         break;
                 }
 
-                cleaned_count++;
                 ice_bump_ntc(rx_ring);
 
                 if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                 ice_receive_skb(rx_ring, skb, vlan_tag);
         }
 
-        if (cleaned_count >= ICE_RX_BUF_WRITE)
-                failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+        failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
 
         ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
         ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
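Dropping the ICE_RX_BUF_WRITE threshold (deleted from ice_txrx.h above)
changes the refill policy: rather than topping up only after 16 or more
descriptors were consumed, every NAPI poll now refills exactly the
number of free slots reported by ICE_DESC_UNUSED(), and the error-prone
cleaned_count bookkeeping disappears. As I read that macro, it keeps
one slot open so that next_to_use == next_to_clean still means an
empty ring:

        /* free = ((ntc > ntu) ? 0 : count) + ntc - ntu - 1
         * e.g. count = 64, ntu = 10, ntc = 4:
         *      64 + 4 - 10 - 1 = 57 free descriptors
         */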
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-        u16 i;
+        u16 count_mask = rx_ring->count - 1;
+        u16 ntc = rx_ring->next_to_clean;
+        u16 ntu = rx_ring->next_to_use;
 
-        for (i = 0; i < rx_ring->count; i++) {
-                struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
-
-                if (!xdp)
-                        continue;
-                *xdp = NULL;
+        for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+                struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
+
+                xsk_buff_free(xdp);
         }
 }
...
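The ice_xsk_clean_rx_ring() rewrite fixes two bugs at once: the old
loop tested the address of the ring slot (&rx_ring->xdp_buf[i], never
NULL) instead of the stored pointer, and it only NULLed entries rather
than returning buffers to the XSK pool, leaking them. The new walk
visits only the [next_to_clean, next_to_use) range the driver actually
owns and calls xsk_buff_free() on each entry. The index arithmetic
assumes the ring size is a power of two, the usual constraint for
these descriptor rings:

        /* with count a power of two, mask = count - 1 gives cheap wrap:
         * count = 64 -> mask = 0x3f: 62 -> 63 -> 0 -> 1 -> ...
         */
        ntc = (ntc + 1) & count_mask;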