Commit ed0907e3 authored by Magnus Karlsson, committed by Tony Nguyen

ice: fix napi work done reporting in xsk path

Fix the wrong napi work done reporting in the xsk path of the ice
driver. The code in the main Rx processing loop was written to assume
that the buffer allocation code returns true if all allocations were
successful and false if not. In contrast with all other Intel NIC xsk
drivers, the ice_alloc_rx_bufs_zc() has the inverted logic messing up
the work done reporting in the napi loop.

This can be fixed either by inverting the return value from
ice_alloc_rx_bufs_zc() in the function that uses this in an incorrect
way, or by changing the return value of ice_alloc_rx_bufs_zc(). We
chose the latter as it makes all the xsk allocation functions for
Intel NICs behave in the same way. My guess is that it was this
unexpected discrepancy that gave rise to this bug in the first place.

Fixes: 5bb0c4b5 ("ice, xsk: Move Rx allocation out of while-loop")
Reported-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 7a1468ba
...@@ -418,6 +418,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -418,6 +418,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
writel(0, ring->tail); writel(0, ring->tail);
if (ring->xsk_pool) { if (ring->xsk_pool) {
bool ok;
if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) { if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n", dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index); num_bufs, ring->q_index);
...@@ -426,8 +428,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring) ...@@ -426,8 +428,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
return 0; return 0;
} }
err = ice_alloc_rx_bufs_zc(ring, num_bufs); ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err) if (!ok)
dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n", dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q); ring->q_index, pf_q);
return 0; return 0;
......
...@@ -358,18 +358,18 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) ...@@ -358,18 +358,18 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
* This function allocates a number of Rx buffers from the fill ring * This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring. * or the internal recycle mechanism and places them on the Rx ring.
* *
* Returns false if all allocations were successful, true if any fail. * Returns true if all allocations were successful, false if any fail.
*/ */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{ {
union ice_32b_rx_flex_desc *rx_desc; union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use; u16 ntu = rx_ring->next_to_use;
struct ice_rx_buf *rx_buf; struct ice_rx_buf *rx_buf;
bool ret = false; bool ok = true;
dma_addr_t dma; dma_addr_t dma;
if (!count) if (!count)
return false; return true;
rx_desc = ICE_RX_DESC(rx_ring, ntu); rx_desc = ICE_RX_DESC(rx_ring, ntu);
rx_buf = &rx_ring->rx_buf[ntu]; rx_buf = &rx_ring->rx_buf[ntu];
...@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ...@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
do { do {
rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool); rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) { if (!rx_buf->xdp) {
ret = true; ok = false;
break; break;
} }
...@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count) ...@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, ntu); ice_release_rx_desc(rx_ring, ntu);
} }
return ret; return ok;
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment