Commit adbf5a42 authored by Larysa Zaremba, committed by Jakub Kicinski

ice: remove af_xdp_zc_qps bitmap

The referenced commit introduced a bitmap to distinguish between ZC and
copy-mode AF_XDP queues, because xsk_get_pool_from_qid() does not make
this distinction for us.

The bitmap would be especially useful when restoring previous state after
a rebuild, if only it were not reallocated in the process. As it stands,
the state is lost, which leads to e.g. xdpsock dying after the number of
queues is changed.

Instead of preserving the bitmap during the rebuild, remove it completely
and distinguish between ZC and copy-mode queues based on the presence of
a device associated with the pool.
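
For context, the device check relies on an invariant in the XSK core:
xsk_pool_dma_map() records the DMA device in pool->dev when a socket binds
in zero-copy mode and the unmap path clears it again, while copy-mode pools
are never DMA-mapped by the driver. A minimal illustrative predicate built
on that invariant (this helper is a sketch for exposition only, not part of
the patch):

#include <net/xsk_buff_pool.h>

/* Illustrative sketch: true when @pool is attached in zero-copy mode.
 * Relies on xsk_pool_dma_map() setting pool->dev on ZC attach and the
 * unmap path clearing it; copy-mode pools are never DMA-mapped by the
 * driver, so their ->dev stays NULL.
 */
static inline bool pool_is_zc(const struct xsk_buff_pool *pool)
{
	return pool && pool->dev;
}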

Fixes: e102db78 ("ice: track AF_XDP ZC enabled queues in bitmap")
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-3-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent cfa747a6
@@ -409,7 +409,6 @@ struct ice_vsi {
 	struct ice_tc_cfg tc_cfg;
 	struct bpf_prog *xdp_prog;
 	struct ice_tx_ring **xdp_rings;	 /* XDP ring array */
-	unsigned long *af_xdp_zc_qps;	 /* tracks AF_XDP ZC enabled qps */
 	u16 num_xdp_txq;		 /* Used XDP queues */
 	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
 	ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+							u16 qid)
+{
+	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+	if (!ice_is_xdp_ena_vsi(vsi))
+		return NULL;
+
+	return (pool && pool->dev) ? pool : NULL;
+}
+
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
 	struct ice_vsi *vsi = ring->vsi;
 	u16 qid = ring->q_index;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-		return NULL;
-
-	return xsk_get_pool_from_qid(vsi->netdev, qid);
+	return ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 	if (!ring)
 		return;
 
-	if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-		ring->xsk_pool = NULL;
-		return;
-	}
-
-	ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+	ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
......
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	if (!vsi->q_vectors)
 		goto err_vectors;
 
-	vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
-	if (!vsi->af_xdp_zc_qps)
-		goto err_zc_qps;
-
 	return 0;
 
-err_zc_qps:
-	devm_kfree(dev, vsi->q_vectors);
 err_vectors:
 	devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 	dev = ice_pf_to_dev(pf);
 
-	bitmap_free(vsi->af_xdp_zc_qps);
-	vsi->af_xdp_zc_qps = NULL;
 	/* free the ring and vector containers */
 	devm_kfree(dev, vsi->q_vectors);
 	vsi->q_vectors = NULL;
......
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 	if (!pool)
 		return -EINVAL;
 
-	clear_bit(qid, vsi->af_xdp_zc_qps);
 	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
 	return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 	if (err)
 		return err;
 
-	set_bit(qid, vsi->af_xdp_zc_qps);
-
 	return 0;
 }
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
 {
 	struct ice_rx_ring *rx_ring;
-	unsigned long q;
+	uint i;
 
-	for_each_set_bit(q, vsi->af_xdp_zc_qps,
-			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
-		rx_ring = vsi->rx_rings[q];
+	ice_for_each_rxq(vsi, i) {
+		rx_ring = vsi->rx_rings[i];
+		if (!rx_ring->xsk_pool)
+			continue;
+
 		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
 			return -ENOMEM;
 	}
......