Commit ebc33a3f authored by Maciej Fijalkowski, committed by Tony Nguyen

ice: improve updating ice_{t,r}x_ring::xsk_pool

xsk_buff_pool pointers that ice ring structs hold are updated via
ndo_bpf that is executed in process context while it can be read by
remote CPU at the same time within NAPI poll. Use synchronize_net()
after pointer update and {READ,WRITE}_ONCE() when working with mentioned
pointer.

Fixes: 2d4238f5 ("ice: Add support for AF_XDP")
Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 9da75a51
...@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi, ...@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
} }
/** /**
* ice_xsk_pool - get XSK buffer pool bound to a ring * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
* @ring: Rx ring to use * @ring: Rx ring to use
* *
* Returns a pointer to xsk_buff_pool structure if there is a buffer pool * Sets XSK buff pool pointer on Rx ring.
* present, NULL otherwise.
*/ */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{ {
struct ice_vsi *vsi = ring->vsi; struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index; u16 qid = ring->q_index;
return ice_get_xp_from_qid(vsi, qid); WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
} }
/** /**
...@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) ...@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring) if (!ring)
return; return;
ring->xsk_pool = ice_get_xp_from_qid(vsi, qid); WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
} }
/** /**
......
...@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) ...@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return err; return err;
} }
ring->xsk_pool = ice_xsk_pool(ring); ice_rx_xsk_pool(ring);
if (ring->xsk_pool) { if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq); xdp_rxq_info_unreg(&ring->xdp_rxq);
...@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) ...@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0; return 0;
} }
ok = ice_alloc_rx_bufs_zc(ring, num_bufs); ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
if (!ok) { if (!ok) {
u16 pf_q = ring->vsi->rxq_map[ring->q_index]; u16 pf_q = ring->vsi->rxq_map[ring->q_index];
......
...@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) ...@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i) { ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
if (rx_ring->xsk_pool) if (READ_ONCE(rx_ring->xsk_pool))
napi_schedule(&rx_ring->q_vector->napi); napi_schedule(&rx_ring->q_vector->napi);
} }
} }
......
...@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ...@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors. * budget and be more aggressive about cleaning up the Tx descriptors.
*/ */
ice_for_each_tx_ring(tx_ring, q_vector->tx) { ice_for_each_tx_ring(tx_ring, q_vector->tx) {
struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd; bool wd;
if (tx_ring->xsk_pool) if (xsk_pool)
wd = ice_xmit_zc(tx_ring); wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring)) else if (ice_ring_is_xdp(tx_ring))
wd = true; wd = true;
else else
...@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ...@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget; budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) { ice_for_each_rx_ring(rx_ring, q_vector->rx) {
struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned; int cleaned;
/* A dedicated path for zero-copy allows making a single /* A dedicated path for zero-copy allows making a single
...@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) ...@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner. * ice_clean_rx_irq function and makes the codebase cleaner.
*/ */
cleaned = rx_ring->xsk_pool ? cleaned = rx_ring->xsk_pool ?
ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring); ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned; work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */ /* if we clean as many as budgeted, we must not be done */
......
This diff is collapsed.
...@@ -20,16 +20,20 @@ struct ice_vsi; ...@@ -20,16 +20,20 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS #ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid); u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget); int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
struct xsk_buff_pool *xsk_pool,
int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count); bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring); bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else #else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring) static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
struct xsk_buff_pool __always_unused *xsk_pool)
{ {
return false; return false;
} }
...@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, ...@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
static inline int static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
struct xsk_buff_pool __always_unused *xsk_pool,
int __always_unused budget) int __always_unused budget)
{ {
return 0; return 0;
...@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, ...@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
static inline bool static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring, ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
struct xsk_buff_pool __always_unused *xsk_pool,
u16 __always_unused count) u16 __always_unused count)
{ {
return false; return false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment