Commit 0bf50cea authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
ice: fix AF_XDP ZC timeout and concurrency issues

Maciej Fijalkowski says:

The changes included in this patchset address an issue that a customer
has been facing when AF_XDP ZC Tx sockets were used in combination with
flow control and regular Tx traffic.

After executing:
ethtool --set-priv-flags $dev link-down-on-close on
ethtool -A $dev rx on tx on

launching multiple ZC Tx sockets on $dev and pinging the remote
interface (so that regular Tx traffic is present), then taking $dev down
and back up, triggered a Tx timeout, and most of the time the ice driver
was unable to recover from that state.

Taken together, these patches solve the issue described above on the
customer side. The main focus is to forbid producing Tx descriptors when
either the carrier is not yet initialized or the process of bringing the
interface down has already started.
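
As a minimal sketch of what such a gate can look like in an AF_XDP
wakeup handler (the function name and body here are assumptions for
illustration, not a hunk from this series):

#include <linux/netdevice.h>

/* Illustrative sketch only: refuse to produce Tx descriptors unless the
 * netdev is fully up. This mirrors the intent of "ice: respect netif
 * readiness in AF_XDP ZC related ndo's", not its exact code.
 */
static int xsk_wakeup_guarded(struct net_device *netdev, u32 queue_id,
                              u32 flags)
{
        /* Carrier not initialized yet, or the interface is already on
         * its way down: back off instead of touching the Tx ring.
         */
        if (!netif_running(netdev) || !netif_carrier_ok(netdev))
                return -ENETDOWN;

        /* ... kick the ZC Tx queue as usual ... */
        return 0;
}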

v1: https://lore.kernel.org/netdev/20240708221416.625850-1-anthony.l.nguyen@intel.com/

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: xsk: fix txq interrupt mapping
  ice: add missing WRITE_ONCE when clearing ice_rx_ring::xdp_prog
  ice: improve updating ice_{t,r}x_ring::xsk_pool
  ice: toggle netif_carrier when setting up XSK pool
  ice: modify error handling when setting XSK pool in ndo_bpf
  ice: replace synchronize_rcu with synchronize_net
  ice: don't busy wait for Rx queue disable in ice_qp_dis()
  ice: respect netif readiness in AF_XDP ZC related ndo's
====================

Link: https://patch.msgid.link/20240729200716.681496-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 89add400 963fb461
@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
 }
 
 /**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
  * @ring: Rx ring to use
  *
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Sets XSK buff pool pointer on Rx ring.
  */
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
 {
         struct ice_vsi *vsi = ring->vsi;
         u16 qid = ring->q_index;
 
-        return ice_get_xp_from_qid(vsi, qid);
+        WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
         if (!ring)
                 return;
 
-        ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+        WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
 }
 
 /**
...
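
The WRITE_ONCE() stores above are the publisher half of a pairing: a
reader is expected to sample the pointer once with READ_ONCE() and then
work only on that local snapshot. A generic sketch of the pattern (the
use_zc_path()/use_copy_path() helpers are hypothetical, declared only so
the sketch is self-contained; driver ring types assumed in scope):

#include <linux/compiler.h>

/* Hypothetical consumers, for the sketch only. */
static void use_zc_path(struct ice_rx_ring *ring, struct xsk_buff_pool *pool);
static void use_copy_path(struct ice_rx_ring *ring);

/* Publisher: the ndo_bpf path installs or clears the pool pointer
 * with a marked store.
 */
static void publish_pool(struct ice_rx_ring *ring, struct xsk_buff_pool *pool)
{
        WRITE_ONCE(ring->xsk_pool, pool);
}

/* Consumer: one marked load per pass, so the NULL check and every later
 * use agree on the same pool even if publish_pool() runs concurrently.
 */
static void consume(struct ice_rx_ring *ring)
{
        struct xsk_buff_pool *pool = READ_ONCE(ring->xsk_pool);

        if (pool)
                use_zc_path(ring, pool);
        else
                use_copy_path(ring);
}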
@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                         return err;
         }
 
-        ring->xsk_pool = ice_xsk_pool(ring);
+        ice_rx_xsk_pool(ring);
         if (ring->xsk_pool) {
                 xdp_rxq_info_unreg(&ring->xdp_rxq);
 
@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                         return 0;
         }
 
-        ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+        ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
         if (!ok) {
                 u16 pf_q = ring->vsi->rxq_map[ring->q_index];
...
@@ -2948,7 +2948,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
         ice_for_each_rxq(vsi, i) {
                 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
 
-                if (rx_ring->xsk_pool)
+                if (READ_ONCE(rx_ring->xsk_pool))
                         napi_schedule(&rx_ring->q_vector->napi);
         }
 }
...
@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
         if (rx_ring->vsi->type == ICE_VSI_PF)
                 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                         xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
-        rx_ring->xdp_prog = NULL;
+        WRITE_ONCE(rx_ring->xdp_prog, NULL);
         if (rx_ring->xsk_pool) {
                 kfree(rx_ring->xdp_buf);
                 rx_ring->xdp_buf = NULL;
@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
          * budget and be more aggressive about cleaning up the Tx descriptors.
          */
         ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+                struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
                 bool wd;
 
-                if (tx_ring->xsk_pool)
-                        wd = ice_xmit_zc(tx_ring);
+                if (xsk_pool)
+                        wd = ice_xmit_zc(tx_ring, xsk_pool);
                 else if (ice_ring_is_xdp(tx_ring))
                         wd = true;
                 else
@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
                 budget_per_ring = budget;
 
         ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+                struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
                 int cleaned;
 
                 /* A dedicated path for zero-copy allows making a single
@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
                  * ice_clean_rx_irq function and makes the codebase cleaner.
                  */
                 cleaned = rx_ring->xsk_pool ?
-                          ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+                          ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
                           ice_clean_rx_irq(rx_ring, budget_per_ring);
                 work_done += cleaned;
                 /* if we clean as many as budgeted, we must not be done */
...
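
Note the plumbing pattern in the ice_napi_poll() hunks above: the pool
pointer is sampled once per ring per poll with READ_ONCE(), and that
snapshot is then passed down to ice_xmit_zc() and ice_clean_rx_irq_zc()
as an argument. Every stage of a single NAPI pass therefore operates on
one consistent value, instead of re-reading tx_ring->xsk_pool or
rx_ring->xsk_pool and possibly observing a pointer that ndo_bpf has
cleared in the meantime.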
@@ -20,16 +20,20 @@ struct ice_vsi;
 #ifdef CONFIG_XDP_SOCKETS
 int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
                        u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+                        struct xsk_buff_pool *xsk_pool,
+                        int budget);
 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+                          struct xsk_buff_pool *xsk_pool, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+                               struct xsk_buff_pool __always_unused *xsk_pool)
 {
         return false;
 }
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
 static inline int
 ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+                    struct xsk_buff_pool __always_unused *xsk_pool,
                     int __always_unused budget)
 {
         return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
 static inline bool
 ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+                     struct xsk_buff_pool __always_unused *xsk_pool,
                      u16 __always_unused count)
 {
         return false;
...