Commit 74edbe9e authored by David S. Miller's avatar David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-04-05

Maciej Fijalkowski says:

We were solving issues around AF_XDP busy poll's not-so-usual scenarios,
such as very big busy poll budgets applied to very small HW rings. This
set carries the things that were found during that work that apply to
net tree.

One thing that was fixed for all in-tree ZC drivers was missing on ice
side all the time - it's about syncing RCU before destroying XDP
resources. Next one fixes the bit that is checked in ice_xsk_wakeup and
third one avoids false setting of DD bits on Tx descriptors.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8d90991e e19778e6
...@@ -671,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) ...@@ -671,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{ {
return !!vsi->xdp_prog; return !!READ_ONCE(vsi->xdp_prog);
} }
static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
......
...@@ -2562,7 +2562,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) ...@@ -2562,7 +2562,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
spin_lock_init(&xdp_ring->tx_lock); spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) { for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j); tx_desc = ICE_TX_DESC(xdp_ring, j);
tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE); tx_desc->cmd_type_offset_bsz = 0;
} }
} }
...@@ -2758,8 +2758,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) ...@@ -2758,8 +2758,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
ice_for_each_xdp_txq(vsi, i) ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) { if (vsi->xdp_rings[i]) {
if (vsi->xdp_rings[i]->desc) if (vsi->xdp_rings[i]->desc) {
synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]); ice_free_tx_ring(vsi->xdp_rings[i]);
}
kfree_rcu(vsi->xdp_rings[i], rcu); kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL; vsi->xdp_rings[i] = NULL;
} }
......
...@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) ...@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{ {
ice_clean_tx_ring(vsi->tx_rings[q_idx]); ice_clean_tx_ring(vsi->tx_rings[q_idx]);
if (ice_is_xdp_ena_vsi(vsi)) if (ice_is_xdp_ena_vsi(vsi)) {
synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]); ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
}
ice_clean_rx_ring(vsi->rx_rings[q_idx]); ice_clean_rx_ring(vsi->rx_rings[q_idx]);
} }
...@@ -918,7 +920,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, ...@@ -918,7 +920,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi; struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring; struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state)) if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN; return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi)) if (!ice_is_xdp_ena_vsi(vsi))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment