Commit 17c84cb4 authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Sync txq2sq updates with mlx5e_xmit for HTB queues

This commit makes necessary changes to guarantee that txq2sq remains
stable while mlx5e_xmit is running. Proper synchronization is added for
HTB TX queues.

All updates to txq2sq are performed while the corresponding queue is
disabled (i.e. mlx5e_xmit doesn't run on that queue). smp_wmb after each
change guarantees that mlx5e_xmit can see the updated value after the
queue is enabled. Comments explaining this mechanism are added to
mlx5e_xmit.

When an HTB SQ can be deleted (after deleting an HTB node), synchronize
with RCU to wait for mlx5e_select_queue to finish and stop selecting
that queue, before we re-enable it to avoid TX timeout watchdog alarms.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 6ce204ea
...@@ -50,7 +50,6 @@ static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv) ...@@ -50,7 +50,6 @@ static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
struct mlx5e_qos_node { struct mlx5e_qos_node {
struct hlist_node hnode; struct hlist_node hnode;
struct rcu_head rcu;
struct mlx5e_qos_node *parent; struct mlx5e_qos_node *parent;
u64 rate; u64 rate;
u32 bw_share; u32 bw_share;
...@@ -132,7 +131,11 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node ...@@ -132,7 +131,11 @@ static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node
__clear_bit(node->qid, priv->htb.qos_used_qids); __clear_bit(node->qid, priv->htb.qos_used_qids);
mlx5e_update_tx_netdev_queues(priv); mlx5e_update_tx_netdev_queues(priv);
} }
kfree_rcu(node, rcu); /* Make sure this qid is no longer selected by mlx5e_select_queue, so
* that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
*/
synchronize_net();
kfree(node);
} }
/* TX datapath API */ /* TX datapath API */
...@@ -273,10 +276,18 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs ...@@ -273,10 +276,18 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node) static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{ {
struct mlx5e_txqsq *sq; struct mlx5e_txqsq *sq;
u16 qid;
sq = mlx5e_get_qos_sq(priv, node->qid); sq = mlx5e_get_qos_sq(priv, node->qid);
WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq); qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
/* If it's a new queue, it will be marked as started at this point.
* Stop it before updating txq2sq.
*/
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
priv->txq2sq[qid] = sq;
/* Make the change to txq2sq visible before the queue is started. /* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
...@@ -299,8 +310,13 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) ...@@ -299,8 +310,13 @@ static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid); qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq); mlx5e_deactivate_txqsq(sq);
/* The queue is disabled, no synchronization with datapath is needed. */
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL; priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
/* Make the change to txq2sq visible before the queue is started again.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
* which pairs with this barrier.
*/
smp_wmb();
} }
static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid) static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
......
...@@ -691,8 +691,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -691,8 +691,21 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx5e_txqsq *sq; struct mlx5e_txqsq *sq;
u16 pi; u16 pi;
/* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
* queue being changed is disabled, and smp_wmb guarantees that the
* changes are visible before mlx5e_xmit tries to read from txq2sq. It
* guarantees that the value of txq2sq[qid] doesn't change while
* mlx5e_xmit is running on queue number qid. smp_wmb is paired with
* HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
*/
sq = priv->txq2sq[skb_get_queue_mapping(skb)]; sq = priv->txq2sq[skb_get_queue_mapping(skb)];
if (unlikely(!sq)) { if (unlikely(!sq)) {
/* Two cases when sq can be NULL:
* 1. The HTB node is registered, and mlx5e_select_queue
* selected its queue ID, but the SQ itself is not yet created.
* 2. HTB SQ creation failed. Similar to the previous case, but
* the SQ won't be created.
*/
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment