Commit 47f058ce authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-07-17

1) Add resiliency against lost completions for PTP TX port timestamps

2) Report header-data split state via ethtool

3) Decouple the HTB code from the regular TX code

* tag 'mlx5-updates-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: CT: Remove warning of ignore_flow_level support for non PF
  net/mlx5e: Add resiliency for PTP TX port timestamp
  net/mlx5: Expose ts_cqe_metadata_size2wqe_counter
  net/mlx5e: HTB, move htb functions to a new file
  net/mlx5e: HTB, change functions name to follow convention
  net/mlx5e: HTB, remove priv from htb function calls
  net/mlx5e: HTB, hide and dynamically allocate mlx5e_htb structure
  net/mlx5e: HTB, move stats and max_sqs to priv
  net/mlx5e: HTB, move section comment to the right place
  net/mlx5e: HTB, move ids to selq_params struct
  net/mlx5e: HTB, reduce visibility of htb functions
  net/mlx5e: Fix mqprio_rl handling on devlink reload
  net/mlx5e: Report header-data split state through ethtool
====================

Link: https://lore.kernel.org/r/20220719203529.51151-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 5fb859f7 22df2e93
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
 		en_selftest.o en/port.o en/monitor_stats.o en/health.o \
 		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
 		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
-		en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o lib/crypto.o
+		en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+		lib/crypto.o

 #
 # Netdev extra
......
@@ -321,7 +321,8 @@ struct mlx5e_params {
 		u8 num_tc;
 		struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 		struct {
-			struct mlx5e_mqprio_rl *rl;
+			u64 max_rate[TC_MAX_QUEUE];
+			u32 hw_id[TC_MAX_QUEUE];
 		} channel;
 	} mqprio;
 	bool rx_cqe_compress_def;
@@ -898,16 +899,8 @@ struct mlx5e_scratchpad {
 	cpumask_var_t cpumask;
 };

-struct mlx5e_htb {
-	DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
-	DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
-	struct mlx5e_sq_stats **qos_sq_stats;
-	u16 max_qos_sqs;
-	u16 maj_id;
-	u16 defcls;
-};
-
 struct mlx5e_trap;
+struct mlx5e_htb;

 struct mlx5e_priv {
 	/* priv data path fields - start */
@@ -945,6 +938,8 @@ struct mlx5e_priv {
 	struct mlx5e_channel_stats **channel_stats;
 	struct mlx5e_channel_stats trap_stats;
 	struct mlx5e_ptp_stats ptp_stats;
+	struct mlx5e_sq_stats **htb_qos_sq_stats;
+	u16 htb_max_qos_sqs;
 	u16 stats_nch;
 	u16 max_nch;
 	u8 max_opened_tc;
@@ -976,7 +971,7 @@ struct mlx5e_priv {
 	struct mlx5e_hv_vhca_stats_agent stats_agent;
 #endif
 	struct mlx5e_scratchpad scratchpad;
-	struct mlx5e_htb htb;
+	struct mlx5e_htb *htb;
 	struct mlx5e_mqprio_rl *mqprio_rl;
 };
@@ -1181,7 +1176,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
 void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
				     struct ethtool_stats *stats, u64 *data);
 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
-				 struct ethtool_ringparam *param);
+				 struct ethtool_ringparam *param,
+				 struct kernel_ethtool_ringparam *kernel_param);
 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
				struct ethtool_ringparam *param);
 void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
......
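The new kernel_param argument above is what carries the header-data split state to the ethtool netlink layer. A minimal sketch of how a get_ringparam implementation can report it; the SHAMPO-based condition is an assumption about how mlx5e derives the state in this series, while the kernel_ethtool_ringparam plumbing and the ETHTOOL_TCP_DATA_SPLIT_* values are generic:

/* Hedged sketch, not the exact mlx5e implementation. */
static void example_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param,
				  struct kernel_ethtool_ringparam *kernel_param,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* ... fill param->rx_pending, param->tx_pending, etc. ... */

	/* Assumed condition: header-data split is on when SHAMPO is the
	 * active packet-merge mode.
	 */
	kernel_param->tcp_data_split =
		priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
		ETHTOOL_TCP_DATA_SPLIT_ENABLED :
		ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

Recent ethtool versions surface this as the TCP data split field in the `ethtool -g` output.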
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include <net/pkt_cls.h>
#include "htb.h"
#include "en.h"
#include "../qos.h"
struct mlx5e_qos_node {
struct hlist_node hnode;
struct mlx5e_qos_node *parent;
u64 rate;
u32 bw_share;
u32 max_average_bw;
u32 hw_id;
u32 classid; /* 16-bit, except root. */
u16 qid;
};
struct mlx5e_htb {
DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
struct mlx5e_selq *selq;
};
#define MLX5E_QOS_QID_INNER 0xffff
#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
/* Software representation of the QoS tree */
int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data)
{
struct mlx5e_qos_node *node = NULL;
int bkt, err;
hash_for_each(htb->qos_tc2node, bkt, node, hnode) {
if (node->qid == MLX5E_QOS_QID_INNER)
continue;
err = callback(data, node->qid, node->hw_id);
if (err)
return err;
}
return 0;
}
int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb)
{
int last;
last = find_last_bit(htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(htb->mdev));
return last == mlx5e_qos_max_leaf_nodes(htb->mdev) ? 0 : last + 1;
}
static int mlx5e_htb_find_unused_qos_qid(struct mlx5e_htb *htb)
{
int size = mlx5e_qos_max_leaf_nodes(htb->mdev);
struct mlx5e_priv *priv = htb->priv;
int res;
WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
res = find_first_zero_bit(htb->qos_used_qids, size);
return res == size ? -ENOSPC : res;
}
static struct mlx5e_qos_node *
mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid,
struct mlx5e_qos_node *parent)
{
struct mlx5e_qos_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return ERR_PTR(-ENOMEM);
node->parent = parent;
node->qid = qid;
__set_bit(qid, htb->qos_used_qids);
node->classid = classid;
hash_add_rcu(htb->qos_tc2node, &node->hnode, classid);
mlx5e_update_tx_netdev_queues(htb->priv);
return node;
}
static struct mlx5e_qos_node *mlx5e_htb_node_create_root(struct mlx5e_htb *htb)
{
struct mlx5e_qos_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return ERR_PTR(-ENOMEM);
node->qid = MLX5E_QOS_QID_INNER;
node->classid = MLX5E_HTB_CLASSID_ROOT;
hash_add_rcu(htb->qos_tc2node, &node->hnode, node->classid);
return node;
}
static struct mlx5e_qos_node *mlx5e_htb_node_find(struct mlx5e_htb *htb, u32 classid)
{
struct mlx5e_qos_node *node = NULL;
hash_for_each_possible(htb->qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
return node;
}
static struct mlx5e_qos_node *mlx5e_htb_node_find_rcu(struct mlx5e_htb *htb, u32 classid)
{
struct mlx5e_qos_node *node = NULL;
hash_for_each_possible_rcu(htb->qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
return node;
}
static void mlx5e_htb_node_delete(struct mlx5e_htb *htb, struct mlx5e_qos_node *node)
{
hash_del_rcu(&node->hnode);
if (node->qid != MLX5E_QOS_QID_INNER) {
__clear_bit(node->qid, htb->qos_used_qids);
mlx5e_update_tx_netdev_queues(htb->priv);
}
/* Make sure this qid is no longer selected by mlx5e_select_queue, so
* that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
*/
synchronize_net();
kfree(node);
}
/* TX datapath API */
int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid)
{
struct mlx5e_qos_node *node;
u16 qid;
int res;
rcu_read_lock();
node = mlx5e_htb_node_find_rcu(htb, classid);
if (!node) {
res = -ENOENT;
goto out;
}
qid = READ_ONCE(node->qid);
if (qid == MLX5E_QOS_QID_INNER) {
res = -EINVAL;
goto out;
}
res = mlx5e_qid_from_qos(&htb->priv->channels, qid);
out:
rcu_read_unlock();
return res;
}
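For orientation, a hedged sketch of the datapath consumer of this lookup: queue selection resolves a packet's HTB classid to a txq index under RCU and falls back when the class is unknown or is an inner node. The function name and fallback handling here are illustrative, not the actual selq code:

static u16 example_select_htb_txq(struct mlx5e_htb *htb, u16 classid, u16 fallback)
{
	int txq = mlx5e_htb_get_txq_by_classid(htb, classid);

	/* -ENOENT: unknown classid; -EINVAL: inner node (no queue). */
	return txq < 0 ? fallback : (u16)txq;
}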
/* HTB TC handlers */
static int
mlx5e_htb_root_add(struct mlx5e_htb *htb, u16 htb_maj_id, u16 htb_defcls,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = htb->priv;
struct mlx5e_qos_node *root;
bool opened;
int err;
qos_dbg(htb->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
mlx5e_selq_prepare_htb(htb->selq, htb_maj_id, htb_defcls);
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err)
goto err_cancel_selq;
}
root = mlx5e_htb_node_create_root(htb);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto err_free_queues;
}
err = mlx5_qos_create_root_node(htb->mdev, &root->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
goto err_sw_node_delete;
}
mlx5e_selq_apply(htb->selq);
return 0;
err_sw_node_delete:
mlx5e_htb_node_delete(htb, root);
err_free_queues:
if (opened)
mlx5e_qos_close_all_queues(&priv->channels);
err_cancel_selq:
mlx5e_selq_cancel(htb->selq);
return err;
}
static int mlx5e_htb_root_del(struct mlx5e_htb *htb)
{
struct mlx5e_priv *priv = htb->priv;
struct mlx5e_qos_node *root;
int err;
qos_dbg(htb->mdev, "TC_HTB_DESTROY\n");
/* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
* so that we can safely switch to its non-HTB non-PTP fastpath.
*/
synchronize_net();
mlx5e_selq_prepare_htb(htb->selq, 0, 0);
mlx5e_selq_apply(htb->selq);
root = mlx5e_htb_node_find(htb, MLX5E_HTB_CLASSID_ROOT);
if (!root) {
qos_err(htb->mdev, "Failed to find the root node in the QoS tree\n");
return -ENOENT;
}
err = mlx5_qos_destroy_node(htb->mdev, root->hw_id);
if (err)
qos_err(htb->mdev, "Failed to destroy root node %u, err = %d\n",
root->hw_id, err);
mlx5e_htb_node_delete(htb, root);
mlx5e_qos_deactivate_all_queues(&priv->channels);
mlx5e_qos_close_all_queues(&priv->channels);
return err;
}
static int mlx5e_htb_convert_rate(struct mlx5e_htb *htb, u64 rate,
struct mlx5e_qos_node *parent, u32 *bw_share)
{
u64 share = 0;
while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
parent = parent->parent;
if (parent->max_average_bw)
share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
parent->max_average_bw);
else
share = 101;
*bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
qos_dbg(htb->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
return 0;
}
static void mlx5e_htb_convert_ceil(struct mlx5e_htb *htb, u64 ceil, u32 *max_average_bw)
{
/* Hardware treats 0 as "unlimited", set at least 1. */
*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(htb->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
}
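To make the two conversions above concrete, a worked example with made-up numbers (BYTES_IN_MBIT = 125000, i.e. bytes/s per Mbit/s): bw_share expresses the child's rate as a percentage of the nearest ceiled ancestor, and max_average_bw is the ceil in Mbit/s, clamped to at least 1 because hardware treats 0 as "unlimited".

/* Hedged worked example, not driver code. */
static void example_convert(void)
{
	u64 rate = 25000000;	/* bytes/s: 25 MB/s = 200 Mbit/s */
	u64 ceil = 100000000;	/* bytes/s: 100 MB/s = 800 Mbit/s */
	u32 max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);	/* 800 */
	u64 share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
			      max_average_bw);	/* 200 * 100 / 800 = 25 */
	u32 bw_share = share == 0 ? 1 : share > 100 ? 0 : share;	/* 25% of parent ceil */

	(void)bw_share;
}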
int
mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
u32 parent_classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node, *parent;
struct mlx5e_priv *priv = htb->priv;
int qid;
int err;
qos_dbg(htb->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
classid, parent_classid, rate, ceil);
qid = mlx5e_htb_find_unused_qos_qid(htb);
if (qid < 0) {
NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
return qid;
}
parent = mlx5e_htb_node_find(htb, parent_classid);
if (!parent)
return -EINVAL;
node = mlx5e_htb_node_create_leaf(htb, classid, qid, parent);
if (IS_ERR(node))
return PTR_ERR(node);
node->rate = rate;
mlx5e_htb_convert_rate(htb, rate, node->parent, &node->bw_share);
mlx5e_htb_convert_ceil(htb, ceil, &node->max_average_bw);
err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->hw_id,
node->bw_share, node->max_average_bw,
&node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
classid, err);
mlx5e_htb_node_delete(htb, node);
return err;
}
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
classid, err);
} else {
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
}
}
return mlx5e_qid_from_qos(&priv->channels, node->qid);
}
int
mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
u64 rate, u64 ceil, struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node, *child;
struct mlx5e_priv *priv = htb->priv;
int err, tmp_err;
u32 new_hw_id;
u16 qid;
qos_dbg(htb->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
classid, child_classid, rate, ceil);
node = mlx5e_htb_node_find(htb, classid);
if (!node)
return -ENOENT;
err = mlx5_qos_create_inner_node(htb->mdev, node->parent->hw_id,
node->bw_share, node->max_average_bw,
&new_hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
qos_err(htb->mdev, "Failed to create an inner node (class %04x), err = %d\n",
classid, err);
return err;
}
/* Intentionally reuse the qid for the upcoming first child. */
child = mlx5e_htb_node_create_leaf(htb, child_classid, node->qid, node);
if (IS_ERR(child)) {
err = PTR_ERR(child);
goto err_destroy_hw_node;
}
child->rate = rate;
mlx5e_htb_convert_rate(htb, rate, node, &child->bw_share);
mlx5e_htb_convert_ceil(htb, ceil, &child->max_average_bw);
err = mlx5_qos_create_leaf_node(htb->mdev, new_hw_id, child->bw_share,
child->max_average_bw, &child->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
classid, err);
goto err_delete_sw_node;
}
/* No fail point. */
qid = node->qid;
/* Pairs with mlx5e_htb_get_txq_by_classid. */
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_deactivate_qos_sq(priv, qid);
mlx5e_close_qos_sq(priv, qid);
}
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
if (err) /* Not fatal. */
qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
node->hw_id, classid, err);
node->hw_id = new_hw_id;
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, child->qid, child->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
classid, err);
} else {
mlx5e_activate_qos_sq(priv, child->qid, child->hw_id);
}
}
return 0;
err_delete_sw_node:
child->qid = MLX5E_QOS_QID_INNER;
mlx5e_htb_node_delete(htb, child);
err_destroy_hw_node:
tmp_err = mlx5_qos_destroy_node(htb->mdev, new_hw_id);
if (tmp_err) /* Not fatal. */
qos_warn(htb->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
new_hw_id, classid, tmp_err);
return err;
}
static struct mlx5e_qos_node *mlx5e_htb_node_find_by_qid(struct mlx5e_htb *htb, u16 qid)
{
struct mlx5e_qos_node *node = NULL;
int bkt;
hash_for_each(htb->qos_tc2node, bkt, node, hnode)
if (node->qid == qid)
break;
return node;
}
int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = htb->priv;
struct mlx5e_qos_node *node;
struct netdev_queue *txq;
u16 qid, moved_qid;
bool opened;
int err;
qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
node = mlx5e_htb_node_find(htb, *classid);
if (!node)
return -ENOENT;
/* Store qid for reuse. */
qid = node->qid;
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
txq = netdev_get_tx_queue(htb->netdev,
mlx5e_qid_from_qos(&priv->channels, qid));
mlx5e_deactivate_qos_sq(priv, qid);
mlx5e_close_qos_sq(priv, qid);
}
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
if (err) /* Not fatal. */
qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
node->hw_id, *classid, err);
mlx5e_htb_node_delete(htb, node);
moved_qid = mlx5e_htb_cur_leaf_nodes(htb);
if (moved_qid == 0) {
/* The last QoS SQ was just destroyed. */
if (opened)
mlx5e_reactivate_qos_sq(priv, qid, txq);
return 0;
}
moved_qid--;
if (moved_qid < qid) {
/* The highest QoS SQ was just destroyed. */
WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
qid, moved_qid);
if (opened)
mlx5e_reactivate_qos_sq(priv, qid, txq);
return 0;
}
WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
qos_dbg(htb->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
node = mlx5e_htb_node_find_by_qid(htb, moved_qid);
WARN(!node, "Could not find a node with qid %u to move to queue %u",
moved_qid, qid);
/* Stop traffic to the old queue. */
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
__clear_bit(moved_qid, priv->htb->qos_used_qids);
if (opened) {
txq = netdev_get_tx_queue(htb->netdev,
mlx5e_qid_from_qos(&priv->channels, moved_qid));
mlx5e_deactivate_qos_sq(priv, moved_qid);
mlx5e_close_qos_sq(priv, moved_qid);
}
/* Prevent packets from the old class from getting into the new one. */
mlx5e_reset_qdisc(htb->netdev, moved_qid);
__set_bit(qid, htb->qos_used_qids);
WRITE_ONCE(node->qid, qid);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
node->classid, moved_qid, qid, err);
} else {
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
}
}
mlx5e_update_tx_netdev_queues(priv);
if (opened)
mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
*classid = node->classid;
return 0;
}
int
mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node, *parent;
struct mlx5e_priv *priv = htb->priv;
u32 old_hw_id, new_hw_id;
int err, saved_err = 0;
u16 qid;
qos_dbg(htb->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
force ? "_FORCE" : "", classid);
node = mlx5e_htb_node_find(htb, classid);
if (!node)
return -ENOENT;
err = mlx5_qos_create_leaf_node(htb->mdev, node->parent->parent->hw_id,
node->parent->bw_share,
node->parent->max_average_bw,
&new_hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
qos_err(htb->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
classid, err);
if (!force)
return err;
saved_err = err;
}
/* Store qid for reuse and prevent clearing the bit. */
qid = node->qid;
/* Pairs with mlx5e_htb_get_txq_by_classid. */
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_deactivate_qos_sq(priv, qid);
mlx5e_close_qos_sq(priv, qid);
}
/* Prevent packets from the old class from getting into the new one. */
mlx5e_reset_qdisc(htb->netdev, qid);
err = mlx5_qos_destroy_node(htb->mdev, node->hw_id);
if (err) /* Not fatal. */
qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
node->hw_id, classid, err);
parent = node->parent;
mlx5e_htb_node_delete(htb, node);
node = parent;
WRITE_ONCE(node->qid, qid);
/* Early return on error in force mode. Parent will still be an inner
* node to be deleted by a following delete operation.
*/
if (saved_err)
return saved_err;
old_hw_id = node->hw_id;
node->hw_id = new_hw_id;
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, node->qid, node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(htb->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
classid, err);
} else {
mlx5e_activate_qos_sq(priv, node->qid, node->hw_id);
}
}
err = mlx5_qos_destroy_node(htb->mdev, old_hw_id);
if (err) /* Not fatal. */
qos_warn(htb->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
node->hw_id, classid, err);
return 0;
}
static int
mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *child;
int err = 0;
int bkt;
hash_for_each(htb->qos_tc2node, bkt, child, hnode) {
u32 old_bw_share = child->bw_share;
int err_one;
if (child->parent != node)
continue;
mlx5e_htb_convert_rate(htb, child->rate, node, &child->bw_share);
if (child->bw_share == old_bw_share)
continue;
err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
child->max_average_bw, child->hw_id);
if (!err && err_one) {
err = err_one;
NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
qos_err(htb->mdev, "Failed to modify a child node (class %04x), err = %d\n",
node->classid, err);
}
}
return err;
}
int
mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack)
{
u32 bw_share, max_average_bw;
struct mlx5e_qos_node *node;
bool ceil_changed = false;
int err;
qos_dbg(htb->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
classid, rate, ceil);
node = mlx5e_htb_node_find(htb, classid);
if (!node)
return -ENOENT;
node->rate = rate;
mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
max_average_bw, node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
qos_err(htb->mdev, "Failed to modify a node (class %04x), err = %d\n",
classid, err);
return err;
}
if (max_average_bw != node->max_average_bw)
ceil_changed = true;
node->bw_share = bw_share;
node->max_average_bw = max_average_bw;
if (ceil_changed)
err = mlx5e_htb_update_children(htb, node, extack);
return err;
}
struct mlx5e_htb *mlx5e_htb_alloc(void)
{
return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
}
void mlx5e_htb_free(struct mlx5e_htb *htb)
{
kvfree(htb);
}
int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
struct net_device *netdev, struct mlx5_core_dev *mdev,
struct mlx5e_selq *selq, struct mlx5e_priv *priv)
{
htb->mdev = mdev;
htb->netdev = netdev;
htb->selq = selq;
htb->priv = priv;
hash_init(htb->qos_tc2node);
return mlx5e_htb_root_add(htb, htb_qopt->parent_classid, htb_qopt->classid,
htb_qopt->extack);
}
void mlx5e_htb_cleanup(struct mlx5e_htb *htb)
{
mlx5e_htb_root_del(htb);
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5E_EN_HTB_H_
#define __MLX5E_EN_HTB_H_
#include "qos.h"
#define MLX5E_QOS_MAX_LEAF_NODES 256
struct mlx5e_selq;
struct mlx5e_htb;
typedef int (*mlx5e_fp_htb_enumerate)(void *data, u16 qid, u32 hw_id);
int mlx5e_htb_enumerate_leaves(struct mlx5e_htb *htb, mlx5e_fp_htb_enumerate callback, void *data);
int mlx5e_htb_cur_leaf_nodes(struct mlx5e_htb *htb);
/* TX datapath API */
int mlx5e_htb_get_txq_by_classid(struct mlx5e_htb *htb, u16 classid);
/* HTB TC handlers */
int
mlx5e_htb_leaf_alloc_queue(struct mlx5e_htb *htb, u16 classid,
u32 parent_classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
int
mlx5e_htb_leaf_to_inner(struct mlx5e_htb *htb, u16 classid, u16 child_classid,
u64 rate, u64 ceil, struct netlink_ext_ack *extack);
int mlx5e_htb_leaf_del(struct mlx5e_htb *htb, u16 *classid,
struct netlink_ext_ack *extack);
int
mlx5e_htb_leaf_del_last(struct mlx5e_htb *htb, u16 classid, bool force,
struct netlink_ext_ack *extack);
int
mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
struct mlx5e_htb *mlx5e_htb_alloc(void);
void mlx5e_htb_free(struct mlx5e_htb *htb);
int mlx5e_htb_init(struct mlx5e_htb *htb, struct tc_htb_qopt_offload *htb_qopt,
struct net_device *netdev, struct mlx5_core_dev *mdev,
struct mlx5e_selq *selq, struct mlx5e_priv *priv);
void mlx5e_htb_cleanup(struct mlx5e_htb *htb);
#endif
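For orientation, a hedged sketch of the intended alloc/init/cleanup/free lifecycle, mirroring what the TC_HTB_CREATE and TC_HTB_DESTROY paths of mlx5e_htb_setup_tc() in en/qos.c (further below) do; error handling is abbreviated:

static int example_htb_create(struct mlx5e_priv *priv,
			      struct tc_htb_qopt_offload *htb_qopt)
{
	struct mlx5e_htb *htb = mlx5e_htb_alloc();	/* kvzalloc'ed, opaque to callers */
	int err;

	if (!htb)
		return -ENOMEM;
	err = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev,
			     &priv->selq, priv);	/* creates the root node */
	if (err) {
		mlx5e_htb_free(htb);
		return err;
	}
	priv->htb = htb;
	return 0;
}

static void example_htb_destroy(struct mlx5e_priv *priv)
{
	mlx5e_htb_cleanup(priv->htb);	/* deletes the root, closes the QoS SQs */
	mlx5e_htb_free(priv->htb);
	priv->htb = NULL;
}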
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
 	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 }

+#define PTP_WQE_CTR2IDX(val)	((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+	struct skb_shared_hwtstamps hwts = {};
+	struct sk_buff *skb;
+
+	ptpsq->cq_stats->resync_event++;
+
+	while (skb_cc != skb_id) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+		skb_tstamp_tx(skb, &hwts);
+		ptpsq->cq_stats->resync_cqe++;
+		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+	}
+}
+
 static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 				    struct mlx5_cqe64 *cqe,
 				    int budget)
 {
-	struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+	u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
 	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+	struct sk_buff *skb;
 	ktime_t hwtstamp;

 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+		skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 		ptpsq->cq_stats->err_cqe++;
 		goto out;
 	}

+	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
 	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
 				      hwtstamp, ptpsq->cq_stats);
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+	struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;

 	ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
 					     GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 	ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc;
 	ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc;
 	ptpsq->skb_fifo.mask = wq_sz - 1;
+	if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+		ptpsq->ts_cqe_ctr_mask =
+			(1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;

 	return 0;
 }
......
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
 	u16 skb_fifo_pc;
 	struct mlx5e_skb_fifo skb_fifo;
 	struct mlx5e_ptp_cq_stats *cq_stats;
+	u16 ts_cqe_ctr_mask;
 };

 enum {
......
@@ -2,11 +2,16 @@
 /* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

 #include <net/sch_generic.h>
+#include <net/pkt_cls.h>
 #include "en.h"
 #include "params.h"
 #include "../qos.h"
+#include "en/htb.h"

-#define BYTES_IN_MBIT 125000
+struct qos_sq_callback_params {
+	struct mlx5e_priv *priv;
+	struct mlx5e_channels *chs;
+};

 int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
 {
@@ -28,124 +33,14 @@ int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
 	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
 }
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
{
int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
}
/* Software representation of the QoS tree (internal to this file) */
static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
{
int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
int res;
WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
res = find_first_zero_bit(priv->htb.qos_used_qids, size);
return res == size ? -ENOSPC : res;
}
struct mlx5e_qos_node {
struct hlist_node hnode;
struct mlx5e_qos_node *parent;
u64 rate;
u32 bw_share;
u32 max_average_bw;
u32 hw_id;
u32 classid; /* 16-bit, except root. */
u16 qid;
};
#define MLX5E_QOS_QID_INNER 0xffff
#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
static struct mlx5e_qos_node *
mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
struct mlx5e_qos_node *parent)
{
struct mlx5e_qos_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return ERR_PTR(-ENOMEM);
node->parent = parent;
node->qid = qid;
__set_bit(qid, priv->htb.qos_used_qids);
node->classid = classid;
hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
mlx5e_update_tx_netdev_queues(priv);
return node;
}
static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
{
struct mlx5e_qos_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return ERR_PTR(-ENOMEM);
node->qid = MLX5E_QOS_QID_INNER;
node->classid = MLX5E_HTB_CLASSID_ROOT;
hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
return node;
}
static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
{
struct mlx5e_qos_node *node = NULL;
hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
return node;
}
static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
{
struct mlx5e_qos_node *node = NULL;
hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
return node;
}
static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
hash_del_rcu(&node->hnode);
if (node->qid != MLX5E_QOS_QID_INNER) {
__clear_bit(node->qid, priv->htb.qos_used_qids);
mlx5e_update_tx_netdev_queues(priv);
}
/* Make sure this qid is no longer selected by mlx5e_select_queue, so
* that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
*/
synchronize_net();
kfree(node);
}
 /* TX datapath API */

-static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
 {
 	/* These channel params are safe to access from the datapath, because:
-	 * 1. This function is called only after checking priv->htb.maj_id != 0,
+	 * 1. This function is called only after checking selq->htb_maj_id != 0,
 	 *    and the number of queues can't change while HTB offload is active.
-	 * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+	 * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
 	 *    mlx5e_select_queue to finish while holding priv->state_lock,
 	 *    preventing other code from changing the number of queues.
 	 */
@@ -154,30 +49,7 @@ u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
 	return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
 }
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
-{
-	struct mlx5e_qos_node *node;
-	u16 qid;
-	int res;
-
-	rcu_read_lock();
-
-	node = mlx5e_sw_node_find_rcu(priv, classid);
-	if (!node) {
-		res = -ENOENT;
-		goto out;
-	}
-	qid = READ_ONCE(node->qid);
-	if (qid == MLX5E_QOS_QID_INNER) {
-		res = -EINVAL;
-		goto out;
-	}
-	res = mlx5e_qid_from_qos(&priv->channels, qid);
-
-out:
-	rcu_read_unlock();
-	return res;
-}
+/* SQ lifecycle */

 static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
 {
@@ -194,10 +66,8 @@ static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
 	return mlx5e_state_dereference(priv, qos_sqs[qid]);
 }
-/* SQ lifecycle */
-
-static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
-			     struct mlx5e_qos_node *node)
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+		      u16 node_qid, u32 hw_id)
 {
 	struct mlx5e_create_cq_param ccp = {};
 	struct mlx5e_txqsq __rcu **qos_sqs;
@@ -210,13 +80,13 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 	params = &chs->params;

-	txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+	txq_ix = mlx5e_qid_from_qos(chs, node_qid);

-	WARN_ON(node->qid > priv->htb.max_qos_sqs);
-	if (node->qid == priv->htb.max_qos_sqs) {
+	WARN_ON(node_qid > priv->htb_max_qos_sqs);
+	if (node_qid == priv->htb_max_qos_sqs) {
 		struct mlx5e_sq_stats *stats, **stats_list = NULL;

-		if (priv->htb.max_qos_sqs == 0) {
+		if (priv->htb_max_qos_sqs == 0) {
 			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
					      sizeof(*stats_list),
					      GFP_KERNEL);
@@ -229,16 +99,16 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 			return -ENOMEM;
 		}
 		if (stats_list)
-			WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
-		WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
-		/* Order max_qos_sqs increment after writing the array pointer.
+			WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+		WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
+		/* Order htb_max_qos_sqs increment after writing the array pointer.
 		 * Pairs with smp_load_acquire in en_stats.c.
 		 */
-		smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+		smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
 	}

-	ix = node->qid % params->num_channels;
-	qid = node->qid / params->num_channels;
+	ix = node_qid % params->num_channels;
+	qid = node_qid / params->num_channels;
 	c = chs->c[ix];

 	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
@@ -257,8 +127,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 	if (err)
 		goto err_free_sq;

 	err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
-			       &param_sq, sq, 0, node->hw_id,
-			       priv->htb.qos_sq_stats[node->qid]);
+			       &param_sq, sq, 0, hw_id,
+			       priv->htb_qos_sq_stats[node_qid]);
 	if (err)
 		goto err_close_cq;
@@ -273,14 +143,22 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 	return err;
 }

-static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
 {
+	struct qos_sq_callback_params *cb_params = data;
+
+	return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
+}
+
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
+{
+	struct mlx5e_priv *priv = data;
 	struct mlx5e_txqsq *sq;
 	u16 qid;

-	sq = mlx5e_get_qos_sq(priv, node->qid);
+	sq = mlx5e_get_qos_sq(priv, node_qid);

-	qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+	qid = mlx5e_qid_from_qos(&priv->channels, node_qid);

 	/* If it's a new queue, it will be marked as started at this point.
 	 * Stop it before updating txq2sq.
@@ -295,11 +173,13 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
 	 */
 	smp_wmb();

-	qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+	qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
 	mlx5e_activate_txqsq(sq);
+
+	return 0;
 }

-static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
 {
 	struct mlx5e_txqsq *sq;
@@ -319,7 +199,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
 	smp_wmb();
 }

-static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
 {
 	struct mlx5e_txqsq __rcu **qos_sqs;
 	struct mlx5e_params *params;
@@ -369,7 +249,7 @@ void mlx5e_qos_close_queues(struct mlx5e_channel *c)
 	kvfree(qos_sqs);
 }

-static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
 {
 	int i;

@@ -377,7 +257,7 @@ void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
 		mlx5e_qos_close_queues(chs->c[i]);
 }

-static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
 {
 	u16 qos_sqs_size;
 	int i;
@@ -413,39 +293,28 @@ int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)

 int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
 {
-	struct mlx5e_qos_node *node = NULL;
-	int bkt, err;
-
-	if (!priv->htb.maj_id)
-		return 0;
+	struct qos_sq_callback_params callback_params;
+	int err;

 	err = mlx5e_qos_alloc_queues(priv, chs);
 	if (err)
 		return err;

-	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
-		if (node->qid == MLX5E_QOS_QID_INNER)
-			continue;
+	callback_params.priv = priv;
+	callback_params.chs = chs;

-		err = mlx5e_open_qos_sq(priv, chs, node);
-		if (err) {
-			mlx5e_qos_close_all_queues(chs);
-			return err;
-		}
-	}
+	err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
+	if (err) {
+		mlx5e_qos_close_all_queues(chs);
+		return err;
+	}

 	return 0;
 }

 void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
 {
-	struct mlx5e_qos_node *node = NULL;
-	int bkt;
-
-	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
-		if (node->qid == MLX5E_QOS_QID_INNER)
-			continue;
-
-		mlx5e_activate_qos_sq(priv, node);
-	}
+	mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
 }

 void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
@@ -474,7 +343,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
 	}
 }

-static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
 {
 	int i;

@@ -482,293 +351,14 @@ void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
 		mlx5e_qos_deactivate_queues(chs->c[i]);
 }
-/* HTB API */
-
int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *root;
bool opened;
int err;
qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
if (!mlx5_qos_is_supported(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
return -EOPNOTSUPP;
}
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err)
goto err_cancel_selq;
}
root = mlx5e_sw_node_create_root(priv);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto err_free_queues;
}
err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
goto err_sw_node_delete;
}
WRITE_ONCE(priv->htb.defcls, htb_defcls);
/* Order maj_id after defcls - pairs with
* mlx5e_select_queue/mlx5e_select_htb_queues.
*/
smp_store_release(&priv->htb.maj_id, htb_maj_id);
if (opened)
mlx5e_selq_apply(&priv->selq);
return 0;
err_sw_node_delete:
mlx5e_sw_node_delete(priv, root);
err_free_queues:
if (opened)
mlx5e_qos_close_all_queues(&priv->channels);
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
return err;
}
int mlx5e_htb_root_del(struct mlx5e_priv *priv)
{
struct mlx5e_qos_node *root;
int err;
qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
/* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
* so that we can safely switch to its non-HTB non-PTP fastpath.
*/
synchronize_net();
mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
mlx5e_selq_apply(&priv->selq);
WRITE_ONCE(priv->htb.maj_id, 0);
root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
if (!root) {
qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
return -ENOENT;
}
err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
if (err)
qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
root->hw_id, err);
mlx5e_sw_node_delete(priv, root);
mlx5e_qos_deactivate_all_queues(&priv->channels);
mlx5e_qos_close_all_queues(&priv->channels);
return err;
}
static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
struct mlx5e_qos_node *parent, u32 *bw_share)
{
u64 share = 0;
while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
parent = parent->parent;
if (parent->max_average_bw)
share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
parent->max_average_bw);
else
share = 101;
*bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
return 0;
}
static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
/* Hardware treats 0 as "unlimited", set at least 1. */
*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
}
int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
u32 parent_classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node, *parent;
int qid;
int err;
qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
classid, parent_classid, rate, ceil);
qid = mlx5e_find_unused_qos_qid(priv);
if (qid < 0) {
NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
return qid;
}
parent = mlx5e_sw_node_find(priv, parent_classid);
if (!parent)
return -EINVAL;
node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
if (IS_ERR(node))
return PTR_ERR(node);
node->rate = rate;
mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
node->bw_share, node->max_average_bw,
&node->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
classid, err);
mlx5e_sw_node_delete(priv, node);
return err;
}
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, node);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
classid, err);
} else {
mlx5e_activate_qos_sq(priv, node);
}
}
return mlx5e_qid_from_qos(&priv->channels, node->qid);
}
int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
u64 rate, u64 ceil, struct netlink_ext_ack *extack)
{
struct mlx5e_qos_node *node, *child;
int err, tmp_err;
u32 new_hw_id;
u16 qid;
qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
classid, child_classid, rate, ceil);
node = mlx5e_sw_node_find(priv, classid);
if (!node)
return -ENOENT;
err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
node->bw_share, node->max_average_bw,
&new_hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
classid, err);
return err;
}
/* Intentionally reuse the qid for the upcoming first child. */
child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
if (IS_ERR(child)) {
err = PTR_ERR(child);
goto err_destroy_hw_node;
}
child->rate = rate;
mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
child->max_average_bw, &child->hw_id);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
classid, err);
goto err_delete_sw_node;
}
/* No fail point. */
qid = node->qid;
/* Pairs with mlx5e_get_txq_by_classid. */
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_deactivate_qos_sq(priv, qid);
mlx5e_close_qos_sq(priv, qid);
}
err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
if (err) /* Not fatal. */
qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
node->hw_id, classid, err);
node->hw_id = new_hw_id;
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
err = mlx5e_open_qos_sq(priv, &priv->channels, child);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
classid, err);
} else {
mlx5e_activate_qos_sq(priv, child);
}
}
return 0;
err_delete_sw_node:
child->qid = MLX5E_QOS_QID_INNER;
mlx5e_sw_node_delete(priv, child);
err_destroy_hw_node:
tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
if (tmp_err) /* Not fatal. */
qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
new_hw_id, classid, tmp_err);
return err;
}
static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_qos_node *node = NULL;
int bkt;
hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
if (node->qid == qid)
break;
return node;
}
-static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
 {
 	qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
 	netdev_tx_reset_queue(txq);
 	netif_tx_start_queue(txq);
 }

-static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
 {
 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
 	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
@@ -781,251 +371,65 @@ void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
 	spin_unlock_bh(qdisc_lock(qdisc));
 }
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
-		       struct netlink_ext_ack *extack)
-{
-	struct mlx5e_qos_node *node;
-	struct netdev_queue *txq;
-	u16 qid, moved_qid;
-	bool opened;
-	int err;
-
-	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
-
-	node = mlx5e_sw_node_find(priv, *classid);
-	if (!node)
-		return -ENOENT;
-
-	/* Store qid for reuse. */
-	qid = node->qid;
-
-	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (opened) {
-		txq = netdev_get_tx_queue(priv->netdev,
-					  mlx5e_qid_from_qos(&priv->channels, qid));
-		mlx5e_deactivate_qos_sq(priv, qid);
-		mlx5e_close_qos_sq(priv, qid);
-	}
-
-	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
-	if (err) /* Not fatal. */
-		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
-			 node->hw_id, *classid, err);
-
-	mlx5e_sw_node_delete(priv, node);
-
-	moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
-
-	if (moved_qid == 0) {
-		/* The last QoS SQ was just destroyed. */
-		if (opened)
-			mlx5e_reactivate_qos_sq(priv, qid, txq);
-		return 0;
-	}
-	moved_qid--;
-
-	if (moved_qid < qid) {
-		/* The highest QoS SQ was just destroyed. */
-		WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
-		     qid, moved_qid);
-		if (opened)
-			mlx5e_reactivate_qos_sq(priv, qid, txq);
-		return 0;
-	}
-
-	WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
-	qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
-
-	node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
-	WARN(!node, "Could not find a node with qid %u to move to queue %u",
-	     moved_qid, qid);
-
-	/* Stop traffic to the old queue. */
-	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-	__clear_bit(moved_qid, priv->htb.qos_used_qids);
-
-	if (opened) {
-		txq = netdev_get_tx_queue(priv->netdev,
-					  mlx5e_qid_from_qos(&priv->channels, moved_qid));
-		mlx5e_deactivate_qos_sq(priv, moved_qid);
-		mlx5e_close_qos_sq(priv, moved_qid);
-	}
-
-	/* Prevent packets from the old class from getting into the new one. */
-	mlx5e_reset_qdisc(priv->netdev, moved_qid);
-
-	__set_bit(qid, priv->htb.qos_used_qids);
-	WRITE_ONCE(node->qid, qid);
-
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
-		if (err) {
-			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
-			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
-				 node->classid, moved_qid, qid, err);
-		} else {
-			mlx5e_activate_qos_sq(priv, node);
-		}
-	}
-
-	mlx5e_update_tx_netdev_queues(priv);
-	if (opened)
-		mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
-
-	*classid = node->classid;
-	return 0;
-}
-
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
-			    struct netlink_ext_ack *extack)
-{
-	struct mlx5e_qos_node *node, *parent;
-	u32 old_hw_id, new_hw_id;
-	int err, saved_err = 0;
-	u16 qid;
-
-	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
-		force ? "_FORCE" : "", classid);
-
-	node = mlx5e_sw_node_find(priv, classid);
-	if (!node)
-		return -ENOENT;
-
-	err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
-					node->parent->bw_share,
-					node->parent->max_average_bw,
-					&new_hw_id);
-	if (err) {
-		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
-		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
-			classid, err);
-		if (!force)
-			return err;
-		saved_err = err;
-	}
-
-	/* Store qid for reuse and prevent clearing the bit. */
-	qid = node->qid;
-	/* Pairs with mlx5e_get_txq_by_classid. */
-	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
-
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		mlx5e_deactivate_qos_sq(priv, qid);
-		mlx5e_close_qos_sq(priv, qid);
-	}
-
-	/* Prevent packets from the old class from getting into the new one. */
-	mlx5e_reset_qdisc(priv->netdev, qid);
-
-	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
-	if (err) /* Not fatal. */
-		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
-			 node->hw_id, classid, err);
-
-	parent = node->parent;
-	mlx5e_sw_node_delete(priv, node);
-
-	node = parent;
-	WRITE_ONCE(node->qid, qid);
-
-	/* Early return on error in force mode. Parent will still be an inner
-	 * node to be deleted by a following delete operation.
-	 */
-	if (saved_err)
-		return saved_err;
-
-	old_hw_id = node->hw_id;
-	node->hw_id = new_hw_id;
-
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
-		if (err) {
-			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
-			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
-				 classid, err);
-		} else {
-			mlx5e_activate_qos_sq(priv, node);
-		}
-	}
-
-	err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
-	if (err) /* Not fatal. */
-		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
-			 node->hw_id, classid, err);
-
-	return 0;
-}
-
-static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
-				     struct netlink_ext_ack *extack)
-{
-	struct mlx5e_qos_node *child;
-	int err = 0;
-	int bkt;
-
-	hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
-		u32 old_bw_share = child->bw_share;
-		int err_one;
-
-		if (child->parent != node)
-			continue;
-
-		mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
-		if (child->bw_share == old_bw_share)
-			continue;
-
-		err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
-					       child->max_average_bw, child->hw_id);
-		if (!err && err_one) {
-			err = err_one;
-
-			NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
-			qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
-				node->classid, err);
-		}
-	}
-
-	return err;
-}
-
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
-			  struct netlink_ext_ack *extack)
-{
-	u32 bw_share, max_average_bw;
-	struct mlx5e_qos_node *node;
-	bool ceil_changed = false;
-	int err;
-
-	qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
-		classid, rate, ceil);
-
-	node = mlx5e_sw_node_find(priv, classid);
-	if (!node)
-		return -ENOENT;
-
-	node->rate = rate;
-	mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
-	mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
-
-	err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
-				   max_average_bw, node->hw_id);
-	if (err) {
-		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
-		qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
-			classid, err);
-		return err;
-	}
-
-	if (max_average_bw != node->max_average_bw)
-		ceil_changed = true;
-
-	node->bw_share = bw_share;
-	node->max_average_bw = max_average_bw;
-
-	if (ceil_changed)
-		err = mlx5e_qos_update_children(priv, node, extack);
-
-	return err;
-}
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
+{
+	struct mlx5e_htb *htb = priv->htb;
+	int res;
+
+	if (!htb && htb_qopt->command != TC_HTB_CREATE)
+		return -EINVAL;
+
+	switch (htb_qopt->command) {
+	case TC_HTB_CREATE:
+		if (!mlx5_qos_is_supported(priv->mdev)) {
+			NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+					   "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+			return -EOPNOTSUPP;
+		}
+		priv->htb = mlx5e_htb_alloc();
+		htb = priv->htb;
+		if (!htb)
+			return -ENOMEM;
+		res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
+		if (res) {
+			mlx5e_htb_free(htb);
+			priv->htb = NULL;
+		}
+		return res;
+	case TC_HTB_DESTROY:
+		mlx5e_htb_cleanup(htb);
+		mlx5e_htb_free(htb);
+		priv->htb = NULL;
+		return 0;
+	case TC_HTB_LEAF_ALLOC_QUEUE:
+		res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
+						 htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+		if (res < 0)
+			return res;
+		htb_qopt->qid = res;
+		return 0;
+	case TC_HTB_LEAF_TO_INNER:
+		return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
+					       htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
+	case TC_HTB_LEAF_DEL:
+		return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
+	case TC_HTB_LEAF_DEL_LAST:
+	case TC_HTB_LEAF_DEL_LAST_FORCE:
+		return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
+					       htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+					       htb_qopt->extack);
+	case TC_HTB_NODE_MODIFY:
+		return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
+					     htb_qopt->extack);
+	case TC_HTB_LEAF_QUERY_QUEUE:
+		res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
+		if (res < 0)
+			return res;
+		htb_qopt->qid = res;
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
struct mlx5e_mqprio_rl {
@@ -1111,3 +515,4 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
	*hw_id = rl->leaves_id[tc];
	return 0;
}
@@ -6,40 +6,39 @@
#include <linux/mlx5/driver.h>

-#define MLX5E_QOS_MAX_LEAF_NODES 256
+#define BYTES_IN_MBIT 125000

struct mlx5e_priv;
+struct mlx5e_htb;
struct mlx5e_channels;
struct mlx5e_channel;
+struct tc_htb_qopt_offload;

int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
-int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
-
-/* TX datapath API */
-int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);

/* SQ lifecycle */
+int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+		      u16 node_qid, u32 hw_id);
+int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id);
+void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
+void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
+void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
+
int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs);
void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs);
+int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+
+/* TX datapath API */
+u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);

/* HTB API */
-int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
-		       struct netlink_ext_ack *extack);
-int mlx5e_htb_root_del(struct mlx5e_priv *priv);
-int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
-			       u32 parent_classid, u64 rate, u64 ceil,
-			       struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
-			    u64 rate, u64 ceil, struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
-		       struct netlink_ext_ack *extack);
-int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
-			    struct netlink_ext_ack *extack);
-int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
-			  struct netlink_ext_ack *extack);
+int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb);

/* MQPRIO TX rate limit */
struct mlx5e_mqprio_rl;
......
@@ -7,6 +7,7 @@
#include <linux/rcupdate.h>
#include "en.h"
#include "en/ptp.h"
+#include "en/htb.h"

struct mlx5e_selq_params {
	unsigned int num_regular_queues;
@@ -19,6 +20,8 @@ struct mlx5e_selq_params {
			bool is_ptp : 1;
		};
	};
+	u16 htb_maj_id;
+	u16 htb_defcls;
};

int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
@@ -44,6 +47,8 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
		.num_tcs = 1,
		.is_htb = false,
		.is_ptp = false,
+		.htb_maj_id = 0,
+		.htb_defcls = 0,
	};

	rcu_assign_pointer(selq->active, init_params);
@@ -64,21 +69,50 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
	selq->standby = NULL;
}

-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
{
+	struct mlx5e_selq_params *selq_active;
+
	lockdep_assert_held(selq->state_lock);
	WARN_ON_ONCE(selq->is_prepared);

	selq->is_prepared = true;

+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
	selq->standby->num_channels = params->num_channels;
	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
	selq->standby->num_regular_queues =
		selq->standby->num_channels * selq->standby->num_tcs;
-	selq->standby->is_htb = htb;
	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
}

+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq)
+{
+	struct mlx5e_selq_params *selq_active =
+		rcu_dereference_protected(selq->active, lockdep_is_held(selq->state_lock));
+
+	return selq_active->htb_maj_id;
+}
+
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls)
+{
+	struct mlx5e_selq_params *selq_active;
+
+	lockdep_assert_held(selq->state_lock);
+	WARN_ON_ONCE(selq->is_prepared);
+
+	selq->is_prepared = true;
+
+	selq_active = rcu_dereference_protected(selq->active,
+						lockdep_is_held(selq->state_lock));
+	*selq->standby = *selq_active;
+	selq->standby->is_htb = htb_maj_id;
+	selq->standby->htb_maj_id = htb_maj_id;
+	selq->standby->htb_defcls = htb_defcls;
+}
+
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
	struct mlx5e_selq_params *old_params;
@@ -137,20 +171,21 @@ static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
	return selq->num_regular_queues + up;
}

-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+				  struct mlx5e_selq_params *selq)
{
	u16 classid;

	/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
-	if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+	if ((TC_H_MAJ(skb->priority) >> 16) == selq->htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
-		classid = READ_ONCE(priv->htb.defcls);
+		classid = selq->htb_defcls;

	if (!classid)
		return 0;

-	return mlx5e_get_txq_by_classid(priv, classid);
+	return mlx5e_htb_get_txq_by_classid(priv->htb, classid);
}

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -187,10 +222,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
			       up * selq->num_channels;
	}

-	if (unlikely(selq->is_htb)) {
+	if (unlikely(selq->htb_maj_id)) {
		/* num_tcs == 1, shortcut for PTP */

-		txq_ix = mlx5e_select_htb_queue(priv, skb);
+		txq_ix = mlx5e_select_htb_queue(priv, skb, selq);
		if (txq_ix > 0)
			return txq_ix;
......
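The selq hunks above rely on a two-buffer pattern: prepare_* fills a standby mlx5e_selq_params under state_lock, starting from a snapshot of the active one, and mlx5e_selq_apply publishes it with rcu_assign_pointer() so mlx5e_select_queue() always reads one consistent snapshot per packet. A condensed userspace model of that prepare/apply flip, using C11 atomics in place of RCU (all names here are hypothetical stand-ins, not the driver's API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct selq_params {
    	unsigned int num_channels;
    	unsigned int num_tcs;
    	bool is_ptp;
    };

    struct selq {
    	_Atomic(struct selq_params *) active;	/* read by the TX fast path */
    	struct selq_params *standby;		/* written under the state lock */
    };

    /* Fill the standby buffer from a snapshot of the active one. */
    void selq_prepare(struct selq *s, unsigned int ch, unsigned int tcs)
    {
    	*s->standby = *atomic_load_explicit(&s->active, memory_order_relaxed);
    	s->standby->num_channels = ch;
    	s->standby->num_tcs = tcs;
    }

    /* Publish standby and recycle the old active as the next standby.
     * The kernel uses rcu_assign_pointer()/rcu_dereference() here and only
     * reuses the old buffer after a grace period; acq_rel stands in for that.
     */
    void selq_apply(struct selq *s)
    {
    	struct selq_params *old =
    		atomic_exchange_explicit(&s->active, s->standby,
    					 memory_order_acq_rel);
    	s->standby = old;	/* safe only once readers are done (RCU) */
    }

Copying the active snapshot first is what lets mlx5e_selq_prepare_htb() change only the HTB fields while every other queue-selection parameter stays as previously applied.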
@@ -21,7 +21,9 @@ struct sk_buff;

int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
-void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params);
+void mlx5e_selq_prepare_htb(struct mlx5e_selq *selq, u16 htb_maj_id, u16 htb_defcls);
+bool mlx5e_selq_is_htb_enabled(struct mlx5e_selq *selq);
void mlx5e_selq_apply(struct mlx5e_selq *selq);
void mlx5e_selq_cancel(struct mlx5e_selq *selq);
......
@@ -36,7 +36,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
	int err;

	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
-		if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
			mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
		err = -EOPNOTSUPP;
		goto err_check;
......
@@ -2062,7 +2062,7 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
		/* Ignore_flow_level support isn't supported by default for VFs and so post_act
		 * won't be supported. Skip showing error msg.
		 */
-		if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
			err_msg = "post action is missing";
		err = -EOPNOTSUPP;
		goto out_err;
......
@@ -30,6 +30,8 @@
 * SOFTWARE.
 */

+#include <linux/ethtool_netlink.h>
+
#include "en.h"
#include "en/port.h"
#include "en/params.h"
@@ -305,12 +307,18 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
}

void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
-				 struct ethtool_ringparam *param)
+				 struct ethtool_ringparam *param,
+				 struct kernel_ethtool_ringparam *kernel_param)
{
	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
	param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
	param->tx_pending = 1 << priv->channels.params.log_sq_size;
+
+	kernel_param->tcp_data_split =
+		(priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
+		ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+		ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}

static void mlx5e_get_ringparam(struct net_device *dev,
@@ -320,7 +328,7 @@ static void mlx5e_get_ringparam(struct net_device *dev,
{
	struct mlx5e_priv *priv = netdev_priv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
@@ -451,7 +459,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
	 * because the numeration of the QoS SQs will change, while per-queue
	 * qdiscs are attached.
	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
		err = -EINVAL;
		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
			   __func__);
@@ -2067,7 +2075,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
	 * the numeration of the QoS SQs will change, while per-queue qdiscs are
	 * attached.
	 */
-	if (priv->htb.maj_id) {
+	if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
		netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
			   __func__);
		return -EINVAL;
......
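With the ringparam hunks above, `ethtool -g <dev>` (the ethtool netlink RINGS_GET request) now reports a "TCP data split" field, which reads enabled exactly when the device is in SHAMPO (header/data split) packet-merge mode. The tri-state values mirror include/uapi/linux/ethtool.h; a minimal sketch of the mapping, with local enum copies standing in for the uapi and driver definitions:

    /* Local mirrors of the uapi tri-state and the driver's merge modes. */
    enum tcp_data_split {
    	TCP_DATA_SPLIT_UNKNOWN = 0,
    	TCP_DATA_SPLIT_DISABLED,
    	TCP_DATA_SPLIT_ENABLED,
    };

    enum packet_merge_type { MERGE_NONE, MERGE_LRO, MERGE_SHAMPO };

    /* Header-data split is a property of the SHAMPO receive mode, so the
     * report is derived from the active packet-merge type rather than from
     * a separately stored flag.
     */
    static enum tcp_data_split report_hds(enum packet_merge_type t)
    {
    	return t == MERGE_SHAMPO ? TCP_DATA_SPLIT_ENABLED
    				 : TCP_DATA_SPLIT_DISABLED;
    }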
@@ -31,7 +31,6 @@
 */

#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -64,6 +63,7 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
@@ -1912,8 +1912,7 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
{
	int tc;

-	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
-	    !params->mqprio.channel.rl) {
+	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
		*hw_id = 0;
		return 0;
	}
@@ -1922,7 +1921,14 @@ static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
	if (tc < 0)
		return tc;

-	return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+	if (tc >= params->mqprio.num_tc) {
+		WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
+		     tc, params->mqprio.num_tc);
+		return -EINVAL;
+	}
+
+	*hw_id = params->mqprio.channel.hw_id[tc];
+	return 0;
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
@@ -2383,9 +2389,11 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
			goto err_close_channels;
	}

+	if (priv->htb) {
		err = mlx5e_qos_open_queues(priv, chs);
		if (err)
			goto err_close_ptp;
+	}

	mlx5e_health_channels_update(priv);
	kvfree(cparam);
@@ -2567,9 +2575,11 @@ static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
-	int qos_queues, nch, ntc, num_txqs, err;
+	int nch, ntc, num_txqs, err;
+	int qos_queues = 0;

-	qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+	if (priv->htb)
+		qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);

	nch = priv->channels.params.num_channels;
	ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
@@ -2615,13 +2625,6 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
		goto err_txqs;
	}

-	if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
-		if (priv->mqprio_rl) {
-			mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
-			mlx5e_mqprio_rl_free(priv->mqprio_rl);
-		}
-		priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
-	}

	return 0;
@@ -2724,6 +2727,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_build_txq_maps(priv);
	mlx5e_activate_channels(&priv->channels);
+	if (priv->htb)
		mlx5e_qos_activate_queues(priv);
	mlx5e_xdp_tx_enable(priv);
@@ -2841,7 +2845,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
	new_chs.params = *params;

-	mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+	mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);

	err = mlx5e_open_channels(priv, &new_chs);
	if (err)
@@ -2897,7 +2901,7 @@ int mlx5e_open_locked(struct net_device *netdev)
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

-	mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+	mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);

	set_bit(MLX5E_STATE_OPENED, &priv->state);
@@ -3135,6 +3139,11 @@ int mlx5e_create_tises(struct mlx5e_priv *priv)
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+		priv->mqprio_rl = NULL;
+	}
	mlx5e_destroy_tises(priv);
}
@@ -3203,19 +3212,38 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
	params->mqprio.mode = TC_MQPRIO_MODE_DCB;
	params->mqprio.num_tc = num_tc;
-	params->mqprio.channel.rl = NULL;
	mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
					     params->num_channels);
}

+static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
+					  struct mlx5e_mqprio_rl *rl)
+{
+	int tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
+		u32 hw_id = 0;
+
+		if (rl)
+			mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
+		params->mqprio.channel.hw_id[tc] = hw_id;
+	}
+}
+
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
-					    struct tc_mqprio_qopt *qopt,
+					    struct tc_mqprio_qopt_offload *mqprio,
					    struct mlx5e_mqprio_rl *rl)
{
+	int tc;
+
	params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
-	params->mqprio.num_tc = qopt->num_tc;
-	params->mqprio.channel.rl = rl;
-	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
+	params->mqprio.num_tc = mqprio->qopt.num_tc;
+
+	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+		params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
+
+	mlx5e_mqprio_rl_update_params(params, rl);
+
+	mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}

static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
@@ -3241,6 +3269,12 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_num_channels_changed_ctx, NULL, true);

+	if (!err && priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+		priv->mqprio_rl = NULL;
+	}
+
	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
				    mlx5e_get_dcb_num_tc(&priv->channels.params));
	return err;
@@ -3299,16 +3333,38 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
	return 0;
}

-static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
	int tc;

-	for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
-		if (mqprio->max_rate[tc])
+	for (tc = 0; tc < num_tc; tc++)
+		if (max_rate[tc])
			return true;
	return false;
}

+static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
+						      u8 num_tc, u64 max_rate[])
+{
+	struct mlx5e_mqprio_rl *rl;
+	int err;
+
+	if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
+		return NULL;
+
+	rl = mlx5e_mqprio_rl_alloc();
+	if (!rl)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
+	if (err) {
+		mlx5e_mqprio_rl_free(rl);
+		return ERR_PTR(err);
+	}
+
+	return rl;
+}
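mlx5e_mqprio_rl_create() deliberately has three distinct outcomes: NULL when no per-TC max_rate was requested (not an error), an ERR_PTR-encoded errno on failure, and a usable object on success. A minimal userspace sketch of that ERR_PTR idiom, with a hypothetical rl_create() standing in for the driver function (the helpers below model, not reuse, the kernel's err.h macros):

    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* Errno values are folded into the top of the pointer space, so one
     * return value can carry NULL, a valid object, or an error code.
     */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
    	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct rl { int dummy; };

    /* Mirrors the tri-state contract of mlx5e_mqprio_rl_create():
     * NULL -> rate limiting not requested; ERR_PTR -> failure; else success.
     */
    static struct rl *rl_create(int want_rl, int fail)
    {
    	if (!want_rl)
    		return NULL;
    	if (fail)
    		return ERR_PTR(-12 /* -ENOMEM */);
    	return malloc(sizeof(struct rl));
    }

    int use_rl(void)
    {
    	struct rl *rl = rl_create(1, 0);

    	if (IS_ERR(rl))
    		return (int)PTR_ERR(rl);	/* propagate the error */
    	if (rl)					/* NULL is a valid "disabled" state */
    		free(rl);
    	return 0;
    }

This is what lets the caller below treat NULL as "keep going without a rate limiter" instead of threading a separate boolean through the setup path.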
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
					 struct tc_mqprio_qopt_offload *mqprio)
{
@@ -3322,32 +3378,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
	if (err)
		return err;

-	rl = NULL;
-	if (mlx5e_mqprio_rate_limit(mqprio)) {
-		rl = mlx5e_mqprio_rl_alloc();
-		if (!rl)
-			return -ENOMEM;
-		err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
-					   mqprio->max_rate);
-		if (err) {
-			mlx5e_mqprio_rl_free(rl);
-			return err;
-		}
-	}
+	rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
+	if (IS_ERR(rl))
+		return PTR_ERR(rl);

	new_params = priv->channels.params;
-	mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
+	mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);

	nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
	preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
		mlx5e_update_netdev_queues_ctx;
	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
-	if (err && rl) {
-		mlx5e_mqprio_rl_cleanup(rl);
-		mlx5e_mqprio_rl_free(rl);
-	}
-
-	return err;
+	if (err) {
+		if (rl) {
+			mlx5e_mqprio_rl_cleanup(rl);
+			mlx5e_mqprio_rl_free(rl);
+		}
+		return err;
+	}
+
+	if (priv->mqprio_rl) {
+		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+		mlx5e_mqprio_rl_free(priv->mqprio_rl);
+	}
+	priv->mqprio_rl = rl;
+	return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3356,7 +3412,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
	/* MQPRIO is another toplevel qdisc that can't be attached
	 * simultaneously with the offloaded HTB.
	 */
-	if (WARN_ON(priv->htb.maj_id))
+	if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
		return -EINVAL;

	switch (mqprio->mode) {
@@ -3369,47 +3425,6 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
	}
}

-static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
-{
-	int res;
-
-	switch (htb->command) {
-	case TC_HTB_CREATE:
-		return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
-					  htb->extack);
-	case TC_HTB_DESTROY:
-		return mlx5e_htb_root_del(priv);
-	case TC_HTB_LEAF_ALLOC_QUEUE:
-		res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
-						 htb->rate, htb->ceil, htb->extack);
-		if (res < 0)
-			return res;
-		htb->qid = res;
-		return 0;
-	case TC_HTB_LEAF_TO_INNER:
-		return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
-					       htb->rate, htb->ceil, htb->extack);
-	case TC_HTB_LEAF_DEL:
-		return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
-	case TC_HTB_LEAF_DEL_LAST:
-	case TC_HTB_LEAF_DEL_LAST_FORCE:
-		return mlx5e_htb_leaf_del_last(priv, htb->classid,
-					       htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
-					       htb->extack);
-	case TC_HTB_NODE_MODIFY:
-		return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
-					     htb->extack);
-	case TC_HTB_LEAF_QUERY_QUEUE:
-		res = mlx5e_get_txq_by_classid(priv, htb->classid);
-		if (res < 0)
-			return res;
-		htb->qid = res;
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
static LIST_HEAD(mlx5e_block_cb_list);

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3443,7 +3458,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
		return err;
	case TC_SETUP_QDISC_HTB:
		mutex_lock(&priv->state_lock);
-		err = mlx5e_setup_tc_htb(priv, type_data);
+		err = mlx5e_htb_setup_tc(priv, type_data);
		mutex_unlock(&priv->state_lock);
		return err;
	default:
@@ -3663,6 +3678,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
@@ -3672,12 +3688,14 @@ static int set_feature_hw_tc(struct net_device *netdev, bool enable)
	}
#endif

-	if (!enable && priv->htb.maj_id) {
+	mutex_lock(&priv->state_lock);
+	if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
		netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
-		return -EINVAL;
+		err = -EINVAL;
	}
+	mutex_unlock(&priv->state_lock);

-	return 0;
+	return err;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
@@ -5102,6 +5120,23 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
	priv->rx_res = NULL;
}

+static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
+{
+	struct mlx5e_params *params;
+	struct mlx5e_mqprio_rl *rl;
+
+	params = &priv->channels.params;
+	if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
+		return;
+
+	rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
+				    params->mqprio.channel.max_rate);
+	if (IS_ERR(rl))
+		rl = NULL;
+	priv->mqprio_rl = rl;
+	mlx5e_mqprio_rl_update_params(params, rl);
+}
+
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;
@@ -5112,6 +5147,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
		return err;
	}

+	mlx5e_set_mqprio_rl(priv);
	mlx5e_dcbnl_initialize(priv);

	return 0;
}
@@ -5285,7 +5321,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
	if (err)
		goto err_free_cpumask;

-	hash_init(priv->htb.qos_tc2node);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5342,14 +5377,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
	mutex_unlock(&priv->state_lock);
	free_cpumask_var(priv->scratchpad.cpumask);

-	for (i = 0; i < priv->htb.max_qos_sqs; i++)
-		kfree(priv->htb.qos_sq_stats[i]);
-	kvfree(priv->htb.qos_sq_stats);
-
-	if (priv->mqprio_rl) {
-		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
-		mlx5e_mqprio_rl_free(priv->mqprio_rl);
-	}
+	for (i = 0; i < priv->htb_max_qos_sqs; i++)
+		kfree(priv->htb_qos_sq_stats[i]);
+	kvfree(priv->htb_qos_sq_stats);

	memset(priv, 0, sizeof(*priv));
}
......
@@ -229,7 +229,7 @@ mlx5e_rep_get_ringparam(struct net_device *dev,
{
	struct mlx5e_priv *priv = netdev_priv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int
......
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2184,13 +2186,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2210,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-	stats = READ_ONCE(priv->htb.qos_sq_stats);
+	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
......
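The stats readers above pair smp_load_acquire() on htb_max_qos_sqs with an smp_store_release() in mlx5e_open_qos_sq: a reader that observes the enlarged count is guaranteed to also observe the stats pointers published before it. A toy C11 model of that publish/consume pattern, with stdatomic standing in for the kernel primitives (names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stddef.h>

    /* "sq_stats"/"max_qos_sqs" mirror priv->htb_qos_sq_stats and
     * priv->htb_max_qos_sqs from the hunks above.
     */
    static _Atomic(size_t) max_qos_sqs;
    static int *sq_stats[64];

    void publish_sq(size_t qid, int *stats)
    {
    	sq_stats[qid] = stats;	/* plain store, ordered by the release below */
    	/* smp_store_release() equivalent: everything written before this
    	 * store is visible to a reader that load-acquires the new count.
    	 */
    	atomic_store_explicit(&max_qos_sqs, qid + 1, memory_order_release);
    }

    long sum_stats(void)
    {
    	/* smp_load_acquire() equivalent. */
    	size_t n = atomic_load_explicit(&max_qos_sqs, memory_order_acquire);
    	long sum = 0;

    	for (size_t i = 0; i < n; i++)
    		sum += *sq_stats[i];	/* safe: publish ordered before n */
    	return sum;
    }

This is why the refactor can move the counter and the array out of struct mlx5e_htb into mlx5e_priv without adding any locking: the acquire/release pair is the whole synchronization contract.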
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
	u64 err_cqe;
	u64 abort;
	u64 abort_abs_diff_ns;
+	u64 resync_cqe;
+	u64 resync_event;
};

struct mlx5e_stats {
......
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
		mlx5e_tx_mpwqe_session_complete(sq);
}

+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+				 struct mlx5_wqe_eth_seg *eseg)
+{
+	if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+		eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+							ptpsq->ts_cqe_ctr_mask);
+}
+
static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+	if (unlikely(sq->ptpsq))
+		mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
......
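The hunk above stamps each PTP skb's WQE with the low bits of the skb FIFO producer counter, so the port-timestamp CQE can echo that id back. If an echoed id is ahead of the FIFO consumer position, the completions in between were lost and their skbs can be reclaimed instead of stalling the FIFO — that is the resiliency the series adds. A standalone sketch of the wraparound matching (the struct and field names are illustrative, not the driver's exact layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Each in-flight PTP skb is stamped with (producer counter & mask);
     * a completion echoes that id back.
     */
    struct ptp_fifo {
    	uint16_t pc;	/* producer counter, stamped into the WQE */
    	uint16_t cc;	/* consumer counter, advanced per completion */
    	uint16_t mask;	/* e.g. BIT(ts_cqe_metadata_size2wqe_counter) - 1 */
    };

    /* Returns how many completions were skipped before this one. */
    static uint16_t ptp_fifo_resync(struct ptp_fifo *f, uint16_t cqe_id)
    {
    	uint16_t skipped = (cqe_id - f->cc) & f->mask;

    	f->cc = (cqe_id + 1) & f->mask;	/* consume through the echoed id */
    	return skipped;
    }

    int main(void)
    {
    	struct ptp_fifo f = { .pc = 5, .cc = 2, .mask = (1u << 5) - 1 };

    	/* A completion arrives for id 4 while id 2 was expected:
    	 * ids 2 and 3 were lost and must be treated as aborted.
    	 */
    	printf("skipped: %u\n", ptp_fifo_resync(&f, 4)); /* prints 2 */
    	return 0;
    }

The resync_cqe/resync_event counters added to mlx5e_ptp_cq_stats above count exactly these recovery events.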
@@ -83,7 +83,7 @@ static void mlx5i_get_ringparam(struct net_device *dev,
{
	struct mlx5e_priv *priv = mlx5i_epriv(dev);

-	mlx5e_ethtool_get_ringparam(priv, param);
+	mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}

static int mlx5i_set_channels(struct net_device *dev,
......
@@ -1833,7 +1833,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
	u8	sw_vhca_id[0xe];
	u8	reserved_at_230[0x10];

-	u8	reserved_at_240[0x5c0];
+	u8	reserved_at_240[0xb];
+	u8	ts_cqe_metadata_size2wqe_counter[0x5];
+	u8	reserved_at_250[0x10];
+
+	u8	reserved_at_260[0x5a0];
};

enum mlx5_ifc_flow_destination_type {
......
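The new capability carves 5 bits out of the reserved space without changing the struct layout: 0xb + 0x5 + 0x10 + 0x5a0 = 0x5c0 bits, exactly the size of the old reserved_at_240 field. The value encodes the width of the WQE counter echoed through the CQE metadata, so a driver can derive the counter mask from it; a small sketch of that derivation (the helper name is illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A zero capability means the feature is unsupported, so the mask
     * stays 0 and the PTP metadata stamping above is skipped.
     */
    static uint32_t ts_cqe_ctr_mask(uint8_t size2wqe_counter)
    {
    	return size2wqe_counter ? (1u << size2wqe_counter) - 1 : 0;
    }

    int main(void)
    {
    	/* Bit accounting of the hunk above: the new fields replace
    	 * reserved_at_240[0x5c0] without moving anything after them.
    	 */
    	assert(0xb + 0x5 + 0x10 + 0x5a0 == 0x5c0);

    	printf("mask for a 6-bit counter: 0x%x\n", ts_cqe_ctr_mask(6)); /* 0x3f */
    	return 0;
    }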