Commit 186abfcd authored by Jakub Kicinski

Merge branch 'mlx5-misc-fixes'

Tariq Toukan says:

====================
mlx5 misc fixes

This patchset provides bug fixes to the mlx5 driver.

This is V2 of the series previously submitted as a PR by Saeed:
https://lore.kernel.org/netdev/20240326144646.2078893-1-saeed@kernel.org/T/

Series generated against:
commit 237f3cf1 ("xsk: validate user input for XDP_{UMEM|COMPLETION}_FILL_RING")
====================

Link: https://lore.kernel.org/r/20240409190820.227554-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 65acf6e0 7772dc74
@@ -95,9 +95,15 @@ static inline void mlx5e_ptp_metadata_fifo_push(struct mlx5e_ptp_metadata_fifo *
 }
 
 static inline u8
+mlx5e_ptp_metadata_fifo_peek(struct mlx5e_ptp_metadata_fifo *fifo)
+{
+	return fifo->data[fifo->mask & fifo->cc];
+}
+
+static inline void
 mlx5e_ptp_metadata_fifo_pop(struct mlx5e_ptp_metadata_fifo *fifo)
 {
-	return fifo->data[fifo->mask & fifo->cc++];
+	fifo->cc++;
 }
 
 static inline void
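The hunk above splits the old pop (which returned the element and advanced the consumer counter in one step) into a non-destructive peek and a counter-only pop. Below is a minimal userspace model of that peek-then-commit pattern; the struct layout mirrors the driver's FIFO, but the names, sizes, and main() driver are illustrative, not the kernel code.

/* Minimal userspace model of the peek-then-commit freelist pattern;
 * names and sizes are illustrative, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

struct meta_fifo {
	uint8_t data[8];
	uint8_t mask;	/* size - 1, size is a power of two */
	uint8_t pc;	/* producer counter (push side, unused here) */
	uint8_t cc;	/* consumer counter */
};

/* Read the next free index without consuming it. */
static uint8_t fifo_peek(struct meta_fifo *f)
{
	return f->data[f->mask & f->cc];
}

/* Consume the entry only once the submission cannot fail anymore. */
static void fifo_pop(struct meta_fifo *f)
{
	f->cc++;
}

int main(void)
{
	struct meta_fifo f = { .data = {0, 1, 2, 3, 4, 5, 6, 7}, .mask = 7 };
	uint8_t idx = fifo_peek(&f);	/* stamp the descriptor with idx */

	/* If building or submitting the descriptor fails here, the skb is
	 * simply dropped: the index was never consumed, so nothing has to
	 * be pushed back.
	 */
	fifo_pop(&f);			/* commit: submission succeeded */
	printf("used metadata index %u\n", idx);
	return 0;
}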
@@ -83,24 +83,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
 	txq_ix = mlx5e_qid_from_qos(chs, node_qid);
 
-	WARN_ON(node_qid > priv->htb_max_qos_sqs);
-	if (node_qid == priv->htb_max_qos_sqs) {
-		struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
-		if (priv->htb_max_qos_sqs == 0) {
-			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
-					      sizeof(*stats_list),
-					      GFP_KERNEL);
-			if (!stats_list)
-				return -ENOMEM;
-		}
+	WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+	if (!priv->htb_qos_sq_stats) {
+		struct mlx5e_sq_stats **stats_list;
+
+		stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+				      sizeof(*stats_list), GFP_KERNEL);
+		if (!stats_list)
+			return -ENOMEM;
+
+		WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+	}
+
+	if (!priv->htb_qos_sq_stats[node_qid]) {
+		struct mlx5e_sq_stats *stats;
 
 		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
-		if (!stats) {
-			kvfree(stats_list);
+		if (!stats)
 			return -ENOMEM;
-		}
-
-		if (stats_list)
-			WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
 		WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
 		/* Order htb_max_qos_sqs increment after writing the array pointer.
 		 * Pairs with smp_load_acquire in en_stats.c.
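The comment retained at the end of this hunk names the publication protocol the new code relies on: each stats pointer is written with WRITE_ONCE() before the count that makes it visible is advanced, and the reader in en_stats.c pairs that with smp_load_acquire(). The following is a self-contained C11 analogue of that ordering, with illustrative names and a userspace build; it is a sketch of the pattern, not the driver's code.

/* Userspace analogue (C11 atomics) of the publish pattern above: the
 * slot is written first, then the count is released; readers acquire
 * the count before dereferencing the slot. Names are illustrative.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct sq_stats { unsigned long packets; };

static struct sq_stats *slots[64];
static atomic_uint nr_slots;

int publish_slot(unsigned int qid)
{
	struct sq_stats *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;
	slots[qid] = s;	/* plain store, like WRITE_ONCE() */
	/* release: pairs with the acquire load in the reader */
	atomic_store_explicit(&nr_slots, qid + 1, memory_order_release);
	return 0;
}

struct sq_stats *read_slot(unsigned int qid)
{
	unsigned int n = atomic_load_explicit(&nr_slots, memory_order_acquire);

	return qid < n ? slots[qid] : NULL;	/* slot write is visible */
}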
@@ -179,6 +179,13 @@ u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
 	return min_t(u32, rqt_size, max_cap_rqt_size);
 }
 
+#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
+
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
+{
+	return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
+}
+
 void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
 {
 	mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
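The helper added above caps the channel count whenever the XOR8 hash function is in use: an RQT driven by XOR8 may have at most 256 entries, and each channel consumes MLX5E_UNIFORM_SPREAD_RQT_FACTOR of them. A back-of-envelope version, assuming the factor is 2 (its value in contemporary kernels; treat that as an assumption here), gives a limit of 128 channels:

/* Back-of-envelope check, assuming MLX5E_UNIFORM_SPREAD_RQT_FACTOR == 2. */
#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256
#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2	/* assumed value */

/* 256 / 2 == 128 channels at most while the XOR8 hfunc is active */
unsigned int xor8_max_channels(void)
{
	return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH /
	       MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
}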
@@ -38,6 +38,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
 }
 
 u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
+unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void);
 int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
 int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
 			     unsigned int num_rqns,
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
 
 void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
 {
+	mutex_lock(selq->state_lock);
 	WARN_ON_ONCE(selq->is_prepared);
 
 	kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
 
 	kvfree(selq->standby);
 	selq->standby = NULL;
+	mutex_unlock(selq->state_lock);
 }
 
 void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
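With these two hunks, mlx5e_selq_cleanup() takes the state lock itself instead of relying on its caller, which is what lets the en_main.c hunk further down drop the external lock/unlock pair around the call. A minimal pthreads sketch of the same ownership change, with illustrative names:

/* Sketch only: pthreads stand in for the kernel mutex. Cleanup now
 * takes its own lock, so callers no longer wrap it.
 */
#include <pthread.h>
#include <stdlib.h>

struct selq {
	pthread_mutex_t *state_lock;
	void *standby;
};

void selq_cleanup(struct selq *selq)
{
	pthread_mutex_lock(selq->state_lock);	/* was the caller's job */
	free(selq->standby);
	selq->standby = NULL;
	pthread_mutex_unlock(selq->state_lock);
}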
@@ -451,6 +451,34 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	mutex_lock(&priv->state_lock);
 
+	if (mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc == ETH_RSS_HASH_XOR) {
+		unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+		if (count > xor8_max_channels) {
+			err = -EINVAL;
+			netdev_err(priv->netdev, "%s: Requested number of channels (%d) exceeds the maximum allowed by the XOR8 RSS hfunc (%d)\n",
+				   __func__, count, xor8_max_channels);
+			goto out;
+		}
+	}
+
+	/* If RXFH is configured, changing the channels number is allowed only if
+	 * it does not require resizing the RSS table. This is because the previous
+	 * configuration may no longer be compatible with the new RSS table.
+	 */
+	if (netif_is_rxfh_configured(priv->netdev)) {
+		int cur_rqt_size = mlx5e_rqt_size(priv->mdev, cur_params->num_channels);
+		int new_rqt_size = mlx5e_rqt_size(priv->mdev, count);
+
+		if (new_rqt_size != cur_rqt_size) {
+			err = -EINVAL;
+			netdev_err(priv->netdev,
+				   "%s: RXFH is configured, block changing channels number that affects RSS table size (new: %d, current: %d)\n",
+				   __func__, new_rqt_size, cur_rqt_size);
+			goto out;
+		}
+	}
+
 	/* Don't allow changing the number of channels if HTB offload is active,
 	 * because the numeration of the QoS SQs will change, while per-queue
 	 * qdiscs are attached.
@@ -1281,17 +1309,30 @@ int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	u32 *rss_context = &rxfh->rss_context;
 	u8 hfunc = rxfh->hfunc;
+	unsigned int count;
 	int err;
 
 	mutex_lock(&priv->state_lock);
+
+	count = priv->channels.params.num_channels;
+
+	if (hfunc == ETH_RSS_HASH_XOR) {
+		unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
+
+		if (count > xor8_max_channels) {
+			err = -EINVAL;
+			netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
+				   __func__, count, xor8_max_channels);
+			goto unlock;
+		}
+	}
+
 	if (*rss_context && rxfh->rss_delete) {
 		err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
 		goto unlock;
 	}
 
 	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
-		unsigned int count = priv->channels.params.num_channels;
-
 		err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
 		if (err)
 			goto unlock;
@@ -5726,9 +5726,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 	kfree(priv->tx_rates);
 	kfree(priv->txq2sq);
 	destroy_workqueue(priv->wq);
-	mutex_lock(&priv->state_lock);
 	mlx5e_selq_cleanup(&priv->selq);
-	mutex_unlock(&priv->state_lock);
 	free_cpumask_var(priv->scratchpad.cpumask);
 
 	for (i = 0; i < priv->htb_max_qos_sqs; i++)
@@ -398,6 +398,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
 		u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
 
+		mlx5e_ptp_metadata_fifo_pop(&sq->ptpsq->metadata_freelist);
+
 		mlx5e_skb_cb_hwtstamp_init(skb);
 		mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
 					   metadata_index);
@@ -496,9 +498,6 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 err_drop:
 	stats->dropped++;
-	if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
-		mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
-					     be32_to_cpu(eseg->flow_table_metadata));
 	dev_kfree_skb_any(skb);
 	mlx5e_tx_flush(sq);
 }
@@ -657,7 +656,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
 {
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
 		eseg->flow_table_metadata =
-			cpu_to_be32(mlx5e_ptp_metadata_fifo_pop(&ptpsq->metadata_freelist));
+			cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
 }
 
 static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
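Taken together, the three en_tx.c hunks defer consuming a PTP metadata index to the point of no return: mlx5e_cqe_ts_id_eseg() now only peeks at the next free index while building the egress segment, mlx5e_txwqe_complete() pops it once the WQE is committed and the skb is tracked in the metadata map, and the err_drop path no longer has to push the index back, because a failed transmit never consumed it. This is the same ordering as the peek-then-commit sketch after the first hunk.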
@@ -1868,6 +1868,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (err)
 		goto abort;
 
+	dev->priv.eswitch = esw;
 	err = esw_offloads_init(esw);
 	if (err)
 		goto reps_err;
@@ -1892,11 +1893,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
 	else
 		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-	if (MLX5_ESWITCH_MANAGER(dev) &&
-	    mlx5_esw_vport_match_metadata_supported(esw))
-		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
-
-	dev->priv.eswitch = esw;
 	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
 
 	esw_info(dev,
@@ -1908,6 +1904,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 
 reps_err:
 	mlx5_esw_vports_cleanup(esw);
+	dev->priv.eswitch = NULL;
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
@@ -1926,7 +1923,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw_info(esw->dev, "cleanup\n");
 
-	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
 	WARN_ON(refcount_read(&esw->qos.refcnt));
 	mutex_destroy(&esw->state_lock);
@@ -1937,6 +1933,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
 	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	esw_offloads_cleanup(esw);
+	esw->dev->priv.eswitch = NULL;
 	mlx5_esw_vports_cleanup(esw);
 	debugfs_remove_recursive(esw->debugfs_root);
 	devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
@@ -43,6 +43,7 @@
 #include "rdma.h"
 #include "en.h"
 #include "fs_core.h"
+#include "lib/mlx5.h"
 #include "lib/devcom.h"
 #include "lib/eq.h"
 #include "lib/fs_chains.h"
@@ -2476,6 +2477,10 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
 	if (err)
 		return err;
 
+	if (MLX5_ESWITCH_MANAGER(esw->dev) &&
+	    mlx5_esw_vport_match_metadata_supported(esw))
+		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
 	err = devl_params_register(priv_to_devlink(esw->dev),
 				   esw_devlink_params,
 				   ARRAY_SIZE(esw_devlink_params));
@@ -3707,6 +3712,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
+	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && mlx5_get_sd(esw->dev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Can't change E-Switch mode to switchdev when multi-PF netdev (Socket Direct) is configured.");
+		return -EPERM;
+	}
+
 	mlx5_lag_disable_change(esw->dev);
 	err = mlx5_esw_try_lock(esw);
 	if (err < 0) {
@@ -1664,6 +1664,16 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
 	return err;
 }
 
+static bool mlx5_pkt_reformat_cmp(struct mlx5_pkt_reformat *p1,
+				  struct mlx5_pkt_reformat *p2)
+{
+	return p1->owner == p2->owner &&
+	       (p1->owner == MLX5_FLOW_RESOURCE_OWNER_FW ?
+		p1->id == p2->id :
+		mlx5_fs_dr_action_get_pkt_reformat_id(p1) ==
+		mlx5_fs_dr_action_get_pkt_reformat_id(p2));
+}
+
 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
 				struct mlx5_flow_destination *d2)
 {
@@ -1675,8 +1685,8 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
 		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
 		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
-		      (d1->vport.pkt_reformat->id ==
-		       d2->vport.pkt_reformat->id) : true)) ||
+		      mlx5_pkt_reformat_cmp(d1->vport.pkt_reformat,
+					    d2->vport.pkt_reformat) : true)) ||
 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
 		     d1->ft == d2->ft) ||
 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
@@ -1808,8 +1818,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 	}
 	trace_mlx5_fs_set_fte(fte, false);
 
+	/* Link newly added rules into the tree. */
 	for (i = 0; i < handle->num_rules; i++) {
-		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+		if (!handle->rule[i]->node.parent) {
 			tree_add_node(&handle->rule[i]->node, &fte->node);
 			trace_mlx5_fs_add_rule(handle->rule[i]);
 		}
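mlx5_pkt_reformat_cmp() exists because the two destinations being compared may have been allocated by different owners: firmware-owned reformat objects carry a valid ->id, while software-steering objects must be compared through the id reported by mlx5_fs_dr_action_get_pkt_reformat_id(). A standalone model of that owner-aware comparison, with stand-in types and a hypothetical sw_reformat_id() helper:

/* Standalone model of the owner-aware comparison; the enum, struct and
 * helper are stand-ins, not the driver's definitions.
 */
#include <stdbool.h>
#include <stdint.h>

enum reformat_owner { OWNER_FW, OWNER_SW };

struct pkt_reformat {
	enum reformat_owner owner;
	uint32_t id;		/* valid only for OWNER_FW */
	uint32_t sw_action_id;	/* stand-in for the SW-steering id */
};

static uint32_t sw_reformat_id(const struct pkt_reformat *p)
{
	return p->sw_action_id;
}

/* Two reformat objects match only if the same component allocated them
 * and the id that component assigned is equal; comparing a FW id with
 * an SW-steering id would be meaningless.
 */
static bool pkt_reformat_cmp(const struct pkt_reformat *p1,
			     const struct pkt_reformat *p2)
{
	if (p1->owner != p2->owner)
		return false;
	return p1->owner == OWNER_FW ? p1->id == p2->id :
	       sw_reformat_id(p1) == sw_reformat_id(p2);
}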
@@ -1480,6 +1480,14 @@ int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
 	if (err)
 		goto err_register;
 
+	err = mlx5_crdump_enable(dev);
+	if (err)
+		mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);
+
+	err = mlx5_hwmon_dev_register(dev);
+	if (err)
+		mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
+
 	mutex_unlock(&dev->intf_state_mutex);
 	return 0;
@@ -1505,7 +1513,10 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
 	int err;
 
 	devl_lock(devlink);
+	devl_register(devlink);
 	err = mlx5_init_one_devl_locked(dev);
+	if (err)
+		devl_unregister(devlink);
 	devl_unlock(devlink);
 	return err;
 }
@@ -1517,6 +1528,8 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
 
 	devl_lock(devlink);
 	mutex_lock(&dev->intf_state_mutex);
+	mlx5_hwmon_dev_unregister(dev);
+	mlx5_crdump_disable(dev);
 	mlx5_unregister_device(dev);
 
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1534,6 +1547,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
 	mlx5_function_teardown(dev, true);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
+	devl_unregister(devlink);
 	devl_unlock(devlink);
 }
@@ -1680,16 +1694,20 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
 	}
 
 	devl_lock(devlink);
+	devl_register(devlink);
 	err = mlx5_devlink_params_register(priv_to_devlink(dev));
-	devl_unlock(devlink);
 	if (err) {
 		mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
 		goto query_hca_caps_err;
 	}
 
+	devl_unlock(devlink);
 	return 0;
 
 query_hca_caps_err:
+	devl_unregister(devlink);
+	devl_unlock(devlink);
 	mlx5_function_disable(dev, true);
 out:
 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
@@ -1702,6 +1720,7 @@ void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
 
 	devl_lock(devlink);
 	mlx5_devlink_params_unregister(priv_to_devlink(dev));
+	devl_unregister(devlink);
 	devl_unlock(devlink);
 	if (dev->state != MLX5_DEVICE_STATE_UP)
 		return;
@@ -1943,16 +1962,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_init_one;
 	}
 
-	err = mlx5_crdump_enable(dev);
-	if (err)
-		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
-
-	err = mlx5_hwmon_dev_register(dev);
-	if (err)
-		mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);
-
 	pci_save_state(pdev);
-	devlink_register(devlink);
 	return 0;
 
 err_init_one:
@@ -1973,16 +1983,9 @@ static void remove_one(struct pci_dev *pdev)
 	struct devlink *devlink = priv_to_devlink(dev);
 
 	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
-	/* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
-	 * devlink notify APIs.
-	 * Hence, we must drain them before unregistering the devlink.
-	 */
 	mlx5_drain_fw_reset(dev);
 	mlx5_drain_health_wq(dev);
-	devlink_unregister(devlink);
 	mlx5_sriov_disable(pdev, false);
-	mlx5_hwmon_dev_unregister(dev);
-	mlx5_crdump_disable(dev);
 	mlx5_uninit_one(dev);
 	mlx5_pci_close(dev);
 	mlx5_mdev_uninit(dev);
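The net effect of the main.c hunks is to move devlink registration out of probe_one()/remove_one() and into mlx5_init_one()/mlx5_uninit_one() (and their _light variants), under the devlink instance lock, with the crdump and hwmon setup relocated alongside it. That is also why the sf/dev/driver.c hunk at the end can drop its own devlink_unregister() call: the uninit paths now perform it.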
@@ -19,6 +19,7 @@
 #define MLX5_IRQ_CTRL_SF_MAX 8
 /* min num of vectors for SFs to be enabled */
 #define MLX5_IRQ_VEC_COMP_BASE_SF 2
+#define MLX5_IRQ_VEC_COMP_BASE 1
 
 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
 #define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
@@ -246,6 +247,7 @@ static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
 		return;
 	}
 
+	vecidx -= MLX5_IRQ_VEC_COMP_BASE;
 	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
 }
 
@@ -585,7 +587,7 @@ struct mlx5_irq *mlx5_irq_request_vector(struct mlx5_core_dev *dev, u16 cpu,
 	struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
 	struct mlx5_irq_pool *pool = table->pcif_pool;
 	struct irq_affinity_desc af_desc;
-	int offset = 1;
+	int offset = MLX5_IRQ_VEC_COMP_BASE;
 
 	if (!pool->xa_num_irqs.max)
 		offset = 0;
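Vector 0 of the PCI function's pool is the control IRQ, so completion vectors start at MLX5_IRQ_VEC_COMP_BASE; subtracting the base before formatting keeps the names zero-based (the first completion vector becomes mlx5_comp0 rather than mlx5_comp1). A compilable illustration, with the surrounding pool logic stripped away:

/* Illustration of the renumbering: vector 0 is the control IRQ, so the
 * first completion vector (vecidx == 1) is named mlx5_comp0. The
 * constant mirrors the hunk above; the helper is simplified.
 */
#include <stdio.h>

#define MLX5_IRQ_VEC_COMP_BASE 1

static void irq_set_name(char *name, size_t len, int vecidx)
{
	vecidx -= MLX5_IRQ_VEC_COMP_BASE;	/* skip the control vector */
	snprintf(name, len, "mlx5_comp%d", vecidx);
}

int main(void)
{
	char name[32];

	irq_set_name(name, sizeof(name), 1);
	printf("%s\n", name);	/* prints mlx5_comp0 */
	return 0;
}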
@@ -101,7 +101,6 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
 	devlink = priv_to_devlink(mdev);
 	set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
 	mlx5_drain_health_wq(mdev);
-	devlink_unregister(devlink);
 	if (mlx5_dev_is_lightweight(mdev))
 		mlx5_uninit_one_light(mdev);
 	else