Commit d20e391c authored by Jakub Kicinski

Merge branch 'mlx5-misc-patches'

Tariq Toukan says:

====================
mlx5 misc patches

This series includes patches for the mlx5 driver.

Patch 1 by Shay enables LAG for HCAs with 8 ports.

Patch 2 by Carolina optimizes the safe channel-switch operation for
TX-only changes.

Patch 3 by Parav cleans up some unused code.
====================
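For context on patch 2: mlx5e_safe_switch_params() applies a candidate parameter set and runs a "preactivate" callback before the new channels go live. Below is a minimal sketch of how the new TX-only callback is used; example_set_num_tc() is a hypothetical helper written for illustration, not a function from the series, and only the identifiers that appear in the diff below are real.

	/* Hypothetical caller, illustrating the preactivate-callback pattern.
	 * A TX-only change (e.g. the number of TCs) can now use
	 * mlx5e_update_tc_and_tx_queues_ctx instead of the heavier
	 * mlx5e_num_channels_changed_ctx, which also updates RX queues.
	 */
	static int example_set_num_tc(struct mlx5e_priv *priv, u8 num_tc)
	{
		struct mlx5e_params new_params;

		new_params = priv->channels.params;	/* copy current config */
		mlx5e_params_mqprio_dcb_set(&new_params, num_tc); /* TX-only change */

		return mlx5e_safe_switch_params(priv, &new_params,
						mlx5e_update_tc_and_tx_queues_ctx,
						NULL, true);
	}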

Link: https://lore.kernel.org/r/20240512124306.740898-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents eafbf057 db5944e1
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1102,6 +1102,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
 			     void *context, bool reset);
 int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
 int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
+int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context);
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
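The `_ctx` suffix above comes from the driver's preactivate wrapper macro, which adapts a `(priv)`-only function to the `(priv, context)` callback signature. Reproduced here from en.h as a reading aid; quoted from memory of the driver, so verify against the exact tree:

	#define MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(fn) \
		int fn##_ctx(struct mlx5e_priv *priv, void *context) \
		{ \
			return fn(priv); \
		}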
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2292,7 +2292,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	 */
 	err = mlx5e_safe_switch_params(priv, &new_params,
-				       mlx5e_num_channels_changed_ctx, NULL, true);
+				       mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
 	if (!err)
 		priv->tx_ptp_opened = true;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3002,7 +3002,28 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
 	return err;
 }
 
-static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+					   struct mlx5e_params *params)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int num_comp_vectors, ix, irq;
+
+	num_comp_vectors = mlx5_comp_vectors_max(mdev);
+
+	for (ix = 0; ix < params->num_channels; ix++) {
+		cpumask_clear(priv->scratchpad.cpumask);
+
+		for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+			int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
+
+			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+		}
+
+		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+	}
+}
+
+static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
 {
 	struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
 	struct net_device *netdev = priv->netdev;
@@ -3026,22 +3047,10 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 	err = mlx5e_update_tx_netdev_queues(priv);
 	if (err)
 		goto err_tcs;
 
-	err = netif_set_real_num_rx_queues(netdev, nch);
-	if (err) {
-		netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
-		goto err_txqs;
-	}
+	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
 
 	return 0;
 
-err_txqs:
-	/* netif_set_real_num_rx_queues could fail only when nch increased. Only
-	 * one of nch and ntc is changed in this function. That means, the call
-	 * to netif_set_real_num_tx_queues below should not fail, because it
-	 * decreases the number of TX queues.
-	 */
-	WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
-
 err_tcs:
 	WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
 					  old_tc_to_txq));
@@ -3049,42 +3058,32 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 	return err;
 }
 
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
-
-static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
-					   struct mlx5e_params *params)
-{
-	int ix;
-
-	for (ix = 0; ix < params->num_channels; ix++) {
-		int num_comp_vectors, irq, vec_ix;
-		struct mlx5_core_dev *mdev;
-
-		mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
-		num_comp_vectors = mlx5_comp_vectors_max(mdev);
-		cpumask_clear(priv->scratchpad.cpumask);
-		vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
-
-		for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
-			int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
-
-			cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
-		}
-
-		netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
-	}
-}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
 
 static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 {
 	u16 count = priv->channels.params.num_channels;
+	struct net_device *netdev = priv->netdev;
+	int old_num_rxqs;
 	int err;
 
-	err = mlx5e_update_netdev_queues(priv);
-	if (err)
+	old_num_rxqs = netdev->real_num_rx_queues;
+	err = netif_set_real_num_rx_queues(netdev, count);
+	if (err) {
+		netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
+			    __func__, err);
 		return err;
-
-	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+	}
+	err = mlx5e_update_tc_and_tx_queues(priv);
+	if (err) {
+		/* mlx5e_update_tc_and_tx_queues can fail if channels or TCs number increases.
+		 * Since channel number changed, it increased. That means, the call to
+		 * netif_set_real_num_rx_queues below should not fail, because it
+		 * decreases the number of RX queues.
+		 */
+		WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
+		return err;
+	}
 
 	/* This function may be called on attach, before priv->rx_res is created. */
 	if (priv->rx_res) {
@@ -3617,7 +3616,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
 	mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
 
 	err = mlx5e_safe_switch_params(priv, &new_params,
-				       mlx5e_num_channels_changed_ctx, NULL, true);
+				       mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
 
 	if (!err && priv->mqprio_rl) {
 		mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
@@ -3718,10 +3717,8 @@ static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev
 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 					 struct tc_mqprio_qopt_offload *mqprio)
 {
-	mlx5e_fp_preactivate preactivate;
 	struct mlx5e_params new_params;
 	struct mlx5e_mqprio_rl *rl;
-	bool nch_changed;
 	int err;
 
 	err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -3735,10 +3732,8 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
 	new_params = priv->channels.params;
 	mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
 
-	nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
-	preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
-		mlx5e_update_netdev_queues_ctx;
-	err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+	err = mlx5e_safe_switch_params(priv, &new_params,
+				       mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
 	if (err) {
 		if (rl) {
 			mlx5e_mqprio_rl_cleanup(rl);
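To see what the relocated mlx5e_set_default_xps_cpumasks() computes, here is a standalone userspace sketch (illustrative only, not driver code) of the same stride mapping: channel ix serves completion vectors ix, ix + nch, ix + 2*nch, ..., and its XPS mask is the union of those vectors' CPUs. The vector and channel counts are made-up sample values.

	/* Standalone illustration of the IRQ-to-channel stride used above. */
	#include <stdio.h>

	int main(void)
	{
		int num_comp_vectors = 8;	/* e.g. mlx5_comp_vectors_max() */
		int num_channels = 3;		/* params->num_channels */
		int ix, irq;

		for (ix = 0; ix < num_channels; ix++) {
			printf("channel %d gets vectors:", ix);
			for (irq = ix; irq < num_comp_vectors; irq += num_channels)
				printf(" %d", irq);	/* each vector's CPU joins the XPS mask */
			printf("\n");
		}
		return 0;
	}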
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -713,7 +713,6 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
 	return 0;
 }
 
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
 bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 {
 #ifdef CONFIG_MLX5_ESWITCH
@@ -739,8 +738,6 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 		if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
 			return false;
-		if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
-			return false;
 #else
 	for (i = 0; i < ldev->ports; i++)
 		if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -507,58 +507,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 	return irq;
 }
 
-/**
- * mlx5_msix_alloc - allocate msix interrupt
- * @dev: mlx5 device from which to request
- * @handler: interrupt handler
- * @affdesc: affinity descriptor
- * @name: interrupt name
- *
- * Returns: struct msi_map with result encoded.
- * Note: the caller must make sure to release the irq by calling
- * mlx5_msix_free() if shutdown was initiated.
- */
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
-			       irqreturn_t (*handler)(int, void *),
-			       const struct irq_affinity_desc *affdesc,
-			       const char *name)
-{
-	struct msi_map map;
-	int err;
-
-	if (!dev->pdev) {
-		map.virq = 0;
-		map.index = -EINVAL;
-		return map;
-	}
-
-	map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc);
-	if (!map.virq)
-		return map;
-
-	err = request_irq(map.virq, handler, 0, name, NULL);
-	if (err) {
-		mlx5_core_warn(dev, "err %d\n", err);
-		pci_msix_free_irq(dev->pdev, map);
-		map.virq = 0;
-		map.index = -ENOMEM;
-	}
-
-	return map;
-}
-EXPORT_SYMBOL(mlx5_msix_alloc);
-
-/**
- * mlx5_msix_free - free a previously allocated msix interrupt
- * @dev: mlx5 device associated with interrupt
- * @map: map previously returned by mlx5_msix_alloc()
- */
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
-{
-	free_irq(map.virq, NULL);
-	pci_msix_free_irq(dev->pdev, map);
-}
-EXPORT_SYMBOL(mlx5_msix_free);
-
 /**
  * mlx5_irq_release_vector - release one IRQ back to the system.
  * @irq: the irq to release.
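The wrappers removed above were thin layers over the PCI core's dynamic MSI-X API and had no in-tree users. A hedged sketch of that underlying pattern follows, assuming the standard pci_msix_alloc_irq_at()/pci_msix_free_irq() interfaces; example_msix_get() and the "example" IRQ name are hypothetical.

	/* Sketch of the generic dynamic MSI-X pattern the removed helpers wrapped. */
	#include <linux/interrupt.h>
	#include <linux/msi.h>
	#include <linux/pci.h>

	static struct msi_map example_msix_get(struct pci_dev *pdev,
					       irq_handler_t handler, void *data)
	{
		struct msi_map map;

		/* Allocate a vector at any free index (MSI-X already enabled). */
		map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
		if (!map.virq)
			return map;	/* allocation failed */

		if (request_irq(map.virq, handler, 0, "example", data)) {
			pci_msix_free_irq(pdev, map);
			map.virq = 0;
		}
		return map;
	}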
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -85,7 +85,7 @@ enum mlx5_sqp_t {
 };
 
 enum {
-	MLX5_MAX_PORTS = 4,
+	MLX5_MAX_PORTS = 8,
 };
 
 enum {
@@ -1374,11 +1374,4 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
 enum {
 	MLX5_OCTWORD = 16,
 };
 
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
-			       irqreturn_t (*handler)(int, void *),
-			       const struct irq_affinity_desc *affdesc,
-			       const char *name);
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map);
-
 #endif /* MLX5_DRIVER_H */