Commit 4daa8731 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2024-03-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2024-03-01

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

* tag 'mlx5-fixes-2024-03-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Switch to using _bh variant of spinlock API in port timestamping NAPI poll context
  net/mlx5e: Use a memory barrier to enforce PTP WQ xmit submission tracking occurs after populating the metadata_map
  net/mlx5e: Fix MACsec state loss upon state update in offload path
  net/mlx5e: Change the warning when ignore_flow_level is not supported
  net/mlx5: Check capability for fw_reset
  net/mlx5: Fix fw reporter diagnose output
  net/mlx5: E-switch, Change flow rule destination checking
  Revert "net/mlx5e: Check the number of elements before walk TC rhashtable"
  Revert "net/mlx5: Block entering switchdev mode with ns inconsistency"
====================

Link: https://lore.kernel.org/r/20240302070318.62997-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 47fe2fc1 90502d43
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 		return -EOPNOTSUPP;
 	}
 
+	if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+	    !dev->priv.fw_reset) {
+		NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+		return -EOPNOTSUPP;
+	}
+
 	if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
 		NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");

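The hunk above pairs with the fw_reset change further down: once dev->priv.fw_reset is allocated only on devices that advertise the capability, the FW_ACTIVATE reload action has to be rejected up front instead of being attempted against a missing object. A minimal sketch of the same guard pattern, with hypothetical names (struct my_dev, my_reload_down) standing in for the driver's types:

#include <net/devlink.h>

/* Sketch only: refuse a reload action when the optional feature object
 * was never allocated. struct my_dev and its fw_reset member are
 * placeholders, not the mlx5 definitions.
 */
struct my_dev {
	void *fw_reset;		/* NULL when the device lacks the capability */
};

static int my_reload_down(struct devlink *devlink, bool netns_change,
			  enum devlink_reload_action action,
			  enum devlink_reload_limit limit,
			  struct netlink_ext_ack *extack)
{
	struct my_dev *dev = devlink_priv(devlink);

	if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE && !dev->fw_reset) {
		NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
		return -EOPNOTSUPP;	/* fail early with a clear message */
	}

	/* ... tear down the device for the requested action ... */
	return 0;
}
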
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
 	WARN_ON_ONCE(tracker->inuse);
 	tracker->inuse = true;
-	spin_lock(&list->tracker_list_lock);
+	spin_lock_bh(&list->tracker_list_lock);
 	list_add_tail(&tracker->entry, &list->tracker_list_head);
-	spin_unlock(&list->tracker_list_lock);
+	spin_unlock_bh(&list->tracker_list_lock);
 }
 
 static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
 	WARN_ON_ONCE(!tracker->inuse);
 	tracker->inuse = false;
-	spin_lock(&list->tracker_list_lock);
+	spin_lock_bh(&list->tracker_list_lock);
 	list_del(&tracker->entry);
-	spin_unlock(&list->tracker_list_lock);
+	spin_unlock_bh(&list->tracker_list_lock);
 }
 
 void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
 	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
 	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
 
-	spin_lock(&cqe_list->tracker_list_lock);
+	spin_lock_bh(&cqe_list->tracker_list_lock);
 	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
 		struct sk_buff *skb =
 			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
 		pos->inuse = false;
 		list_del(&pos->entry);
 	}
-	spin_unlock(&cqe_list->tracker_list_lock);
+	spin_unlock_bh(&cqe_list->tracker_list_lock);
 }
 
 #define PTP_WQE_CTR2IDX(val)	((val) & ptpsq->ts_cqe_ctr_mask)

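These helpers are shared between the transmit path and the PTP CQ handling. The usual reason for switching to the _bh lock variants is that a lock also taken from bottom-half (softirq) context must be held with BHs disabled, otherwise a softirq running on the same CPU can interrupt the holder and spin on the lock forever. An illustrative, self-contained sketch of the pattern (not the driver's code):

#include <linux/list.h>
#include <linux/spinlock.h>

/* A list shared between task context and softirq (BH) context. Both
 * sides use the _bh variants so a bottom half cannot preempt a holder
 * on the same CPU and deadlock on the lock.
 */
struct tracker_list {
	spinlock_t lock;
	struct list_head head;
};

/* e.g. called from the transmit path (task context) */
static void tracker_add(struct tracker_list *tl, struct list_head *entry)
{
	spin_lock_bh(&tl->lock);	/* disables local bottom halves */
	list_add_tail(entry, &tl->head);
	spin_unlock_bh(&tl->lock);
}

/* e.g. called from NAPI poll (softirq context) */
static void tracker_del(struct tracker_list *tl, struct list_head *entry)
{
	spin_lock_bh(&tl->lock);
	list_del(entry);
	spin_unlock_bh(&tl->lock);
}
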
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
 		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
-			mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+			mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
 		err = -EOPNOTSUPP;
 		goto err_check;
 	}

@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
 }
 
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
-				    struct mlx5e_macsec_sa *sa,
-				    bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+				       struct mlx5e_macsec_sa *sa, bool is_tx,
+				       struct net_device *netdev, u32 fs_id)
 {
 	int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
 			       MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
 	mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
 				fs_id);
-	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
 	sa->macsec_rule = NULL;
 }
 
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+				    struct mlx5e_macsec_sa *sa, bool is_tx,
+				    struct net_device *netdev, u32 fs_id)
+{
+	mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+				   struct mlx5e_macsec_sa *sa, bool encrypt,
+				   bool is_tx, u32 *fs_id)
+{
+	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+	struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+	struct mlx5_macsec_rule_attrs rule_attrs;
+	union mlx5_macsec_rule *macsec_rule;
+
+	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+	rule_attrs.sci = sa->sci;
+	rule_attrs.assoc_num = sa->assoc_num;
+	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+	macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+	if (!macsec_rule)
+		return -ENOMEM;
+
+	sa->macsec_rule = macsec_rule;
+
+	return 0;
+}
+
 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
 				struct mlx5e_macsec_sa *sa,
 				bool encrypt, bool is_tx, u32 *fs_id)
 {
 	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
 	struct mlx5e_macsec *macsec = priv->macsec;
-	struct mlx5_macsec_rule_attrs rule_attrs;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_macsec_obj_attrs obj_attrs;
-	union mlx5_macsec_rule *macsec_rule;
 	int err;
 
 	obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
 	if (err)
 		return err;
 
-	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
-	rule_attrs.sci = sa->sci;
-	rule_attrs.assoc_num = sa->assoc_num;
-	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
-				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
-	macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
-	if (!macsec_rule) {
-		err = -ENOMEM;
-		goto destroy_macsec_object;
+	if (sa->active) {
+		err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+		if (err)
+			goto destroy_macsec_object;
 	}
 
-	sa->macsec_rule = macsec_rule;
-
 	return 0;
 
 destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
 		goto destroy_sa;
 
 	macsec_device->tx_sa[assoc_num] = tx_sa;
-	if (!secy->operational ||
-	    assoc_num != tx_sc->encoding_sa ||
-	    !tx_sa->active)
+	if (!secy->operational)
 		goto out;
 
 	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
 		goto out;
 
 	if (ctx_tx_sa->active) {
-		err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+		err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
 		if (err)
 			goto out;
 	} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
 			goto out;
 		}
 
-		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
 	}
 
 out:
 	mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
 		goto out;
 	}
 
-	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
-				rx_sc->sc_xarray_element->fs_id);
+	if (rx_sa->active)
+		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+					rx_sc->sc_xarray_element->fs_id);
 	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
 	kfree(rx_sa);
 	rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
 			if (!rx_sa || !rx_sa->macsec_rule)
 				continue;
 
-			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
-						rx_sc->sc_xarray_element->fs_id);
+			mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+						   rx_sc->sc_xarray_element->fs_id);
 		}
 	}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
 				continue;
 
 			if (rx_sa->active) {
-				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
-							   &rx_sc->sc_xarray_element->fs_id);
+				err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+							      &rx_sc->sc_xarray_element->fs_id);
 				if (err)
 					goto out;
 			}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
 		if (!tx_sa)
 			continue;
 
-		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
 	}
 
 	for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
 			continue;
 
 		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
-			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+			err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
 			if (err)
 				goto out;
 		}

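The reshuffle splits SA handling into two layers: mlx5e_macsec_init_sa()/mlx5e_macsec_cleanup_sa() own the hardware MACsec object, while the new *_fs() helpers only add or remove the steering rules. State updates such as toggling an SA's active flag now go through the _fs variants, so the object (and the hardware state it carries) survives the update instead of being destroyed and recreated. A rough sketch of the layering with placeholder types and helpers, not the driver's structures:

/* destroy_rule()/destroy_object() stand in for the real steering and
 * firmware calls.
 */
void destroy_rule(void *rule);
void destroy_object(void *obj);

struct sa_state {
	void *hw_object;	/* keeps hardware SA state across updates */
	void *flow_rule;	/* valid only while the SA is active      */
};

/* State-update path: drop only the steering rule. */
static void sa_cleanup_fs(struct sa_state *sa)
{
	destroy_rule(sa->flow_rule);
	sa->flow_rule = NULL;
}

/* SA deletion path: rules first, then the object itself. */
static void sa_cleanup(struct sa_state *sa)
{
	sa_cleanup_fs(sa);
	destroy_object(sa->hw_object);
	sa->hw_object = NULL;
}
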
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			mlx5e_skb_cb_hwtstamp_init(skb);
 			mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
 						   metadata_index);
+			/* ensure skb is put on metadata_map before tracking the index */
+			wmb();
 			mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
 			if (!netif_tx_queue_stopped(sq->txq) &&
 			    mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {

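The new barrier orders two stores made by the transmitting CPU: the skb is published in the metadata map first, and only then is its index added to the tracker list that the completion path walks. Without the barrier, a reader that finds the index could still see a stale map slot. A generic publish/consume sketch of the same idea (this is the standard smp_wmb()/smp_rmb() pairing, not the driver's exact code, which uses a full wmb()):

#include <linux/compiler.h>
#include <asm/barrier.h>

static void *slot;		/* payload published by the producer   */
static int published;		/* flag/index observed by the consumer */

static void producer(void *payload)
{
	slot = payload;
	smp_wmb();		/* payload store before the flag store */
	WRITE_ONCE(published, 1);
}

static void *consumer(void)
{
	if (!READ_ONCE(published))
		return NULL;
	smp_rmb();		/* pairs with smp_wmb() in producer()  */
	return slot;
}
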
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
 	xa_for_each(&esw->offloads.vport_reps, i, rep) {
 		rpriv = rep->rep_data[REP_ETH].priv;
-		if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+		if (!rpriv || !rpriv->netdev)
 			continue;
 
 		rhashtable_walk_enter(&rpriv->tc_ht, &iter);

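This hunk reverts the earlier shortcut of consulting tc_ht.nelems before walking the table; the helper now enters the walk unconditionally again, which the rhashtable walk API handles fine (an empty table simply yields no entries). A minimal sketch of a safe rhashtable walk with a generic element type, not the driver's:

#include <linux/rhashtable.h>

struct my_elem {
	struct rhash_head node;
	u32 key;
};

static void walk_all(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_elem *e;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);

	while ((e = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(e))		/* -EAGAIN during a concurrent resize */
			continue;
		/* ... use e ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
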
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
 }
 
 static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
 {
-	bool vf_dest = false, pf_dest = false;
+	bool internal_dest = false, external_dest = false;
 	int i;
 
 	for (i = 0; i < max_dest; i++) {
-		if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+		if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+		    dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
 			continue;
 
-		if (dests[i].vport.num == MLX5_VPORT_UPLINK)
-			pf_dest = true;
+		/* Uplink dest is external, but considered as internal
+		 * if there is reformat because firmware uses LB+hairpin to support it.
+		 */
+		if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+		    !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+			external_dest = true;
 		else
-			vf_dest = true;
+			internal_dest = true;
 
-		if (vf_dest && pf_dest)
+		if (internal_dest && external_dest)
 			return true;
 	}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 
 	/* Header rewrite with combined wire+loopback in FDB is not allowed */
 	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
-	    esw_dests_to_vf_pf_vports(dest, i)) {
+	    esw_dests_to_int_external(dest, i)) {
 		esw_warn(esw->dev,
-			 "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+			 "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
 		rule = ERR_PTR(-EINVAL);
 		goto err_esw_get;
 	}

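The rename reflects what the firmware restriction is really about: with header rewrite, a rule may not fan out to both an internal destination (VF/PF vports, or an uplink reached through loopback+hairpin when reformat is set) and an external one (plain wire) at the same time. A simplified sketch of the classification loop, using plain enums instead of the mlx5 destination structs:

#include <stdbool.h>

enum dest_kind { DEST_VPORT, DEST_UPLINK, DEST_OTHER };

struct dest {
	enum dest_kind kind;
	bool reformat;		/* uplink with reformat goes via LB+hairpin */
};

/* True when the destinations mix internal and external targets, which
 * cannot be combined with header rewrite.
 */
static bool dests_mix_int_external(const struct dest *dests, int n)
{
	bool internal = false, external = false;
	int i;

	for (i = 0; i < n; i++) {
		if (dests[i].kind == DEST_OTHER)
			continue;
		if (dests[i].kind == DEST_UPLINK && !dests[i].reformat)
			external = true;
		else
			internal = true;
		if (internal && external)
			return true;
	}
	return false;
}
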
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
 	return 0;
 }
 
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
-	struct net *devl_net, *netdev_net;
-	bool ret = false;
-
-	mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
-	if (dev->mlx5e_res.uplink_netdev) {
-		netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
-		devl_net = devlink_net(devlink);
-		ret = net_eq(devl_net, netdev_net);
-	}
-	mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
-
-	return ret;
-}
-
 int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
 	if (esw_mode_from_devlink(mode, &mlx5_mode))
 		return -EINVAL;
 
-	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
-	    !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
-		return -EPERM;
-	}
-
 	mlx5_lag_disable_change(esw->dev);
 	err = mlx5_esw_try_lock(esw);
 	if (err < 0) {

@@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+	if (!fw_reset)
+		return;
+
 	MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
 	mlx5_eq_notifier_register(dev, &fw_reset->nb);
 }
 
 void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
 {
-	mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+	if (!fw_reset)
+		return;
+
+	mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
 }
 
 void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+	if (!fw_reset)
+		return;
+
 	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
 	cancel_work_sync(&fw_reset->fw_live_patch_work);
 	cancel_work_sync(&fw_reset->reset_request_work);
@@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
 
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 {
-	struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+	struct mlx5_fw_reset *fw_reset;
 	int err;
 
+	if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+		return 0;
+
+	fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
 	if (!fw_reset)
 		return -ENOMEM;
 	fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
 
+	if (!fw_reset)
+		return;
+
 	devl_params_unregister(priv_to_devlink(dev),
 			       mlx5_fw_reset_devlink_params,
 			       ARRAY_SIZE(mlx5_fw_reset_devlink_params));

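Taken together, mlx5_fw_reset_init() now skips allocation entirely when the MCAM does not expose the mfrl register, and every other entry point returns early when dev->priv.fw_reset is NULL. This is the usual shape for an optional subsystem: allocate only when the capability is present and let every public hook tolerate the absent state. A compact sketch of the pattern with hypothetical names (struct feature, feature_init, ...):

#include <linux/slab.h>

struct feature {
	int dummy;
};

struct device_ctx {
	bool has_cap;			/* stands in for the capability bit */
	struct feature *feature;	/* NULL when unsupported            */
};

static int feature_init(struct device_ctx *dev)
{
	if (!dev->has_cap)
		return 0;		/* absent, but not an error */

	dev->feature = kzalloc(sizeof(*dev->feature), GFP_KERNEL);
	return dev->feature ? 0 : -ENOMEM;
}

static void feature_do_work(struct device_ctx *dev)
{
	if (!dev->feature)		/* every entry point checks */
		return;
	/* ... use dev->feature ... */
}

static void feature_cleanup(struct device_ctx *dev)
{
	if (!dev->feature)
		return;
	kfree(dev->feature);
	dev->feature = NULL;
}
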
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
 	struct health_buffer __iomem *h = health->health;
 	u8 synd = ioread8(&h->synd);
 
+	devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
 	if (!synd)
 		return 0;
 
-	devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
 	devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
 
 	return 0;

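The fix is purely about ordering: the Syndrome pair is emitted before the early return, so a diagnose dump always carries the syndrome (0 when healthy) and only adds the description when a syndrome is pending. A hedged sketch of such a diagnose callback; read_syndrome() and describe() are placeholders for the device-specific reads:

#include <net/devlink.h>

u8 read_syndrome(struct devlink_health_reporter *reporter);	/* placeholder */
const char *describe(u8 synd);					/* placeholder */

static int my_diagnose(struct devlink_health_reporter *reporter,
		       struct devlink_fmsg *fmsg,
		       struct netlink_ext_ack *extack)
{
	u8 synd = read_syndrome(reporter);

	devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	if (!synd)
		return 0;	/* healthy: syndrome 0 is still reported */

	devlink_fmsg_string_pair_put(fmsg, "Description", describe(synd));
	return 0;
}
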
@@ -10261,7 +10261,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
 	u8         regs_63_to_46[0x12];
 	u8         mrtc[0x1];
-	u8         regs_44_to_32[0xd];
+	u8         regs_44_to_41[0x4];
+	u8         mfrl[0x1];
+	u8         regs_39_to_32[0x8];
 
 	u8         regs_31_to_10[0x16];
 	u8         mtmp[0x1];

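In mlx5_ifc.h each u8 element of these arrays stands for a single bit of the firmware layout, so a reserved window can only be split if the widths still add up: the 13-bit regs_44_to_32[0xd] becomes 4 + 1 + 8 bits, exposing bit 40 as the mfrl capability that the fw_reset code above tests with MLX5_CAP_MCAM_REG(dev, mfrl). A tiny sketch of the width bookkeeping (illustrative constants, not the mlx5 headers):

/* Splitting a reserved bit range is safe only if the new widths sum to
 * the old one, so every later field keeps its offset.
 */
#define OLD_RESERVED_BITS	0xd	/* regs_44_to_32 */
#define NEW_HI_BITS		0x4	/* regs_44_to_41 */
#define NEW_MFRL_BITS		0x1	/* mfrl          */
#define NEW_LO_BITS		0x8	/* regs_39_to_32 */

_Static_assert(NEW_HI_BITS + NEW_MFRL_BITS + NEW_LO_BITS == OLD_RESERVED_BITS,
	       "bit widths must still add up after carving out mfrl");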