Commit 763465e6 authored by David S. Miller

Merge tag 'mlx5-fixes-2022-11-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-fixes-2022-11-24
This series provides bug fixes to mlx5 driver.

Focusing on error handling and proper memory management in mlx5, in
general and in the newly added macsec module.

I still have a few fixes left in my queue and I hope those will be the
last ones for mlx5 for this cycle.

Please pull and let me know if there is any problem.

Happy thanksgiving.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1f605d6d 9034b292
...@@ -1497,8 +1497,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf, ...@@ -1497,8 +1497,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
return -EFAULT; return -EFAULT;
err = sscanf(outlen_str, "%d", &outlen); err = sscanf(outlen_str, "%d", &outlen);
if (err < 0) if (err != 1)
return err; return -EINVAL;
ptr = kzalloc(outlen, GFP_KERNEL); ptr = kzalloc(outlen, GFP_KERNEL);
if (!ptr) if (!ptr)
......
...@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) ...@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(fs, i); accel_fs_tcp_destroy_table(fs, i);
kfree(accel_tcp); kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL); mlx5e_fs_set_accel_tcp(fs, NULL);
} }
...@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) ...@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
err_destroy_tables: err_destroy_tables:
while (--i >= 0) while (--i >= 0)
accel_fs_tcp_destroy_table(fs, i); accel_fs_tcp_destroy_table(fs, i);
kfree(accel_tcp); kvfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL); mlx5e_fs_set_accel_tcp(fs, NULL);
return err; return err;
} }
...@@ -229,22 +229,6 @@ static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, voi ...@@ -229,22 +229,6 @@ static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, voi
if (!attrs->replay_protect) if (!attrs->replay_protect)
return 0; return 0;
switch (attrs->replay_window) {
case 256:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
break;
case 128:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
break;
case 64:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
break;
case 32:
window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
break;
default:
return -EINVAL;
}
MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz); MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION); MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
...@@ -427,15 +411,15 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci) ...@@ -427,15 +411,15 @@ mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
return NULL; return NULL;
} }
static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, static int macsec_rx_sa_active_update(struct macsec_context *ctx,
struct mlx5e_macsec_sa *rx_sa, struct mlx5e_macsec_sa *rx_sa,
bool active) bool active)
{ {
struct mlx5_core_dev *mdev = macsec->mdev; struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5_macsec_obj_attrs attrs = {}; struct mlx5e_macsec *macsec = priv->macsec;
int err = 0; int err = 0;
if (rx_sa->active != active) if (rx_sa->active == active)
return 0; return 0;
rx_sa->active = active; rx_sa->active = active;
...@@ -444,13 +428,11 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, ...@@ -444,13 +428,11 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec,
return 0; return 0;
} }
attrs.sci = cpu_to_be64((__force u64)rx_sa->sci); err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
attrs.enc_key_id = rx_sa->enc_key_id;
err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id);
if (err) if (err)
return err; rx_sa->active = false;
return 0; return err;
} }
static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx) static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
...@@ -476,6 +458,11 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx) ...@@ -476,6 +458,11 @@ static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
return false; return false;
} }
if (!ctx->secy->tx_sc.encrypt) {
netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
return false;
}
return true; return true;
} }
...@@ -620,6 +607,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) ...@@ -620,6 +607,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
if (tx_sa->active == ctx_tx_sa->active) if (tx_sa->active == ctx_tx_sa->active)
goto out; goto out;
tx_sa->active = ctx_tx_sa->active;
if (tx_sa->assoc_num != tx_sc->encoding_sa) if (tx_sa->assoc_num != tx_sc->encoding_sa)
goto out; goto out;
...@@ -635,8 +623,6 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx) ...@@ -635,8 +623,6 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
mlx5e_macsec_cleanup_sa(macsec, tx_sa, true); mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
} }
tx_sa->active = ctx_tx_sa->active;
out: out:
mutex_unlock(&macsec->lock); mutex_unlock(&macsec->lock);
...@@ -736,9 +722,14 @@ static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx) ...@@ -736,9 +722,14 @@ static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
sc_xarray_element->rx_sc = rx_sc; sc_xarray_element->rx_sc = rx_sc;
err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element, err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
XA_LIMIT(1, USHRT_MAX), GFP_KERNEL); XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
if (err) if (err) {
if (err == -EBUSY)
netdev_err(ctx->netdev,
"MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
MLX5_MACEC_RX_FS_ID_MAX);
goto destroy_sc_xarray_elemenet; goto destroy_sc_xarray_elemenet;
}
rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL); rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
if (!rx_sc->md_dst) { if (!rx_sc->md_dst) {
...@@ -798,16 +789,16 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx) ...@@ -798,16 +789,16 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
goto out; goto out;
} }
rx_sc->active = ctx_rx_sc->active;
if (rx_sc->active == ctx_rx_sc->active) if (rx_sc->active == ctx_rx_sc->active)
goto out; goto out;
rx_sc->active = ctx_rx_sc->active;
for (i = 0; i < MACSEC_NUM_AN; ++i) { for (i = 0; i < MACSEC_NUM_AN; ++i) {
rx_sa = rx_sc->rx_sa[i]; rx_sa = rx_sc->rx_sa[i];
if (!rx_sa) if (!rx_sa)
continue; continue;
err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, rx_sa->active && ctx_rx_sc->active); err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
if (err) if (err)
goto out; goto out;
} }
...@@ -818,16 +809,43 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx) ...@@ -818,16 +809,43 @@ static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
return err; return err;
} }
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
{
struct mlx5e_macsec_sa *rx_sa;
int i;
for (i = 0; i < MACSEC_NUM_AN; ++i) {
rx_sa = rx_sc->rx_sa[i];
if (!rx_sa)
continue;
mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[i] = NULL;
}
/* At this point the relevant MACsec offload Rx rule already removed at
* mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
* Rx related data propagating using xa_erase which uses rcu to sync,
* once fs_id is erased then this rx_sc is hidden from datapath.
*/
list_del_rcu(&rx_sc->rx_sc_list_element);
xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
metadata_dst_free(rx_sc->md_dst);
kfree(rx_sc->sc_xarray_element);
kfree_rcu(rx_sc);
}
static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx) static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
{ {
struct mlx5e_priv *priv = netdev_priv(ctx->netdev); struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device; struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc; struct mlx5e_macsec_rx_sc *rx_sc;
struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec *macsec; struct mlx5e_macsec *macsec;
struct list_head *list; struct list_head *list;
int err = 0; int err = 0;
int i;
mutex_lock(&priv->macsec->lock); mutex_lock(&priv->macsec->lock);
...@@ -849,31 +867,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx) ...@@ -849,31 +867,7 @@ static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
goto out; goto out;
} }
for (i = 0; i < MACSEC_NUM_AN; ++i) { macsec_del_rxsc_ctx(macsec, rx_sc);
rx_sa = rx_sc->rx_sa[i];
if (!rx_sa)
continue;
mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[i] = NULL;
}
/*
* At this point the relevant MACsec offload Rx rule already removed at
* mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
* Rx related data propagating using xa_erase which uses rcu to sync,
* once fs_id is erased then this rx_sc is hidden from datapath.
*/
list_del_rcu(&rx_sc->rx_sc_list_element);
xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
metadata_dst_free(rx_sc->md_dst);
kfree(rx_sc->sc_xarray_element);
kfree_rcu(rx_sc);
out: out:
mutex_unlock(&macsec->lock); mutex_unlock(&macsec->lock);
...@@ -1015,7 +1009,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) ...@@ -1015,7 +1009,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
goto out; goto out;
} }
err = mlx5e_macsec_update_rx_sa(macsec, rx_sa, ctx_rx_sa->active); err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
out: out:
mutex_unlock(&macsec->lock); mutex_unlock(&macsec->lock);
...@@ -1234,7 +1228,6 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx) ...@@ -1234,7 +1228,6 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
struct mlx5e_priv *priv = netdev_priv(ctx->netdev); struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
struct mlx5e_macsec_device *macsec_device; struct mlx5e_macsec_device *macsec_device;
struct mlx5e_macsec_rx_sc *rx_sc, *tmp; struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
struct mlx5e_macsec_sa *rx_sa;
struct mlx5e_macsec_sa *tx_sa; struct mlx5e_macsec_sa *tx_sa;
struct mlx5e_macsec *macsec; struct mlx5e_macsec *macsec;
struct list_head *list; struct list_head *list;
...@@ -1263,28 +1256,15 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx) ...@@ -1263,28 +1256,15 @@ static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
} }
list = &macsec_device->macsec_rx_sc_list_head; list = &macsec_device->macsec_rx_sc_list_head;
list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) { list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
for (i = 0; i < MACSEC_NUM_AN; ++i) { macsec_del_rxsc_ctx(macsec, rx_sc);
rx_sa = rx_sc->rx_sa[i];
if (!rx_sa)
continue;
mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[i] = NULL;
}
list_del_rcu(&rx_sc->rx_sc_list_element);
kfree_rcu(rx_sc);
}
kfree(macsec_device->dev_addr); kfree(macsec_device->dev_addr);
macsec_device->dev_addr = NULL; macsec_device->dev_addr = NULL;
list_del_rcu(&macsec_device->macsec_device_list_element); list_del_rcu(&macsec_device->macsec_device_list_element);
--macsec->num_of_devices; --macsec->num_of_devices;
kfree(macsec_device);
out: out:
mutex_unlock(&macsec->lock); mutex_unlock(&macsec->lock);
...@@ -1748,7 +1728,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev, ...@@ -1748,7 +1728,7 @@ void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
if (!macsec) if (!macsec)
return; return;
fs_id = MLX5_MACSEC_METADATA_HANDLE(macsec_meta_data); fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
rcu_read_lock(); rcu_read_lock();
sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id); sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
......
...@@ -10,9 +10,11 @@ ...@@ -10,9 +10,11 @@
#include <net/macsec.h> #include <net/macsec.h>
#include <net/dst_metadata.h> #include <net/dst_metadata.h>
/* Bit31 - 30: MACsec marker, Bit3-0: MACsec id */ /* Bit31 - 30: MACsec marker, Bit15-0: MACsec id */
#define MLX5_MACEC_RX_FS_ID_MAX USHRT_MAX /* Must be power of two */
#define MLX5_MACSEC_RX_FS_ID_MASK MLX5_MACEC_RX_FS_ID_MAX
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1) #define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(3, 0)) #define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
struct mlx5e_priv; struct mlx5e_priv;
struct mlx5e_macsec; struct mlx5e_macsec;
......
...@@ -250,7 +250,7 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs) ...@@ -250,7 +250,7 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
u32 *flow_group_in; u32 *flow_group_in;
int err = 0; int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC); ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_MACSEC);
if (!ns) if (!ns)
...@@ -261,8 +261,10 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs) ...@@ -261,8 +261,10 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM; return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL); flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in) if (!flow_group_in) {
err = -ENOMEM;
goto out_spec; goto out_spec;
}
tx_tables = &tx_fs->tables; tx_tables = &tx_fs->tables;
ft_crypto = &tx_tables->ft_crypto; ft_crypto = &tx_tables->ft_crypto;
...@@ -898,7 +900,7 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs) ...@@ -898,7 +900,7 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
u32 *flow_group_in; u32 *flow_group_in;
int err = 0; int err;
ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC); ns = mlx5_get_flow_namespace(macsec_fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC);
if (!ns) if (!ns)
...@@ -909,8 +911,10 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs) ...@@ -909,8 +911,10 @@ static int macsec_fs_rx_create(struct mlx5e_macsec_fs *macsec_fs)
return -ENOMEM; return -ENOMEM;
flow_group_in = kvzalloc(inlen, GFP_KERNEL); flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in) if (!flow_group_in) {
err = -ENOMEM;
goto free_spec; goto free_spec;
}
rx_tables = &rx_fs->tables; rx_tables = &rx_fs->tables;
ft_crypto = &rx_tables->ft_crypto; ft_crypto = &rx_tables->ft_crypto;
...@@ -1142,10 +1146,10 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs, ...@@ -1142,10 +1146,10 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
ft_crypto = &rx_tables->ft_crypto; ft_crypto = &rx_tables->ft_crypto;
/* Set bit[31 - 30] macsec marker - 0x01 */ /* Set bit[31 - 30] macsec marker - 0x01 */
/* Set bit[3-0] fs id */ /* Set bit[15-0] fs id */
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
MLX5_SET(set_action_in, action, data, fs_id | BIT(30)); MLX5_SET(set_action_in, action, data, MLX5_MACSEC_RX_METADAT_HANDLE(fs_id) | BIT(30));
MLX5_SET(set_action_in, action, offset, 0); MLX5_SET(set_action_in, action, offset, 0);
MLX5_SET(set_action_in, action, length, 32); MLX5_SET(set_action_in, action, length, 32);
...@@ -1205,6 +1209,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs, ...@@ -1205,6 +1209,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
rx_rule->rule[1] = rule; rx_rule->rule[1] = rule;
} }
kvfree(spec);
return macsec_rule; return macsec_rule;
err: err:
......
...@@ -1362,6 +1362,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) ...@@ -1362,6 +1362,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
devl_rate_nodes_destroy(devlink); devl_rate_nodes_destroy(devlink);
} }
/* Destroy legacy fdb when disabling sriov in legacy mode. */
if (esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_eswitch_disable_locked(esw);
esw->esw_funcs.num_vfs = 0; esw->esw_funcs.num_vfs = 0;
......
...@@ -736,6 +736,14 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, ...@@ -736,6 +736,14 @@ void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw); struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
if (mlx5_esw_allowed(esw))
return esw->esw_funcs.num_vfs;
return 0;
}
#else /* CONFIG_MLX5_ESWITCH */ #else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */ /* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
...@@ -3387,6 +3387,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, ...@@ -3387,6 +3387,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
int err; int err;
esw->mode = MLX5_ESWITCH_LEGACY; esw->mode = MLX5_ESWITCH_LEGACY;
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
*/
if (!mlx5_sriov_is_enabled(esw->dev))
return 0;
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err) if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
......
...@@ -312,6 +312,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw, ...@@ -312,6 +312,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) { for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl; struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
attr->dests[curr_dest].termtbl = NULL;
/* search for the destination associated with the /* search for the destination associated with the
* current term table * current term table
*/ */
......
...@@ -701,8 +701,9 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) ...@@ -701,8 +701,9 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
dev = ldev->pf[MLX5_LAG_P1].dev; dev = ldev->pf[MLX5_LAG_P1].dev;
if ((mlx5_sriov_is_enabled(dev)) && !is_mdev_switchdev_mode(dev)) for (i = 0; i < ldev->ports; i++)
return false; if (mlx5_eswitch_num_vfs(dev->priv.eswitch) && !is_mdev_switchdev_mode(dev))
return false;
mode = mlx5_eswitch_mode(dev); mode = mlx5_eswitch_mode(dev);
for (i = 0; i < ldev->ports; i++) for (i = 0; i < ldev->ports; i++)
......
...@@ -46,7 +46,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn, ...@@ -46,7 +46,7 @@ static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
struct mlx5dr_action *action) struct mlx5dr_action *action)
{ {
int ret; int ret = -EOPNOTSUPP;
if (action && action->action_type != DR_ACTION_TYP_FT) if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -67,6 +67,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, ...@@ -67,6 +67,9 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
goto out; goto out;
} }
if (ret)
goto out;
/* Release old action */ /* Release old action */
if (tbl->miss_action) if (tbl->miss_action)
refcount_dec(&tbl->miss_action->refcount); refcount_dec(&tbl->miss_action->refcount);
......
...@@ -11611,13 +11611,6 @@ enum { ...@@ -11611,13 +11611,6 @@ enum {
MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1, MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
}; };
enum {
MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
};
#define MLX5_MACSEC_ASO_INC_SN 0x2 #define MLX5_MACSEC_ASO_INC_SN 0x2
#define MLX5_MACSEC_ASO_REG_C_4_5 0x2 #define MLX5_MACSEC_ASO_REG_C_4_5 0x2
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment