Commit c5447c70 authored by Mark Bloch, committed by Saeed Mahameed

net/mlx5: E-Switch, Reload IB interface when switching devlink modes

Up until this point it wasn't possible to activate IB representors
when switching to switchdev mode; remove this limitation.

We trigger a reload of the PF IB interface in order to make sure that
already allocated resources are invalidated and that new resources are
opened correctly, with all the limitations of switchdev mode applied
(only raw packet capabilities, without RoCE). We also move the
remove/add to the place where the E-Switch mode is set/unset, to better
control when to trigger this action; this allows the IB side to start
in the correct mode.

For better code reuse, create a function which reloads an interface and
export it.
Signed-off-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent b5ca15ad
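
For context on why a reload is enough to apply the switchdev limitations, here is a minimal sketch of a core-interface consumer, assuming the struct mlx5_interface callback layout of this kernel generation (.add/.remove/.protocol); the demo_* names and the module boilerplate are hypothetical and only illustrate that mlx5_reload_interface() amounts to ->remove() followed by ->add() on the same device, which makes the IB side drop its old resources and re-create them under the new E-Switch mode.

/* sketch only -- not part of this patch */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

/* Hypothetical per-device context allocated by ->add(). */
struct demo_ib_ctx {
	struct mlx5_core_dev *mdev;
};

static void *demo_ib_add(struct mlx5_core_dev *mdev)
{
	struct demo_ib_ctx *ctx;

	/* Everything set up here is created under the E-Switch mode in
	 * effect at ->add() time; in switchdev mode the real IB driver
	 * would come up with raw packet capabilities only, without RoCE.
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;
	ctx->mdev = mdev;
	return ctx;
}

static void demo_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	/* Tear down everything created in ->add(); after a reload the old
	 * context is gone and must not be referenced again.
	 */
	kfree(context);
}

static struct mlx5_interface demo_ib_interface = {
	.add      = demo_ib_add,
	.remove   = demo_ib_remove,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init demo_init(void)
{
	/* Registration as usual; when the E-Switch later calls
	 * mlx5_reload_interface(mdev, MLX5_INTERFACE_PROTOCOL_IB), the core
	 * invokes demo_ib_remove() and then demo_ib_add() for that device,
	 * forcing a clean restart in the new mode.
	 */
	return mlx5_register_interface(&demo_ib_interface);
}

static void __exit demo_exit(void)
{
	mlx5_unregister_interface(&demo_ib_interface);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Doing the remove/add under mlx5_intf_mutex, as the new helper below does, keeps the reload serialized with other interface registration and removal.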
@@ -337,6 +337,14 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
+{
+	mutex_lock(&mlx5_intf_mutex);
+	mlx5_remove_dev_by_protocol(mdev, protocol);
+	mlx5_add_dev_by_protocol(mdev, protocol);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
 {
 	struct mlx5_priv *priv = &mdev->priv;
@@ -1619,10 +1619,14 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
 	esw->mode = mode;
 
-	if (mode == SRIOV_LEGACY)
+	if (mode == SRIOV_LEGACY) {
 		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
-	else
+	} else {
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 		err = esw_offloads_init(esw, nvfs + 1);
+	}
+
 	if (err)
 		goto abort;
@@ -1644,12 +1648,17 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 abort:
 	esw->mode = SRIOV_NONE;
+
+	if (mode == SRIOV_OFFLOADS)
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+
 	return err;
 }
 
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 {
 	struct esw_mc_addr *mc_promisc;
+	int old_mode;
 	int nvports;
 	int i;
@@ -1675,7 +1684,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	else if (esw->mode == SRIOV_OFFLOADS)
 		esw_offloads_cleanup(esw, nvports);
 
+	old_mode = esw->mode;
 	esw->mode = SRIOV_NONE;
+
+	if (old_mode == SRIOV_OFFLOADS)
+		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 }
 
 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -827,14 +827,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 {
 	int err;
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	err = esw_create_offloads_fdb_tables(esw, nvports);
 	if (err)
-		goto create_fdb_err;
+		return err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -859,12 +854,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 create_ft_err:
 	esw_destroy_offloads_fdb_tables(esw);
 
-create_fdb_err:
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return err;
 }
@@ -882,9 +871,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 	}
 
 	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
+	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 
 	return err;
 }
@@ -201,4 +201,5 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 int mlx5_lag_allow(struct mlx5_core_dev *dev);
 int mlx5_lag_forbid(struct mlx5_core_dev *dev);
 
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 #endif /* __MLX5_CORE_H__ */