Commit 86a12124 authored by Shay Drory's avatar Shay Drory Committed by Saeed Mahameed

net/mlx5: LAG, generalize handling of shared FDB

Shared FDB handling relies on the assumption that a shared FDB can only
be created from two devices.
In order to support shared FDB of more than two devices, iterate over
all LAG ports instead of hard coding only the first two LAG ports
whenever handling shared FDB.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4c103aea
...@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, ...@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
return; return;
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
if (ldev->ports > 2)
ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
}
} }
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
...@@ -782,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) ...@@ -782,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
{ {
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
bool roce_lag; bool roce_lag;
int err; int err;
int i; int i;
...@@ -807,30 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) ...@@ -807,30 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb || roce_lag) if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev); mlx5_lag_add_devices(ldev);
if (shared_fdb) { if (shared_fdb)
if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) for (i = 0; i < ldev->ports; i++)
mlx5_eswitch_reload_reps(dev0->priv.eswitch); if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_eswitch_reload_reps(dev1->priv.eswitch);
}
} }
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int i;
if (is_mdev_switchdev_mode(dev0) && for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
is_mdev_switchdev_mode(dev1) && dev = ldev->pf[i].dev;
mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) && if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) && mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_devcom_comp_is_ready(dev0->priv.devcom, MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
MLX5_DEVCOM_ESW_OFFLOADS) && MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
MLX5_CAP_GEN(dev1, lag_native_fdb_selection) && mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
MLX5_CAP_ESW(dev1, root_ft_on_other_esw) && MLX5_CAP_GEN(dev, num_lag_ports) - 1)
MLX5_CAP_ESW(dev0, esw_shared_ingress_acl) && continue;
mlx5_eswitch_get_npeers(dev0->priv.eswitch) == MLX5_CAP_GEN(dev0, num_lag_ports) - 1 && return false;
mlx5_eswitch_get_npeers(dev1->priv.eswitch) == MLX5_CAP_GEN(dev1, num_lag_ports) - 1) }
dev = ldev->pf[MLX5_LAG_P1].dev;
if (is_mdev_switchdev_mode(dev) &&
mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true; return true;
return false; return false;
...@@ -867,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond) ...@@ -867,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev) static void mlx5_do_bond(struct mlx5_lag *ldev)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker = { }; struct lag_tracker tracker = { };
bool do_bond, roce_lag; bool do_bond, roce_lag;
int err; int err;
...@@ -908,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) ...@@ -908,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
for (i = 1; i < ldev->ports; i++) for (i = 1; i < ldev->ports; i++)
mlx5_nic_vport_enable_roce(ldev->pf[i].dev); mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
} else if (shared_fdb) { } else if (shared_fdb) {
int i;
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0); mlx5_rescan_drivers_locked(dev0);
err = mlx5_eswitch_reload_reps(dev0->priv.eswitch); for (i = 0; i < ldev->ports; i++) {
if (!err) err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
err = mlx5_eswitch_reload_reps(dev1->priv.eswitch); if (err)
break;
}
if (err) { if (err) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0); mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev); mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev); mlx5_lag_add_devices(ldev);
mlx5_eswitch_reload_reps(dev0->priv.eswitch); for (i = 0; i < ldev->ports; i++)
mlx5_eswitch_reload_reps(dev1->priv.eswitch); mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n"); mlx5_core_err(dev0, "Failed to enable lag\n");
return; return;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment