Commit 7e978e77 authored by Mark Bloch, committed by Saeed Mahameed

net/mlx5: Lag, use actual number of lag ports

Refactor the entire lag code to use ldev->ports instead of hard-coded
defines (like MLX5_MAX_PORTS) for its operations.
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent cdf611d1
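The change is largely mechanical: every place that indexed or iterated with the compile-time MLX5_MAX_PORTS bound now uses the per-device ldev->ports count, so the same code paths serve two-port and larger LAG configurations while the arrays stay sized for the maximum. A minimal sketch of the pattern, using hypothetical trimmed stand-ins for the mlx5 structures (only the fields the loop touches), not the driver's real definitions:

/* Hypothetical stand-ins; the real structures live in the mlx5 driver. */
#define MLX5_MAX_PORTS	4		/* still bounds the storage */

struct lag_pf {
	void *netdev;			/* stand-in for struct net_device * */
};

struct mlx5_lag {
	unsigned char ports;		/* actual number of lag ports */
	struct lag_pf pf[MLX5_MAX_PORTS];
};

/* Before the patch loops like this ran to MLX5_MAX_PORTS; now they stop at
 * the number of ports the lag device actually has.
 */
static int lag_netdev_idx(const struct mlx5_lag *ldev, const void *ndev)
{
	int i;

	for (i = 0; i < ldev->ports; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;			/* not a member of this lag */
}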
...@@ -53,8 +53,7 @@ enum { ...@@ -53,8 +53,7 @@ enum {
*/ */
static DEFINE_SPINLOCK(lag_lock); static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, bool shared_fdb, u8 flags)
u8 remap_port2, bool shared_fdb, u8 flags)
{ {
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
...@@ -63,8 +62,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, ...@@ -63,8 +62,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) { if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
} else { } else {
MLX5_SET(lagc, lag_ctx, port_select_mode, MLX5_SET(lagc, lag_ctx, port_select_mode,
MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT); MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
...@@ -73,8 +72,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, ...@@ -73,8 +72,8 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
return mlx5_cmd_exec_in(dev, create_lag, in); return mlx5_cmd_exec_in(dev, create_lag, in);
} }
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
u8 remap_port2) u8 *ports)
{ {
u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
...@@ -82,8 +81,8 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1, ...@@ -82,8 +81,8 @@ static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
MLX5_SET(modify_lag_in, in, field_select, 0x1); MLX5_SET(modify_lag_in, in, field_select, 0x1);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);
MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]);
return mlx5_cmd_exec_in(dev, modify_lag, in); return mlx5_cmd_exec_in(dev, modify_lag, in);
} }
...@@ -174,7 +173,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, ...@@ -174,7 +173,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
{ {
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < ldev->ports; i++)
if (ldev->pf[i].netdev == ndev) if (ldev->pf[i].netdev == ndev)
return i; return i;
...@@ -191,39 +190,69 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) ...@@ -191,39 +190,69 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
} }
static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
u8 *ports, int *num_disabled)
{
int i;
*num_disabled = 0;
for (i = 0; i < num_ports; i++) {
if (!tracker->netdev_state[i].tx_enabled ||
!tracker->netdev_state[i].link_up)
ports[(*num_disabled)++] = i;
}
}
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2) u8 num_ports, u8 *ports)
{ {
bool p1en; int disabled[MLX5_MAX_PORTS] = {};
bool p2en; int enabled[MLX5_MAX_PORTS] = {};
int disabled_ports_num = 0;
int enabled_ports_num = 0;
u32 rand;
int i;
p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled && for (i = 0; i < num_ports; i++) {
tracker->netdev_state[MLX5_LAG_P1].link_up; if (tracker->netdev_state[i].tx_enabled &&
tracker->netdev_state[i].link_up)
enabled[enabled_ports_num++] = i;
else
disabled[disabled_ports_num++] = i;
}
p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled && /* Use native mapping by default */
tracker->netdev_state[MLX5_LAG_P2].link_up; for (i = 0; i < num_ports; i++)
ports[i] = MLX5_LAG_EGRESS_PORT_1 + i;
*port1 = MLX5_LAG_EGRESS_PORT_1; /* If all ports are disabled/enabled keep native mapping */
*port2 = MLX5_LAG_EGRESS_PORT_2; if (enabled_ports_num == num_ports ||
if ((!p1en && !p2en) || (p1en && p2en)) disabled_ports_num == num_ports)
return; return;
if (p1en) /* Go over the disabled ports and for each assign a random active port */
*port2 = MLX5_LAG_EGRESS_PORT_1; for (i = 0; i < disabled_ports_num; i++) {
else get_random_bytes(&rand, 4);
*port1 = MLX5_LAG_EGRESS_PORT_2;
ports[disabled[i]] = enabled[rand % enabled_ports_num] + 1;
}
} }
static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev) static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{ {
return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop; int i;
for (i = 0; i < ldev->ports; i++)
if (ldev->pf[i].has_drop)
return true;
return false;
} }
static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{ {
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].has_drop) if (!ldev->pf[i].has_drop)
continue; continue;
...@@ -236,12 +265,12 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev) ...@@ -236,12 +265,12 @@ static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
struct lag_tracker *tracker) struct lag_tracker *tracker)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; u8 disabled_ports[MLX5_MAX_PORTS] = {};
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; struct mlx5_core_dev *dev;
struct mlx5_core_dev *inactive; int disabled_index;
u8 v2p_port1, v2p_port2; int num_disabled;
int inactive_idx;
int err; int err;
int i;
/* First delete the current drop rule so there won't be any dropped /* First delete the current drop rule so there won't be any dropped
* packets * packets
...@@ -251,58 +280,60 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev, ...@@ -251,58 +280,60 @@ static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
if (!ldev->tracker.has_inactive) if (!ldev->tracker.has_inactive)
return; return;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2); mlx5_infer_tx_disabled(tracker, ldev->ports, disabled_ports, &num_disabled);
if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) { for (i = 0; i < num_disabled; i++) {
inactive = dev1; disabled_index = disabled_ports[i];
inactive_idx = MLX5_LAG_P2; dev = ldev->pf[disabled_index].dev;
} else { err = mlx5_esw_acl_ingress_vport_drop_rule_create(dev->priv.eswitch,
inactive = dev0; MLX5_VPORT_UPLINK);
inactive_idx = MLX5_LAG_P1; if (!err)
ldev->pf[disabled_index].has_drop = true;
else
mlx5_core_err(dev,
"Failed to create lag drop rule, error: %d", err);
} }
err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
MLX5_VPORT_UPLINK);
if (!err)
ldev->pf[inactive_idx].has_drop = true;
else
mlx5_core_err(inactive,
"Failed to create lag drop rule, error: %d", err);
} }
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2) static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED) if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2); return mlx5_lag_port_sel_modify(ldev, ports);
return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); return mlx5_cmd_modify_lag(dev0, ldev->ports, ports);
} }
void mlx5_modify_lag(struct mlx5_lag *ldev, void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker) struct lag_tracker *tracker)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u8 v2p_port1, v2p_port2; u8 ports[MLX5_MAX_PORTS] = {};
int err; int err;
int i;
int j;
mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ports);
&v2p_port2);
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] || for (i = 0; i < ldev->ports; i++) {
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) { if (ports[i] == ldev->v2p_map[i])
err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2); continue;
err = _mlx5_modify_lag(ldev, ports);
if (err) { if (err) {
mlx5_core_err(dev0, mlx5_core_err(dev0,
"Failed to modify LAG (%d)\n", "Failed to modify LAG (%d)\n",
err); err);
return; return;
} }
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1; memcpy(ldev->v2p_map, ports, sizeof(ports[0]) *
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2; ldev->ports);
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1], mlx5_core_info(dev0, "modify lag map\n");
ldev->v2p_map[MLX5_LAG_P2]); for (j = 0; j < ldev->ports; j++)
mlx5_core_info(dev0, "\tmap port %d:%d\n",
j + 1,
ldev->v2p_map[j]);
break;
} }
if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP && if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
...@@ -362,13 +393,15 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, ...@@ -362,13 +393,15 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err; int err;
int i;
mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s", mlx5_core_info(dev0, "lag map:\n");
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2], for (i = 0; i < ldev->ports; i++)
mlx5_core_info(dev0, "\tport %d:%d\n", i + 1, ldev->v2p_map[i]);
mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n",
shared_fdb, get_str_port_sel_mode(flags)); shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, shared_fdb, flags);
ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) { if (err) {
mlx5_core_err(dev0, mlx5_core_err(dev0,
"Failed to create LAG (%d)\n", "Failed to create LAG (%d)\n",
...@@ -404,16 +437,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, ...@@ -404,16 +437,14 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err; int err;
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], mlx5_infer_tx_affinity_mapping(tracker, ldev->ports, ldev->v2p_map);
&ldev->v2p_map[MLX5_LAG_P2]);
err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags); err = mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
if (err) if (err)
return err; return err;
if (flags & MLX5_LAG_FLAG_HASH_BASED) { if (flags & MLX5_LAG_FLAG_HASH_BASED) {
err = mlx5_lag_port_sel_create(ldev, tracker->hash_type, err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map);
ldev->v2p_map[MLX5_LAG_P2]);
if (err) { if (err) {
mlx5_core_err(dev0, mlx5_core_err(dev0,
"Failed to create LAG port selection(%d)\n", "Failed to create LAG port selection(%d)\n",
...@@ -491,30 +522,37 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) ...@@ -491,30 +522,37 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
u8 mode; u8 mode;
#endif #endif
int i;
if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev) for (i = 0; i < ldev->ports; i++)
return false; if (!ldev->pf[i].dev)
return false;
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev); mode = mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev);
if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS) if (mode != MLX5_ESWITCH_NONE && mode != MLX5_ESWITCH_OFFLOADS)
return false; return false;
return (mode == MLX5_ESWITCH_NONE || mode == MLX5_ESWITCH_OFFLOADS) && for (i = 0; i < ldev->ports; i++)
(mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P1].dev) == if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
mlx5_eswitch_mode(ldev->pf[MLX5_LAG_P2].dev)); return false;
if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
return false;
#else #else
return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) && for (i = 0; i < ldev->ports; i++)
!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev)); if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif #endif
return true;
} }
static void mlx5_lag_add_devices(struct mlx5_lag *ldev) static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{ {
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev) if (!ldev->pf[i].dev)
continue; continue;
...@@ -531,7 +569,7 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) ...@@ -531,7 +569,7 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{ {
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev) if (!ldev->pf[i].dev)
continue; continue;
...@@ -551,6 +589,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) ...@@ -551,6 +589,7 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
bool shared_fdb = ldev->shared_fdb; bool shared_fdb = ldev->shared_fdb;
bool roce_lag; bool roce_lag;
int err; int err;
int i;
roce_lag = __mlx5_lag_is_roce(ldev); roce_lag = __mlx5_lag_is_roce(ldev);
...@@ -561,7 +600,8 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) ...@@ -561,7 +600,8 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev)
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0); mlx5_rescan_drivers_locked(dev0);
} }
mlx5_nic_vport_disable_roce(dev1); for (i = 1; i < ldev->ports; i++)
mlx5_nic_vport_disable_roce(ldev->pf[i].dev);
} }
err = mlx5_deactivate_lag(ldev); err = mlx5_deactivate_lag(ldev);
...@@ -598,6 +638,23 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) ...@@ -598,6 +638,23 @@ static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
return false; return false;
} }
static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
{
bool roce_lag = true;
int i;
for (i = 0; i < ldev->ports; i++)
roce_lag = roce_lag && !mlx5_sriov_is_enabled(ldev->pf[i].dev);
#ifdef CONFIG_MLX5_ESWITCH
for (i = 0; i < ldev->ports; i++)
roce_lag = roce_lag &&
ldev->pf[i].dev->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
return roce_lag;
}
static void mlx5_do_bond(struct mlx5_lag *ldev) static void mlx5_do_bond(struct mlx5_lag *ldev)
{ {
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
...@@ -605,6 +662,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) ...@@ -605,6 +662,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
struct lag_tracker tracker; struct lag_tracker tracker;
bool do_bond, roce_lag; bool do_bond, roce_lag;
int err; int err;
int i;
if (!mlx5_lag_is_ready(ldev)) { if (!mlx5_lag_is_ready(ldev)) {
do_bond = false; do_bond = false;
...@@ -621,14 +679,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) ...@@ -621,14 +679,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (do_bond && !__mlx5_lag_is_active(ldev)) { if (do_bond && !__mlx5_lag_is_active(ldev)) {
bool shared_fdb = mlx5_shared_fdb_supported(ldev); bool shared_fdb = mlx5_shared_fdb_supported(ldev);
roce_lag = !mlx5_sriov_is_enabled(dev0) && roce_lag = mlx5_lag_is_roce_lag(ldev);
!mlx5_sriov_is_enabled(dev1);
#ifdef CONFIG_MLX5_ESWITCH
roce_lag = roce_lag &&
dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
if (shared_fdb || roce_lag) if (shared_fdb || roce_lag)
mlx5_lag_remove_devices(ldev); mlx5_lag_remove_devices(ldev);
...@@ -645,7 +696,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) ...@@ -645,7 +696,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) { } else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0); mlx5_rescan_drivers_locked(dev0);
mlx5_nic_vport_enable_roce(dev1); for (i = 1; i < ldev->ports; i++)
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
} else if (shared_fdb) { } else if (shared_fdb) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0); mlx5_rescan_drivers_locked(dev0);
...@@ -713,7 +765,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, ...@@ -713,7 +765,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
bool is_bonded, is_in_lag, mode_supported; bool is_bonded, is_in_lag, mode_supported;
bool has_inactive = 0; bool has_inactive = 0;
struct slave *slave; struct slave *slave;
int bond_status = 0; u8 bond_status = 0;
int num_slaves = 0; int num_slaves = 0;
int changed = 0; int changed = 0;
int idx; int idx;
...@@ -744,7 +796,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, ...@@ -744,7 +796,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_unlock(); rcu_read_unlock();
/* None of this lagdev's netdevs are slaves of this master. */ /* None of this lagdev's netdevs are slaves of this master. */
if (!(bond_status & 0x3)) if (!(bond_status & GENMASK(ldev->ports - 1, 0)))
return 0; return 0;
if (lag_upper_info) { if (lag_upper_info) {
...@@ -757,7 +809,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, ...@@ -757,7 +809,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
* A device is considered bonded if both its physical ports are slaves * A device is considered bonded if both its physical ports are slaves
* of the same lag master, and only them. * of the same lag master, and only them.
*/ */
is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3; is_in_lag = num_slaves == ldev->ports &&
bond_status == GENMASK(ldev->ports - 1, 0);
/* Lag mode must be activebackup or hash. */ /* Lag mode must be activebackup or hash. */
mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
...@@ -886,7 +939,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev, ...@@ -886,7 +939,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
{ {
unsigned int fn = mlx5_get_dev_index(dev); unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS) if (fn >= ldev->ports)
return; return;
spin_lock(&lag_lock); spin_lock(&lag_lock);
...@@ -902,7 +955,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev, ...@@ -902,7 +955,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
int i; int i;
spin_lock(&lag_lock); spin_lock(&lag_lock);
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < ldev->ports; i++) {
if (ldev->pf[i].netdev == netdev) { if (ldev->pf[i].netdev == netdev) {
ldev->pf[i].netdev = NULL; ldev->pf[i].netdev = NULL;
break; break;
...@@ -916,7 +969,7 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev, ...@@ -916,7 +969,7 @@ static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
{ {
unsigned int fn = mlx5_get_dev_index(dev); unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS) if (fn >= ldev->ports)
return; return;
ldev->pf[fn].dev = dev; ldev->pf[fn].dev = dev;
...@@ -928,11 +981,11 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, ...@@ -928,11 +981,11 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
{ {
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < ldev->ports; i++)
if (ldev->pf[i].dev == dev) if (ldev->pf[i].dev == dev)
break; break;
if (i == MLX5_MAX_PORTS) if (i == ldev->ports)
return; return;
ldev->pf[i].dev = NULL; ldev->pf[i].dev = NULL;
...@@ -1045,11 +1098,11 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, ...@@ -1045,11 +1098,11 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
mutex_lock(&ldev->lock); mutex_lock(&ldev->lock);
mlx5_ldev_add_netdev(ldev, dev, netdev); mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < ldev->ports; i++)
if (!ldev->pf[i].dev) if (!ldev->pf[i].dev)
break; break;
if (i >= MLX5_MAX_PORTS) if (i >= ldev->ports)
ldev->flags |= MLX5_LAG_FLAG_READY; ldev->flags |= MLX5_LAG_FLAG_READY;
mutex_unlock(&ldev->lock); mutex_unlock(&ldev->lock);
mlx5_queue_bond_work(ldev, 0); mlx5_queue_bond_work(ldev, 0);
...@@ -1163,6 +1216,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) ...@@ -1163,6 +1216,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{ {
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
struct mlx5_lag *ldev; struct mlx5_lag *ldev;
int i;
spin_lock(&lag_lock); spin_lock(&lag_lock);
ldev = mlx5_lag_dev(dev); ldev = mlx5_lag_dev(dev);
...@@ -1171,9 +1225,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) ...@@ -1171,9 +1225,11 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
goto unlock; goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ? for (i = 0; i < ldev->ports; i++)
ldev->pf[MLX5_LAG_P1].netdev : if (ldev->tracker.netdev_state[i].tx_enabled)
ldev->pf[MLX5_LAG_P2].netdev; ndev = ldev->pf[i].netdev;
if (!ndev)
ndev = ldev->pf[ldev->ports - 1].netdev;
} else { } else {
ndev = ldev->pf[MLX5_LAG_P1].netdev; ndev = ldev->pf[MLX5_LAG_P1].netdev;
} }
...@@ -1192,16 +1248,19 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, ...@@ -1192,16 +1248,19 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
{ {
struct mlx5_lag *ldev; struct mlx5_lag *ldev;
u8 port = 0; u8 port = 0;
int i;
spin_lock(&lag_lock); spin_lock(&lag_lock);
ldev = mlx5_lag_dev(dev); ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev))) if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock; goto unlock;
if (ldev->pf[MLX5_LAG_P1].netdev == slave) for (i = 0; i < ldev->ports; i++) {
port = MLX5_LAG_P1; if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
else port = i;
port = MLX5_LAG_P2; break;
}
}
port = ldev->v2p_map[port]; port = ldev->v2p_map[port];
...@@ -1213,7 +1272,13 @@ EXPORT_SYMBOL(mlx5_lag_get_slave_port); ...@@ -1213,7 +1272,13 @@ EXPORT_SYMBOL(mlx5_lag_get_slave_port);
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev) u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
{ {
return MLX5_MAX_PORTS; struct mlx5_lag *ldev;
ldev = mlx5_lag_dev(dev);
if (!ldev)
return 0;
return ldev->ports;
} }
EXPORT_SYMBOL(mlx5_lag_get_num_ports); EXPORT_SYMBOL(mlx5_lag_get_num_ports);
...@@ -1243,7 +1308,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ...@@ -1243,7 +1308,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
size_t *offsets) size_t *offsets)
{ {
int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
struct mlx5_core_dev *mdev[MLX5_MAX_PORTS]; struct mlx5_core_dev **mdev;
struct mlx5_lag *ldev; struct mlx5_lag *ldev;
int num_ports; int num_ports;
int ret, i, j; int ret, i, j;
...@@ -1253,14 +1318,20 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ...@@ -1253,14 +1318,20 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
if (!out) if (!out)
return -ENOMEM; return -ENOMEM;
mdev = kvzalloc(sizeof(mdev[0]) * MLX5_MAX_PORTS, GFP_KERNEL);
if (!mdev) {
ret = -ENOMEM;
goto free_out;
}
memset(values, 0, sizeof(*values) * num_counters); memset(values, 0, sizeof(*values) * num_counters);
spin_lock(&lag_lock); spin_lock(&lag_lock);
ldev = mlx5_lag_dev(dev); ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) { if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS; num_ports = ldev->ports;
mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev; for (i = 0; i < ldev->ports; i++)
mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev; mdev[i] = ldev->pf[i].dev;
} else { } else {
num_ports = 1; num_ports = 1;
mdev[MLX5_LAG_P1] = dev; mdev[MLX5_LAG_P1] = dev;
...@@ -1275,13 +1346,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, ...@@ -1275,13 +1346,15 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in, ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
out); out);
if (ret) if (ret)
goto free; goto free_mdev;
for (j = 0; j < num_counters; ++j) for (j = 0; j < num_counters; ++j)
values[j] += be64_to_cpup((__be64 *)(out + offsets[j])); values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
} }
free: free_mdev:
kvfree(mdev);
free_out:
kvfree(out); kvfree(out);
return ret; return ret;
} }
......
...@@ -12,7 +12,8 @@ enum { ...@@ -12,7 +12,8 @@ enum {
static struct mlx5_flow_group * static struct mlx5_flow_group *
mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_definer *definer) struct mlx5_flow_definer *definer,
u8 ports)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *fg; struct mlx5_flow_group *fg;
...@@ -25,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, ...@@ -25,7 +26,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
MLX5_SET(create_flow_group_in, in, match_definer_id, MLX5_SET(create_flow_group_in, in, match_definer_id,
mlx5_get_match_definer_id(definer)); mlx5_get_match_definer_id(definer));
MLX5_SET(create_flow_group_in, in, start_flow_index, 0); MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1); MLX5_SET(create_flow_group_in, in, end_flow_index, ports - 1);
MLX5_SET(create_flow_group_in, in, group_type, MLX5_SET(create_flow_group_in, in, group_type,
MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT); MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
...@@ -36,7 +37,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft, ...@@ -36,7 +37,7 @@ mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_lag_definer *lag_definer, struct mlx5_lag_definer *lag_definer,
u8 port1, u8 port2) u8 *ports)
{ {
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_table_attr ft_attr = {};
...@@ -45,7 +46,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, ...@@ -45,7 +46,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
struct mlx5_flow_namespace *ns; struct mlx5_flow_namespace *ns;
int err, i; int err, i;
ft_attr.max_fte = MLX5_MAX_PORTS; ft_attr.max_fte = ldev->ports;
ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER; ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL); ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
...@@ -61,7 +62,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, ...@@ -61,7 +62,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
} }
lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft, lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
lag_definer->definer); lag_definer->definer,
ldev->ports);
if (IS_ERR(lag_definer->fg)) { if (IS_ERR(lag_definer->fg)) {
err = PTR_ERR(lag_definer->fg); err = PTR_ERR(lag_definer->fg);
goto destroy_ft; goto destroy_ft;
...@@ -70,8 +72,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, ...@@ -70,8 +72,8 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.flags |= FLOW_ACT_NO_APPEND; flow_act.flags |= FLOW_ACT_NO_APPEND;
for (i = 0; i < MLX5_MAX_PORTS; i++) { for (i = 0; i < ldev->ports; i++) {
u8 affinity = i == 0 ? port1 : port2; u8 affinity = ports[i];
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev, dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
vhca_id); vhca_id);
...@@ -279,8 +281,7 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask, ...@@ -279,8 +281,7 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask,
static struct mlx5_lag_definer * static struct mlx5_lag_definer *
mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
enum mlx5_traffic_types tt, bool tunnel, u8 port1, enum mlx5_traffic_types tt, bool tunnel, u8 *ports)
u8 port2)
{ {
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_definer *lag_definer; struct mlx5_lag_definer *lag_definer;
...@@ -308,7 +309,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash, ...@@ -308,7 +309,7 @@ mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
goto free_mask; goto free_mask;
} }
err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2); err = mlx5_lag_create_port_sel_table(ldev, lag_definer, ports);
if (err) if (err)
goto destroy_match_definer; goto destroy_match_definer;
...@@ -331,7 +332,7 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev, ...@@ -331,7 +332,7 @@ static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
int i; int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) for (i = 0; i < ldev->ports; i++)
mlx5_del_flow_rules(lag_definer->rules[i]); mlx5_del_flow_rules(lag_definer->rules[i]);
mlx5_destroy_flow_group(lag_definer->fg); mlx5_destroy_flow_group(lag_definer->fg);
mlx5_destroy_flow_table(lag_definer->ft); mlx5_destroy_flow_table(lag_definer->ft);
...@@ -356,7 +357,7 @@ static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev) ...@@ -356,7 +357,7 @@ static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
static int mlx5_lag_create_definers(struct mlx5_lag *ldev, static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, enum netdev_lag_hash hash_type,
u8 port1, u8 port2) u8 *ports)
{ {
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_lag_definer *lag_definer; struct mlx5_lag_definer *lag_definer;
...@@ -364,7 +365,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev, ...@@ -364,7 +365,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt, lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
false, port1, port2); false, ports);
if (IS_ERR(lag_definer)) { if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer); err = PTR_ERR(lag_definer);
goto destroy_definers; goto destroy_definers;
...@@ -376,7 +377,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev, ...@@ -376,7 +377,7 @@ static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
lag_definer = lag_definer =
mlx5_lag_create_definer(ldev, hash_type, tt, mlx5_lag_create_definer(ldev, hash_type, tt,
true, port1, port2); true, ports);
if (IS_ERR(lag_definer)) { if (IS_ERR(lag_definer)) {
err = PTR_ERR(lag_definer); err = PTR_ERR(lag_definer);
goto destroy_definers; goto destroy_definers;
...@@ -513,13 +514,13 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev) ...@@ -513,13 +514,13 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
} }
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1, u8 port2) enum netdev_lag_hash hash_type, u8 *ports)
{ {
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err; int err;
set_tt_map(port_sel, hash_type); set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, port1, port2); err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err) if (err)
return err; return err;
...@@ -546,12 +547,13 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, ...@@ -546,12 +547,13 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
static int static int
mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
struct mlx5_lag_definer **definers, struct mlx5_lag_definer **definers,
u8 port1, u8 port2) u8 *ports)
{ {
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_destination dest = {}; struct mlx5_flow_destination dest = {};
int err; int err;
int tt; int tt;
int i;
dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK; dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
...@@ -559,19 +561,13 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, ...@@ -559,19 +561,13 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) { for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
struct mlx5_flow_handle **rules = definers[tt]->rules; struct mlx5_flow_handle **rules = definers[tt]->rules;
if (ldev->v2p_map[MLX5_LAG_P1] != port1) { for (i = 0; i < ldev->ports; i++) {
dest.vport.vhca_id = if (ldev->v2p_map[i] == ports[i])
MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id); continue;
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
&dest, NULL);
if (err)
return err;
}
if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
dest.vport.vhca_id = dest.vport.vhca_id =
MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id); MLX5_CAP_GEN(ldev->pf[ports[i] - 1].dev,
err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2], vhca_id);
err = mlx5_modify_rule_destination(rules[i],
&dest, NULL); &dest, NULL);
if (err) if (err)
return err; return err;
...@@ -581,14 +577,14 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev, ...@@ -581,14 +577,14 @@ mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
return 0; return 0;
} }
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2) int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
{ {
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
int err; int err;
err = mlx5_lag_modify_definers_destinations(ldev, err = mlx5_lag_modify_definers_destinations(ldev,
port_sel->outer.definers, port_sel->outer.definers,
port1, port2); ports);
if (err) if (err)
return err; return err;
...@@ -597,7 +593,7 @@ int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2) ...@@ -597,7 +593,7 @@ int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
return mlx5_lag_modify_definers_destinations(ldev, return mlx5_lag_modify_definers_destinations(ldev,
port_sel->inner.definers, port_sel->inner.definers,
port1, port2); ports);
} }
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
......
...@@ -27,22 +27,20 @@ struct mlx5_lag_port_sel { ...@@ -27,22 +27,20 @@ struct mlx5_lag_port_sel {
#ifdef CONFIG_MLX5_ESWITCH #ifdef CONFIG_MLX5_ESWITCH
int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2); int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports);
void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev); void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, u8 port1, enum netdev_lag_hash hash_type, u8 *ports);
u8 port2);
#else /* CONFIG_MLX5_ESWITCH */ #else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
enum netdev_lag_hash hash_type, enum netdev_lag_hash hash_type,
u8 port1, u8 port2) u8 *ports)
{ {
return 0; return 0;
} }
static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 *ports)
u8 port2)
{ {
return 0; return 0;
} }
......
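The most substantial behavioural piece above is the reworked tx-affinity inference in lag.c: instead of the old two-port special case, the new code keeps the native 1-based port mapping and remaps each inactive port onto a randomly chosen active one. A standalone userspace sketch of that rule, with rand() standing in for the kernel's get_random_bytes() and a simplified port-state struct in place of the lag tracker:

#include <stdlib.h>

#define MAX_PORTS	4

struct port_state {
	int tx_enabled;
	int link_up;
};

static void infer_tx_affinity(const struct port_state *state,
			      unsigned int num_ports, unsigned char *ports)
{
	unsigned int enabled[MAX_PORTS], disabled[MAX_PORTS];
	unsigned int num_enabled = 0, num_disabled = 0;
	unsigned int i;

	for (i = 0; i < num_ports; i++) {
		if (state[i].tx_enabled && state[i].link_up)
			enabled[num_enabled++] = i;
		else
			disabled[num_disabled++] = i;
	}

	/* Native mapping by default; egress ports are 1-based */
	for (i = 0; i < num_ports; i++)
		ports[i] = i + 1;

	/* All ports up, or all down: keep the native mapping */
	if (num_enabled == num_ports || num_disabled == num_ports)
		return;

	/* Point each disabled port at a randomly chosen active port */
	for (i = 0; i < num_disabled; i++)
		ports[disabled[i]] = enabled[rand() % num_enabled] + 1;
}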