Commit 8ce81fc0 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5e: TC, Add peer flow in mpesw mode

In multiport eswitch (mpesw) mode, always add the peer flow so the rule is also offloaded on the peer eswitch, and keep the original mdev as the flow's in_mdev in that case.
While at it, rename mlx5_lag_mpesw_is_activated() to mlx5_lag_is_mpesw() to be consistent with how the other lag modes are checked.
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Maor Dickman <maord@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent a32327a3
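
Editor's note: for context on the rename, a hedged sketch of the naming consistency it buys. The caller function below is hypothetical and only illustrative; mlx5_lag_is_roce() and mlx5_lag_is_sriov() are the existing lag-mode helpers the commit message alludes to, and mlx5_lag_is_mpesw() is the name introduced by this patch.

/* Hypothetical caller, for illustration only: after the rename, every
 * lag mode is queried through the same mlx5_lag_is_*() naming scheme.
 */
static bool example_any_lag_mode_active(struct mlx5_core_dev *dev)
{
        return mlx5_lag_is_roce(dev) ||
               mlx5_lag_is_sriov(dev) ||
               mlx5_lag_is_mpesw(dev);
}
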
@@ -461,7 +461,7 @@ static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
 	if (!MLX5_ESWITCH_MANAGER(dev))
 		return -EOPNOTSUPP;
 
-	ctx->val.vbool = mlx5_lag_mpesw_is_activated(dev);
+	ctx->val.vbool = mlx5_lag_is_mpesw(dev);
 	return 0;
 }
@@ -97,7 +97,8 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
 	      mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
 		return -EOPNOTSUPP;
 
-	if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+	if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev &&
+	    !mlx5_lag_is_mpesw(priv->mdev))
 		return -EOPNOTSUPP;
 
 	return 0;
@@ -4311,7 +4311,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
 {
-	return same_hw_reps(priv, out_dev) && mlx5_lag_mpesw_is_activated(priv->mdev);
+	return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
 }
 
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
@@ -4482,6 +4482,9 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
 	    (is_rep_ingress || act_is_encap))
 		return true;
 
+	if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
+		return true;
+
 	return false;
 }
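
Editor's note: a hedged sketch of how this predicate plugs into the offload path: with the new check, every flow offloaded while mpesw is active takes the peer-flow branch. The example_add_fdb_flow() wrapper below is hypothetical; only is_peer_flow_needed() and mlx5e_tc_add_fdb_peer_flow() appear in this diff, and the parameter list of the latter is assumed from the hunk header further down.

/* Hypothetical caller sketch, not the kernel code: once the flow is
 * offloaded on the local eswitch, duplicate it on the peer eswitch
 * whenever is_peer_flow_needed() says so - which, after this patch,
 * includes every flow in mpesw mode.
 */
static int example_add_fdb_flow(struct flow_cls_offload *f,
                                struct mlx5e_tc_flow *flow,
                                unsigned long flow_flags)
{
        int err = 0;

        if (is_peer_flow_needed(flow))
                err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);

        return err;
}
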
@@ -4687,8 +4690,10 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	 * So packets redirected to uplink use the same mdev of the
 	 * original flow and packets redirected from uplink use the
 	 * peer mdev.
+	 * In multiport eswitch it's a special case that we need to
+	 * keep the original mdev.
 	 */
-	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
+	if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
 		in_mdev = peer_priv->mdev;
 	else
 		in_mdev = priv->mdev;
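
Editor's note: a hedged restatement of the in_mdev choice above as a standalone helper, purely illustrative (the kernel keeps this inline): the peer mdev is used only when the flow came in on the uplink rep and mpesw is not active.

/* Hypothetical helper, for illustration only: pick the mdev used as
 * in_mdev for the duplicated (peer) flow. In mpesw mode the original
 * mdev is kept even for uplink-rep flows, per the comment in the hunk.
 */
static struct mlx5_core_dev *
example_pick_peer_in_mdev(struct mlx5e_priv *priv,
                          struct mlx5e_priv *peer_priv,
                          struct mlx5_esw_flow_attr *attr)
{
        if (attr->in_rep->vport == MLX5_VPORT_UPLINK &&
            !mlx5_lag_is_mpesw(priv->mdev))
                return peer_priv->mdev;

        return priv->mdev;
}
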
@@ -443,7 +443,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
 		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
 		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
-		    mlx5_lag_mpesw_is_activated(esw->dev))
+		    mlx5_lag_is_mpesw(esw->dev))
 			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
 	}
 	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
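
Editor's note: a hedged sketch of the destination-type distinction this hunk relies on: a vport destination addressed by MLX5_VPORT_UPLINK versus the dedicated UPLINK destination type when mpesw is active. The example_* helper is hypothetical.

/* Hypothetical sketch, not the kernel code: fill a flow destination for
 * the uplink. With mpesw active the dedicated UPLINK destination type is
 * used instead of a plain vport destination numbered MLX5_VPORT_UPLINK.
 */
static void example_setup_uplink_dest(struct mlx5_flow_destination *dest,
                                      struct mlx5_core_dev *dev)
{
        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = MLX5_VPORT_UPLINK;

        if (mlx5_lag_is_mpesw(dev))
                dest->type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
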
@@ -116,7 +116,7 @@ int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
 	return -EOPNOTSUPP;
 }
 
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
 {
 	struct mlx5_lag *ldev = mlx5_lag_dev(dev);
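
Editor's note: the body of the renamed helper is truncated in this excerpt. A hedged sketch of what such a mode check typically looks like, assuming struct mlx5_lag carries the active mode in a field compared against an MLX5_LAG_MODE_MPESW value; both the field and the constant are assumptions here, not taken from this diff.

/* Hedged sketch of the helper body, under the assumptions above. */
bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev = mlx5_lag_dev(dev);

        /* Assumed mode field and MLX5_LAG_MODE_MPESW value. */
        return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
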
@@ -27,7 +27,7 @@ struct mlx5_mpesw_work_st {
 int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
 			     struct net_device *out_dev,
 			     struct netlink_ext_ack *extack);
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
 void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev);
 int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev);