Commit 9be6c21f authored by Shay Drory, committed by Saeed Mahameed

net/mlx5e: Handle offloads flows per peer

Currently, the E-switch offloads table has a single list of all flows
that created a peer_flow over the peer eswitch.
In order to support more than one peer, extend the E-switch offloads
table peer_flows to an array of lists, where each peer has a dedicated
index obtained via mlx5_get_dev_index(). Likewise, extend the original
flow to hold an array of peer list entries.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 0af3613d
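
For readers unfamiliar with the pattern, the change converts a single intrusive list into a fixed-size array of lists, one slot per possible peer, indexed by the peer's device index; the slot belonging to the local device is skipped when iterating. Below is a minimal userspace sketch of that pattern. It uses a simplified stand-in for the kernel's struct list_head, a hypothetical dev_index field in place of mlx5_get_dev_index(), and a placeholder value for MLX5_MAX_PORTS, so it illustrates the shape of the change rather than the real driver API:

#include <stdio.h>

#define MLX5_MAX_PORTS 4 /* placeholder; the real constant lives in the driver */

/* Simplified stand-in for the kernel's intrusive struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Offloads table: one flow list per possible peer, as in the patch. */
struct esw_offloads {
	struct list_head peer_flows[MLX5_MAX_PORTS];
};

/* A flow carries one list entry per peer it may be duplicated to. */
struct tc_flow {
	int dev_index; /* hypothetical stand-in for mlx5_get_dev_index() */
	struct list_head peer[MLX5_MAX_PORTS];
};

int main(void)
{
	struct esw_offloads offloads;
	struct tc_flow flow = { .dev_index = 0 };
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		INIT_LIST_HEAD(&offloads.peer_flows[i]);

	/* Link the flow into every peer slot except the local device's. */
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == flow.dev_index)
			continue;
		list_add_tail(&flow.peer[i], &offloads.peer_flows[i]);
		printf("flow linked into peer_flows[%d]\n", i);
	}
	return 0;
}

A fixed array keeps the per-peer lookup O(1) and avoids any allocation when a peer appears or disappears; slots for absent peers simply remain empty lists.
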
@@ -96,7 +96,7 @@ struct mlx5e_tc_flow {
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
-	struct list_head peer; /* flows with peer flow */
+	struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
 	struct list_head unready; /* flows not ready to be offloaded (e.g
 				   * due to missing route)
 				   */
...
@@ -1980,7 +1980,8 @@ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
 		mlx5e_flow_put(priv, flow);
 }
 
-static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+				       int peer_index)
 {
 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
 	struct mlx5e_tc_flow *peer_flow;
@@ -1991,18 +1992,32 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
 		return;
 
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_del(&flow->peer);
+	list_del(&flow->peer[peer_index]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
-	flow_flag_clear(flow, DUP);
-
 	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+		if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+			continue;
 		if (refcount_dec_and_test(&peer_flow->refcnt)) {
 			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
 			list_del(&peer_flow->peer_flows);
 			kfree(peer_flow);
 		}
 	}
+
+	if (list_empty(&flow->peer_flows))
+		flow_flag_clear(flow, DUP);
+}
+
+static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(flow->priv->mdev))
+			continue;
+		mlx5e_tc_del_fdb_peer_flow(flow, i);
+	}
 }
 
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
@@ -2017,7 +2032,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
-		mlx5e_tc_del_fdb_peer_flow(flow);
+		mlx5e_tc_del_fdb_peers_flow(flow);
 		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
@@ -4403,6 +4418,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
+	int i = mlx5_get_dev_index(peer_esw->dev);
 	struct mlx5e_rep_priv *peer_urpriv;
 	struct mlx5e_tc_flow *peer_flow;
 	struct mlx5_core_dev *in_mdev;
@@ -4435,7 +4451,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
 	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
 	flow_flag_set(flow, DUP);
 	mutex_lock(&esw->offloads.peer_mutex);
-	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+	list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
 	mutex_unlock(&esw->offloads.peer_mutex);
 
 out:
@@ -5288,9 +5304,14 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
 {
 	struct mlx5e_tc_flow *flow, *tmp;
+	int i;
 
-	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
-		mlx5e_tc_del_fdb_peer_flow(flow);
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		if (i == mlx5_get_dev_index(esw->dev))
+			continue;
+		list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
+			mlx5e_tc_del_fdb_peers_flow(flow);
+	}
 }
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
...
@@ -249,7 +249,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_drop_group;
 	struct mlx5_flow_handle *vport_rx_drop_rule;
 	struct xarray vport_reps;
-	struct list_head peer_flows;
+	struct list_head peer_flows[MLX5_MAX_PORTS];
 	struct mutex peer_mutex;
 	struct mutex encap_tbl_lock; /* protects encap_tbl */
 	DECLARE_HASHTABLE(encap_tbl, 8);
...
@@ -2825,8 +2825,10 @@ static int mlx5_esw_offloads_devcom_event(int event,
 void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+	int i;
 
-	INIT_LIST_HEAD(&esw->offloads.peer_flows);
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
 	mutex_init(&esw->offloads.peer_mutex);
 
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
...