Commit ee950e5d authored by Chris Mi, committed by Saeed Mahameed

net/mlx5e: TC, Restore tunnel info for sample offload

Currently the sample offload actions send the encapsulated packet
to software. sFlow expects tunneled packets to be decapsulated, with
the tunnel properties available in the skb metadata fields.

Reuse the functions used by connection tracking to map the outer
header properties to a unique id. The next patch will use that id
to restore the tunnel information of decapsulated packets onto the
skb.
Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent d12e20ac
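The round trip the commit message describes can be sketched as follows.
This is a simplified illustration, not code from the patch: the two helper
functions are hypothetical and assume the usual mlx5 driver headers, while
mapping_add()/mapping_find(), the reg_c0_obj_pool, and the new
sample.tunnel_id field all appear in the diff below.

/* Sketch of the reg_c0 mapping round trip (hypothetical helpers;
 * error handling and locking omitted).
 */
static int sample_store_restore_obj(struct mlx5_eswitch *esw,
                                    struct mlx5e_sample_attr *sample_attr,
                                    u32 tunnel_id, u32 *obj_id)
{
        struct mlx5_mapped_obj restore_obj = {};

        /* Offload time: pack the sample properties, now including the
         * tunnel id of the outer headers, and get back a compact id
         * that hardware carries with the packet in reg_c0.
         */
        restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE;
        restore_obj.sample.group_id = sample_attr->group_num;
        restore_obj.sample.rate = sample_attr->rate;
        restore_obj.sample.trunc_size = sample_attr->trunc_size;
        restore_obj.sample.tunnel_id = tunnel_id;       /* new in this patch */

        return mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, obj_id);
}

static int sample_resolve_restore_obj(struct mlx5_eswitch *esw, u32 reg_c0,
                                      struct mlx5_mapped_obj *mapped_obj)
{
        /* Receive time: reg_c0 taken from the CQE resolves back to the
         * full object, so the tunnel info can be restored onto the skb
         * before the packet is handed to psample.
         */
        return mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, mapped_obj);
}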
@@ -608,8 +608,8 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
 	return true;
 }
 
-static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
-			      struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
+				    struct mlx5e_tc_update_priv *tc_priv)
 {
 	struct mlx5e_priv *priv = netdev_priv(skb->dev);
 	u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
@@ -641,6 +641,21 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
 	return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
 }
 
+static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+				     struct mlx5_mapped_obj *mapped_obj,
+				     struct mlx5e_tc_update_priv *tc_priv)
+{
+	if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+		netdev_dbg(priv->netdev,
+			   "Failed to restore tunnel info for sampled packet\n");
+		return;
+	}
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+	mlx5e_tc_sample_skb(skb, mapped_obj);
+#endif /* CONFIG_MLX5_TC_SAMPLE */
+	mlx5_rep_tc_post_napi_receive(tc_priv);
+}
+
 bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 			     struct sk_buff *skb,
 			     struct mlx5e_tc_update_priv *tc_priv)
@@ -648,7 +663,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	struct mlx5_mapped_obj mapped_obj;
 	struct mlx5_eswitch *esw;
 	struct mlx5e_priv *priv;
-	u32 reg_c0, reg_c1;
+	u32 reg_c0;
 	int err;
 
 	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -660,8 +675,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	 */
 	skb->mark = 0;
 
-	reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
 	priv = netdev_priv(skb->dev);
 	esw = priv->mdev->priv.eswitch;
 	err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
@@ -673,12 +686,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	}
 
 	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
-		return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+		u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+
+		return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
 	} else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
-		mlx5e_tc_sample_skb(skb, &mapped_obj);
+		mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
 		return false;
-#endif /* CONFIG_MLX5_TC_SAMPLE */
 	} else {
 		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
 		return false;
...
@@ -364,7 +364,8 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
 struct mlx5_flow_handle *
 mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 			struct mlx5_flow_spec *spec,
-			struct mlx5_flow_attr *attr)
+			struct mlx5_flow_attr *attr,
+			u32 tunnel_id)
 {
 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
 	struct mlx5_vport_tbl_attr per_vport_tbl_attr;
@@ -438,6 +439,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
 	restore_obj.sample.group_id = sample_attr->group_num;
 	restore_obj.sample.rate = sample_attr->rate;
 	restore_obj.sample.trunc_size = sample_attr->trunc_size;
+	restore_obj.sample.tunnel_id = tunnel_id;
 	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
 	if (err)
 		goto err_obj_id;
...
@@ -24,7 +24,8 @@ void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj
 struct mlx5_flow_handle *
 mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv,
 			struct mlx5_flow_spec *spec,
-			struct mlx5_flow_attr *attr);
+			struct mlx5_flow_attr *attr,
+			u32 tunnel_id);
 
 void
 mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv,
...
@@ -1148,7 +1148,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 						mod_hdr_acts);
 #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
 	} else if (flow_flag_test(flow, SAMPLE)) {
-		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr);
+		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
+					       mlx5e_tc_get_flow_tun_id(flow));
 #endif
 	} else {
 		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
@@ -1625,17 +1626,22 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 	}
 }
 
-static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
+static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 	struct flow_action *flow_action = &rule->action;
 	const struct flow_action_entry *act;
 	int i;
 
+	if (chain)
+		return false;
+
 	flow_action_for_each(i, act, flow_action) {
 		switch (act->id) {
 		case FLOW_ACTION_GOTO:
 			return true;
+		case FLOW_ACTION_SAMPLE:
+			return true;
 		default:
 			continue;
 		}
@@ -1876,7 +1882,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		return -EOPNOTSUPP;
 
 	needs_mapping = !!flow->attr->chain;
-	sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
+	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
 	*match_inner = !needs_mapping;
 
 	if ((needs_mapping || sets_mapping) &&
...
@@ -61,6 +61,7 @@ struct mlx5_mapped_obj {
 			u32 group_id;
 			u32 rate;
 			u32 trunc_size;
+			u32 tunnel_id;
 		} sample;
 	};
 };
...