Commit 93b3586e authored by Huy Nguyen, committed by Saeed Mahameed

net/mlx5: Support inner header match criteria for non decap flow action

The OVS application can create an offloaded drop rule that drops VXLAN
traffic using both inner and outer header match criteria. The mlx5_core
driver correctly detects the inner and outer header match criteria, but
does not enable the inner header match criteria, due to an incorrect
assumption in mlx5_eswitch_add_offloaded_rule that only decap rules
need inner header criteria.

Solution:
Remove mlx5_esw_flow_attr's match_level and tunnel_match_level and add
two new members, inner_match_level and outer_match_level. The
inner/outer_match_level is set to MLX5_MATCH_NONE when the
corresponding inner/outer match criteria are not specified in the tc
rule creation request. The decap assumption is removed; the code now
only needs to check inner_match_level and outer_match_level to enable
the corresponding bits in the firmware's match_criteria_enable value
(a condensed sketch of this logic follows the commit metadata below).

Fixes: 6363651d ("net/mlx5e: Properly set steering match levels for offloaded TC decap rules")
Signed-off-by: Huy Nguyen <huyn@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 405b93eb
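Before the patched hunks, here is a condensed, standalone sketch of the behavioral change. This is not the driver source: the enum values, bit positions, and the old_criteria()/new_criteria() helpers are simplified stand-ins that only mirror the shape of the logic in mlx5_eswitch_add_offloaded_rule before and after the patch.

/*
 * Standalone model, not driver code: MLX5_MATCH_* values and the two
 * helpers below are simplified stand-ins mirroring the patch.
 */
#include <stdbool.h>
#include <stdio.h>

enum { MLX5_MATCH_NONE = 0, MLX5_MATCH_L2 = 1 };	/* stand-in match levels */
#define MLX5_MATCH_OUTER_HEADERS	(1 << 0)	/* stand-in enable bits */
#define MLX5_MATCH_INNER_HEADERS	(1 << 1)

/* Old logic: inner headers were enabled only for decap rules. */
static unsigned int old_criteria(bool decap, int match_level, int tunnel_match_level)
{
	unsigned int crit = 0;

	if (decap) {
		if (tunnel_match_level != MLX5_MATCH_NONE)
			crit |= MLX5_MATCH_OUTER_HEADERS;
		if (match_level != MLX5_MATCH_NONE)
			crit |= MLX5_MATCH_INNER_HEADERS;
	} else if (match_level != MLX5_MATCH_NONE) {
		crit |= MLX5_MATCH_OUTER_HEADERS;	/* inner criteria never enabled */
	}
	return crit;
}

/* New logic: each level enables its own bit, with no decap dependency. */
static unsigned int new_criteria(int inner_match_level, int outer_match_level)
{
	unsigned int crit = 0;

	if (outer_match_level != MLX5_MATCH_NONE)
		crit |= MLX5_MATCH_OUTER_HEADERS;
	if (inner_match_level != MLX5_MATCH_NONE)
		crit |= MLX5_MATCH_INNER_HEADERS;
	return crit;
}

int main(void)
{
	/* The reported case: a non-decap VXLAN drop rule with inner + outer matches. */
	printf("old: 0x%x\n", old_criteria(false, MLX5_MATCH_L2, MLX5_MATCH_L2));	/* 0x1: inner match lost */
	printf("new: 0x%x\n", new_criteria(MLX5_MATCH_L2, MLX5_MATCH_L2));		/* 0x3: both enabled */
	return 0;
}

For the non-decap drop rule from the report, the old shape leaves MLX5_MATCH_INNER_HEADERS clear, so the inner match criteria are never applied; the new shape enables both bits.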
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				 struct mlx5_flow_spec *spec,
 				 struct flow_cls_offload *f,
 				 struct net_device *filter_dev,
-				 u8 *match_level, u8 *tunnel_match_level)
+				 u8 *inner_match_level, u8 *outer_match_level)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
+	u8 *match_level;
 
-	*match_level = MLX5_MATCH_NONE;
+	match_level = outer_match_level;
 
 	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	}
 
 	if (mlx5e_get_tc_tun(filter_dev)) {
-		if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+		if (parse_tunnel_attr(priv, spec, f, filter_dev,
+				      outer_match_level))
 			return -EOPNOTSUPP;
 
-		/* In decap flow, header pointers should point to the inner
+		/* At this point, header pointers should point to the inner
 		 * headers, outer header were already set by parse_tunnel_attr
 		 */
+		match_level = inner_match_level;
 		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
 						       spec);
 		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 			    struct flow_cls_offload *f,
 			    struct net_device *filter_dev)
 {
+	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
 	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
 	struct mlx5_eswitch_rep *rep;
 	int err;
 
-	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+	inner_match_level = MLX5_MATCH_NONE;
+	outer_match_level = MLX5_MATCH_NONE;
+
+	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+				 &outer_match_level);
+	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+				 outer_match_level : inner_match_level;
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != MLX5_VPORT_UPLINK &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		     esw->offloads.inline_mode < match_level)) {
+		     esw->offloads.inline_mode < non_tunnel_match_level)) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Flow is not offloaded due to min inline setting");
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    match_level, esw->offloads.inline_mode);
+				    non_tunnel_match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}
 
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		flow->esw_attr->match_level = match_level;
-		flow->esw_attr->tunnel_match_level = tunnel_match_level;
+		flow->esw_attr->inner_match_level = inner_match_level;
+		flow->esw_attr->outer_match_level = outer_match_level;
 	} else {
-		flow->nic_attr->match_level = match_level;
+		flow->nic_attr->match_level = non_tunnel_match_level;
 	}
 
 	return err;
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
 		struct mlx5_termtbl_handle *termtbl;
 	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
 	u32	mod_hdr_id;
-	u8	match_level;
-	u8	tunnel_match_level;
+	u8	inner_match_level;
+	u8	outer_match_level;
 	struct mlx5_fc *counter;
 	u32	chain;
 	u16	prio;
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
-		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-		if (attr->match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-	} else if (attr->match_level != MLX5_MATCH_NONE) {
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-	}
+	if (attr->inner_match_level != MLX5_MATCH_NONE)
+		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
-	if (attr->match_level != MLX5_MATCH_NONE)
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
 	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);