Commit 5249001d authored by Vlad Buslov, committed by Saeed Mahameed

net/mlx5: Bridge, mark reg_c1 when pushing VLAN

On ingress VLAN push, also assign the value 0x7FE to the reg_c1 tunnel id+opts
bits (tunnel id 0, which is not a valid tunnel id, and option 0x7FE, which
was reserved by one of the previous patches in the series). In a following
patch the reg value is matched on egress miss to restore the packet to its
original state by removing the VLAN before passing it to the software data
path.
Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 64fc4b35
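
For reference, a minimal standalone sketch of how the mark value used by this patch decomposes. The bit widths below are assumptions consistent with the 0x7FF/0x7FE values mentioned above (zone id in the low 8 bits of reg_c1, 11 tunnel-option bits, 12 tunnel-id bits, 1 reserved bit); they are not part of this diff:

#include <stdio.h>
#include <stdint.h>

/* Assumed reg_c1 layout: [31] reserved, [30:19] tunnel id,
 * [18:8] tunnel options, [7:0] zone id.
 */
#define ESW_ZONE_ID_BITS  8
#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS   12

int main(void)
{
        uint32_t tun_id = 0;                                /* 0 is not a valid tunnel id */
        uint32_t tun_opts = (1u << ESW_TUN_OPTS_BITS) - 2;  /* 0x7FE, one below the 0x7FF slow-table mapping */
        uint32_t mark = (tun_id << ESW_TUN_OPTS_BITS) | tun_opts;

        /* The new modify-header action writes this value into reg_c1 at
         * offset 8 (past the zone id) with length
         * ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS = 23 bits.
         */
        printf("mark = 0x%x, offset = %d, length = %d\n",
               mark, ESW_ZONE_ID_BITS, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
        return 0;
}

Compiled and run, this prints mark = 0x7fe, offset = 8, length = 23, matching the MLX5_SET values added in the hunk below.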
@@ -465,8 +465,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
 		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 
 	if (vlan && vlan->pkt_reformat_push) {
-		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+				   MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 		flow_act.pkt_reformat = vlan->pkt_reformat_push;
+		flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
 	} else if (vlan) {
 		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
 				 outer_headers.cvlan_tag);
@@ -845,6 +847,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_
 	vlan->pkt_reformat_pop = NULL;
 }
 
+static int
+mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+	struct mlx5_modify_hdr *pkt_mod_hdr;
+
+	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+	MLX5_SET(set_action_in, action, offset, 8);
+	MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
+	MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
+
+	pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
+	if (IS_ERR(pkt_mod_hdr))
+		return PTR_ERR(pkt_mod_hdr);
+
+	vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
+	return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+	mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
+	vlan->pkt_mod_hdr_push_mark = NULL;
+}
+
 static struct mlx5_esw_bridge_vlan *
 mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
 			    struct mlx5_eswitch *esw)
@@ -864,6 +893,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
 		err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
 		if (err)
 			goto err_vlan_push;
+
+		err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
+		if (err)
+			goto err_vlan_push_mark;
 	}
 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
 		err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
@@ -882,6 +915,9 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
 	if (vlan->pkt_reformat_pop)
 		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
 err_vlan_pop:
+	if (vlan->pkt_mod_hdr_push_mark)
+		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
 	if (vlan->pkt_reformat_push)
 		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
 err_vlan_push:
@@ -908,6 +944,8 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
 	if (vlan->pkt_reformat_pop)
 		mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+	if (vlan->pkt_mod_hdr_push_mark)
+		mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
 	if (vlan->pkt_reformat_push)
 		mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
 }
...
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
 	struct list_head fdb_list;
 	struct mlx5_pkt_reformat *pkt_reformat_push;
 	struct mlx5_pkt_reformat *pkt_reformat_pop;
+	struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
 };
 
 struct mlx5_esw_bridge_port {
...
@@ -130,11 +130,20 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
 #define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
 #define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
 #define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
 /* 0x7FF is a reserved mapping */
 #define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
 				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
 #define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+					   ESW_TUN_OPTS_BITS) | \
+					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+		ESW_TUN_OPTS_OFFSET + 1)
 
 u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
...
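
As a sanity check on the new eswitch.h macros above, a small sketch under the same assumed bit widths as before; the GENMASK here is a simplified 32-bit local definition for the sketch only, not the kernel's:

#include <assert.h>

/* Simplified 32-bit GENMASK, for this sketch only. */
#define GENMASK(h, l)       (((~0u) >> (31 - (h))) & ((~0u) << (l)))

#define ESW_RESERVED_BITS   1
#define ESW_ZONE_ID_BITS    8
#define ESW_TUN_OPTS_BITS   11
#define ESW_TUN_ID_BITS     12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET      ESW_TUN_OPTS_OFFSET

int main(void)
{
        /* Option value: one below the 0x7FF slow-table mapping. */
        assert(GENMASK(ESW_TUN_OPTS_BITS - 1, 0) - 1 == 0x7FE);
        /* Value written into the tunnel id + opts bits (tunnel id 0). */
        assert(((0u << ESW_TUN_OPTS_BITS) | 0x7FE) == 0x7FE);
        /* The *_MARK macro expands to GENMASK(18, 9), which is exactly the
         * 0x7FE option value shifted to its position in reg_c1
         * (0x7FE << 8 == 0x7fe00).
         */
        assert(GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS,
                       ESW_TUN_OPTS_OFFSET + 1) == (0x7FEu << ESW_TUN_OFFSET));
        return 0;
}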