Commit 8ea7bcf6 authored by Jianbo Liu, committed by Saeed Mahameed

net/mlx5: E-Switch, Add default drop rule for unmatched packets

The ft_offloads table serves to steer packets, which are from the
eswitch, to the representor associated with the packets' source vport.

Previously, if a packet's source vport or metadata was not associated
with any representor, it was forwarded to the uplink representor. The
representor got packets it shouldn't have as they weren't coming from
the uplink vport.

One such effect of this breakage can be observed if the uplink
representor is attached to a bridge, where such illegal packets will
be broadcast to the remaining ports, flooding the switch with illegal
packets. In the case where IB loopback (e.g., SNAP) is enabled, all
transmitted packets would be looped back, and received by the uplink
representor, and result in an infinite feedback loop.

Therefore, block this hole by adding a default drop rule to the
ft_offloads table, so that all unmatched packets with no associated
representor are dropped.
Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Reviewed-by: Gavi Teitz <gavi@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent d494dd2b
...@@ -244,6 +244,8 @@ struct mlx5_esw_offload { ...@@ -244,6 +244,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads; struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group; struct mlx5_flow_group *vport_rx_group;
struct mlx5_flow_group *vport_rx_drop_group;
struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps; struct xarray vport_reps;
struct list_head peer_flows; struct list_head peer_flows;
struct mutex peer_mutex; struct mutex peer_mutex;
......
...@@ -70,6 +70,8 @@ ...@@ -70,6 +70,8 @@
#define MLX5_ESW_VPORT_TBL_SIZE 128 #define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4 #define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE, .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS, .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
...@@ -1930,7 +1932,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) ...@@ -1930,7 +1932,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0); atomic64_set(&esw->user_count, 0);
} }
static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw) static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{ {
int nvports; int nvports;
...@@ -1955,7 +1957,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) ...@@ -1955,7 +1957,8 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
ft_attr.max_fte = esw_get_offloads_ft_size(esw); ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1; ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr); ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
...@@ -1984,7 +1987,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) ...@@ -1984,7 +1987,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports; int nvports;
int err = 0; int err = 0;
nvports = esw_get_offloads_ft_size(esw); nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL); flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in) if (!flow_group_in)
return -ENOMEM; return -ENOMEM;
...@@ -2014,6 +2017,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) ...@@ -2014,6 +2017,52 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group); mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
} }
/* Return the flow index reserved for the catch-all drop rule. */
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* The ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE
	 * (1) entry for the drop rule, which sits at the very end of the
	 * table; its index therefore equals the count of steering source
	 * ports (vports + int_ports).
	 */
	int drop_idx = esw_get_nr_ft_offloads_steering_src_ports(esw);

	return drop_idx;
}
static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
u32 *flow_group_in;
int flow_index;
int err = 0;
flow_index = esw_create_vport_rx_drop_rule_index(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
goto out;
}
esw->offloads.vport_rx_drop_group = g;
out:
kvfree(flow_group_in);
return err;
}
/* Destroy the drop-rule flow group, if it was ever created (it may be
 * NULL on an init failure path).
 */
static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_group *drop_grp = esw->offloads.vport_rx_drop_group;

	if (!drop_grp)
		return;

	mlx5_destroy_flow_group(drop_grp);
}
struct mlx5_flow_handle * struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest) struct mlx5_flow_destination *dest)
...@@ -2062,6 +2111,32 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, ...@@ -2062,6 +2111,32 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
return flow_rule; return flow_rule;
} }
/* Install the default drop rule in ft_offloads so that packets matching
 * no representor rule are dropped instead of leaking to the uplink
 * representor. Returns 0 on success, negative errno on failure; on
 * success the handle is stored in esw->offloads.vport_rx_drop_rule.
 */
static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_act flow_act = {};

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	/* NULL spec: match everything that reached this (last) entry. */
	handle = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
				     &flow_act, NULL, 0);
	if (IS_ERR(handle)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
			 PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	esw->offloads.vport_rx_drop_rule = handle;
	return 0;
}
/* Remove the default drop rule, if it was ever installed (it may be
 * NULL on an init failure path).
 */
static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle *drop_rule = esw->offloads.vport_rx_drop_rule;

	if (!drop_rule)
		return;

	mlx5_del_flow_rules(drop_rule);
}
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode) static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{ {
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2; u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
...@@ -3062,8 +3137,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) ...@@ -3062,8 +3137,20 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
if (err) if (err)
goto create_fg_err; goto create_fg_err;
err = esw_create_vport_rx_drop_group(esw);
if (err)
goto create_rx_drop_fg_err;
err = esw_create_vport_rx_drop_rule(esw);
if (err)
goto create_rx_drop_rule_err;
return 0; return 0;
create_rx_drop_rule_err:
esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
esw_destroy_vport_rx_group(esw);
create_fg_err: create_fg_err:
esw_destroy_offloads_fdb_tables(esw); esw_destroy_offloads_fdb_tables(esw);
create_fdb_err: create_fdb_err:
...@@ -3081,6 +3168,8 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) ...@@ -3081,6 +3168,8 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{ {
esw_destroy_vport_rx_drop_rule(esw);
esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw); esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw); esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw); esw_destroy_restore_table(esw);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment