Commit 10652f39 authored by Parav Pandit, committed by Saeed Mahameed

net/mlx5: Refactor ingress acl configuration

Drop, untagged, spoof check and untagged spoof check flow groups are
limited to legacy mode only.

Therefore, the following refactoring is done to
(a) improve code readability
(b) have better code split between legacy and offloads mode

1. Move legacy flow groups under legacy structure
2. Add validity check for group deletion
3. Restrict scope of esw_vport_disable_ingress_acl to legacy mode
4. Rename esw_vport_enable_ingress_acl() to
esw_vport_create_ingress_acl_table() and limit its scope to
table creation
5. Introduce legacy flow groups creation helper
esw_legacy_create_ingress_acl_groups() and keep its scope to legacy mode
(see the illustrative sketch below the commit metadata)
6. Reduce offloads ingress groups from 4 to just 1 metadata group
per vport
7. Remove redundant IS_ERR_OR_NULL as entries are marked NULL on free.
8. Shorten error message to remove redundant 'E-switch'
Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent a962d7a6
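Note: the hunks below only show the header change and the offloads side of the split; the legacy-mode changes live in eswitch.c and are not reproduced here. As a rough, hypothetical sketch of the resulting per-mode setup (the example_* function names and LEGACY_INGRESS_TABLE_SIZE are illustrative assumptions, not code from this commit), the shared table helper plus the per-mode group helpers could be used along these lines:

/* Illustrative sketch only -- not taken from this commit's diff.
 * Assumes a hypothetical LEGACY_INGRESS_TABLE_SIZE sized for the four
 * legacy flow groups; the real legacy path in eswitch.c may differ.
 */
static int example_legacy_ingress_setup(struct mlx5_eswitch *esw,
                                        struct mlx5_vport *vport)
{
        int err;

        /* One ingress ACL table per vport, sized for the legacy groups. */
        err = esw_vport_create_ingress_acl_table(esw, vport,
                                                 LEGACY_INGRESS_TABLE_SIZE);
        if (err)
                return err;

        /* Legacy-only groups (untagged/spoofchk/drop) are created by the
         * new helper and stored under vport->ingress.legacy.
         */
        err = esw_legacy_create_ingress_acl_groups(esw, vport);
        if (err)
                esw_vport_destroy_ingress_acl_table(vport);
        return err;
}

static int example_offloads_ingress_setup(struct mlx5_eswitch *esw,
                                          struct mlx5_vport *vport)
{
        int err;

        /* Offloads mode needs a single flow entry, hence table_size = 1. */
        err = esw_vport_create_ingress_acl_table(esw, vport, 1);
        if (err)
                return err;

        /* ...and a single metadata group, as in the diff below. */
        err = esw_vport_create_ingress_acl_group(esw, vport);
        if (err)
                esw_vport_destroy_ingress_acl_table(vport);
        return err;
}

The point of the split is that the ACL table itself is mode-agnostic, while the flow groups differ: four legacy groups versus one metadata group in offloads mode.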
@@ -65,16 +65,17 @@
 struct vport_ingress {
        struct mlx5_flow_table *acl;
-       struct mlx5_flow_group *allow_untagged_spoofchk_grp;
-       struct mlx5_flow_group *allow_spoofchk_only_grp;
-       struct mlx5_flow_group *allow_untagged_only_grp;
-       struct mlx5_flow_group *drop_grp;
-       struct mlx5_flow_handle *allow_rule;
+       struct mlx5_flow_handle *allow_rule;
        struct {
+               struct mlx5_flow_group *allow_spoofchk_only_grp;
+               struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+               struct mlx5_flow_group *allow_untagged_only_grp;
+               struct mlx5_flow_group *drop_grp;
                struct mlx5_flow_handle *drop_rule;
                struct mlx5_fc *drop_counter;
        } legacy;
        struct {
+               struct mlx5_flow_group *metadata_grp;
                struct mlx5_modify_hdr *modify_metadata;
                struct mlx5_flow_handle *modify_metadata_rule;
        } offloads;
 };
@@ -257,16 +258,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                      struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-                                struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+                                      struct mlx5_vport *vport,
+                                      int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                                 struct mlx5_vport *vport);
 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-                                  struct mlx5_vport *vport);
 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                                u32 rate_mbps);
......
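For comparison with the offloads metadata-group helper added in the next hunk, here is a hypothetical sketch of how one of the legacy groups might be created by the new esw_legacy_create_ingress_acl_groups() helper (which lives in eswitch.c and is not part of the hunks shown here). The flow index and warning text are illustrative assumptions, and the real helper creates all four legacy groups:

/* Hypothetical sketch only; mirrors the offloads group helper below. */
static int example_create_legacy_drop_group(struct mlx5_eswitch *esw,
                                            struct mlx5_vport *vport)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        int err = 0;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        /* Catch-all drop group: one flow index, no match criteria.
         * The index value is chosen arbitrarily for illustration.
         */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

        g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(esw->dev,
                         "vport[%d] ingress drop group create failed, err(%d)\n",
                         vport->vport, err);
                goto out;
        }
        /* Stored under the legacy sub-struct introduced above. */
        vport->ingress.legacy.drop_grp = g;
out:
        kvfree(flow_group_in);
        return err;
}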
@@ -1858,6 +1858,44 @@ static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
        }
 }
 
+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+                                              struct mlx5_vport *vport)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_group *g;
+       u32 *flow_group_in;
+       int ret = 0;
+
+       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+       if (!flow_group_in)
+               return -ENOMEM;
+
+       memset(flow_group_in, 0, inlen);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+       g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+       if (IS_ERR(g)) {
+               ret = PTR_ERR(g);
+               esw_warn(esw->dev,
+                        "Failed to create vport[%d] ingress metadata group, err(%d)\n",
+                        vport->vport, ret);
+               goto grp_err;
+       }
+       vport->ingress.offloads.metadata_grp = g;
+grp_err:
+       kvfree(flow_group_in);
+       return ret;
+}
+
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+       if (vport->ingress.offloads.metadata_grp) {
+               mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+               vport->ingress.offloads.metadata_grp = NULL;
+       }
+}
+
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
@@ -1868,8 +1906,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                return 0;
 
        esw_vport_cleanup_ingress_rules(esw, vport);
-
-       err = esw_vport_enable_ingress_acl(esw, vport);
+       err = esw_vport_create_ingress_acl_table(esw, vport, 1);
        if (err) {
                esw_warn(esw->dev,
                         "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1877,25 +1914,34 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                return err;
        }
 
+       err = esw_vport_create_ingress_acl_group(esw, vport);
+       if (err)
+               goto group_err;
+
        esw_debug(esw->dev,
                  "vport[%d] configure ingress rules\n", vport->vport);
 
        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
                if (err)
-                       goto out;
+                       goto metadata_err;
        }
 
        if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
            mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
                err = esw_vport_ingress_prio_tag_config(esw, vport);
                if (err)
-                       goto out;
+                       goto prio_tag_err;
        }
+       return 0;
 
-out:
-       if (err)
-               esw_vport_disable_ingress_acl(esw, vport);
+prio_tag_err:
+       esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+       esw_vport_cleanup_ingress_rules(esw, vport);
+       esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+       esw_vport_destroy_ingress_acl_table(vport);
        return err;
 }
@@ -1964,7 +2010,8 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
                err = esw_vport_egress_config(esw, vport);
                if (err) {
                        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
-                       esw_vport_disable_ingress_acl(esw, vport);
+                       esw_vport_cleanup_ingress_rules(esw, vport);
+                       esw_vport_destroy_ingress_acl_table(vport);
                }
        }
        return err;
@@ -1976,7 +2023,9 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 {
        esw_vport_disable_egress_acl(esw, vport);
        esw_vport_del_ingress_acl_modify_metadata(esw, vport);
-       esw_vport_disable_ingress_acl(esw, vport);
+       esw_vport_cleanup_ingress_rules(esw, vport);
+       esw_vport_destroy_ingress_acl_group(vport);
+       esw_vport_destroy_ingress_acl_table(vport);
 }
 
 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
......
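The "validity check for group deletion" from item 2 of the commit message follows the same pattern as esw_vport_destroy_ingress_acl_group() above: test the pointer, destroy the group, then NULL it so a repeated teardown is a no-op. A hypothetical sketch of the legacy-mode disable path (the real esw_vport_disable_ingress_acl() is in eswitch.c, not shown in this diff, and may differ in detail):

/* Hypothetical sketch only; names and ordering follow the struct and
 * helpers shown in the hunks above.
 */
static void example_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport)
{
        if (!vport->ingress.acl)
                return;

        esw_vport_cleanup_ingress_rules(esw, vport);

        /* Validity check before each group deletion, then mark it NULL
         * so a second disable is harmless.
         */
        if (vport->ingress.legacy.drop_grp) {
                mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
                vport->ingress.legacy.drop_grp = NULL;
        }
        if (vport->ingress.legacy.allow_spoofchk_only_grp) {
                mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
                vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
        }
        /* ...likewise for allow_untagged_spoofchk_grp and
         * allow_untagged_only_grp...
         */

        esw_vport_destroy_ingress_acl_table(vport);
}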