Commit 8300f225 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5e: Create new flow attr for multi table actions

Some TC actions use post actions for their implementation,
for example the CT and sample actions.

Create a new flow attr after each multi table action and
create a post action rule for it.

The first flow attr is offloaded normally and linked to the next
attr (the post action rule) by setting an id on reg_c. Each post
action rule matches that id on reg_c and continues to the next one.

The flow counter is allocated on the last rule.
Signed-off-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 314e1105
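
To make the mechanism concrete, here is a minimal editor's sketch (not part of the patch; the helper name link_post_act_chain is hypothetical) of how the per-table attrs could be chained using the new flow->attrs list and mlx5e_tc_act_set_next_post_act() introduced below, with the flow counter placed on the last rule:

static int link_post_act_chain(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr, *next_attr = NULL;
	int err;

	/* Walk the per-table attrs from last to first. */
	list_for_each_entry_reverse(attr, &flow->attrs, list) {
		if (!next_attr) {
			/* The last rule in the chain holds the flow counter. */
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		} else {
			/* Point this rule at the next one via an id on reg_c. */
			err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
			if (err)
				return err;
		}
		next_attr = attr;
	}

	return 0;
}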
@@ -2,6 +2,7 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "act.h"
#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
@@ -101,3 +102,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
	parse_state->num_actions = flow_action->num_entries;
	parse_state->extack = extack;
}

void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
				  struct mlx5e_tc_flow_action *flow_action_reorder)
{
	struct flow_action_entry *act;
	int i, j = 0;

	flow_action_for_each(i, act, flow_action) {
		/* Add CT action to be first. */
		if (act->id == FLOW_ACTION_CT)
			flow_action_reorder->entries[j++] = act;
	}

	flow_action_for_each(i, act, flow_action) {
		if (act->id == FLOW_ACTION_CT)
			continue;
		flow_action_reorder->entries[j++] = act;
	}
}
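
/* Editor's note: a caller sketch (not part of this patch) showing how the
 * reorder array would be allocated and used in place of the original
 * flow_action list inside a parse path; placement here is illustrative only.
 */
	struct mlx5e_tc_flow_action flow_action_reorder;

	flow_action_reorder.num_entries = flow_action->num_entries;
	flow_action_reorder.entries = kcalloc(flow_action->num_entries,
					      sizeof(struct flow_action_entry *),
					      GFP_KERNEL);
	if (!flow_action_reorder.entries)
		return -ENOMEM;

	mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
	/* ... parse flow_action_reorder.entries[0..num_entries - 1] ... */
	kfree(flow_action_reorder.entries);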

int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
			struct flow_action *flow_action,
			struct mlx5_flow_attr *attr,
			enum mlx5_flow_namespace_type ns_type)
{
	struct flow_action_entry *act;
	struct mlx5e_tc_act *tc_act;
	struct mlx5e_priv *priv;
	int err = 0, i;

	priv = parse_state->flow->priv;

	flow_action_for_each(i, act, flow_action) {
		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act || !tc_act->post_parse ||
		    !tc_act->can_offload(parse_state, act, i, attr))
			continue;

		err = tc_act->post_parse(parse_state, priv, attr);
		if (err)
			goto out;
	}

out:
	return err;
}

int
mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
			       struct mlx5_flow_attr *attr,
			       struct mlx5_flow_attr *next_attr)
{
	struct mlx5_core_dev *mdev = flow->priv->mdev;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	int err;

	mod_acts = &attr->parse_attr->mod_hdr_acts;

	/* Set handle on current post act rule to next post act rule. */
	err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
	if (err) {
		mlx5_core_warn(mdev, "Failed setting post action handle");
		return err;
	}

	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return 0;
}
@@ -42,6 +42,15 @@ struct mlx5e_tc_act {
	int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
			  struct mlx5e_priv *priv,
			  struct mlx5_flow_attr *attr);

	bool (*is_multi_table_act)(struct mlx5e_priv *priv,
				   const struct flow_action_entry *act,
				   struct mlx5_flow_attr *attr);
};
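
/* Editor's illustration (not part of this patch): how a multi table action
 * such as CT might implement the new callback. The function name is
 * hypothetical; CT clear is a plain header rewrite, so it would not be
 * treated as a multi table action.
 */
static bool tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
					 const struct flow_action_entry *act,
					 struct mlx5_flow_attr *attr)
{
	if (act->ct.action & TCA_CT_ACT_CLEAR)
		return false;

	return true;
}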

struct mlx5e_tc_flow_action {
	unsigned int num_entries;
	struct flow_action_entry **entries;
};

extern struct mlx5e_tc_act mlx5e_tc_act_drop;
@@ -74,4 +83,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
			      struct flow_action *flow_action,
			      struct netlink_ext_ack *extack);

void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
				  struct mlx5e_tc_flow_action *flow_action_reorder);

int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
			struct flow_action *flow_action,
			struct mlx5_flow_attr *attr,
			enum mlx5_flow_namespace_type ns_type);

int
mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
			       struct mlx5_flow_attr *attr,
			       struct mlx5_flow_attr *next_attr);

#endif /* __MLX5_EN_TC_ACT_H__ */
@@ -140,15 +140,9 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
 		goto err_xarray;

 	handle->attr = post_attr;

-	err = mlx5e_tc_post_act_offload(post_act, handle);
-	if (err)
-		goto err_rule;
-
 	return handle;

-err_rule:
-	xa_erase(&post_act->ids, handle->id);
 err_xarray:
 	kfree(post_attr);
 	kfree(handle);
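
/* Editor's note: with the offload call dropped from mlx5e_tc_post_act_add(),
 * callers now follow a two-step add-then-offload pattern; a sketch
 * (error handling simplified):
 *
 *	handle = mlx5e_tc_post_act_add(post_act, post_attr);
 *	if (IS_ERR(handle))
 *		return ERR_CAST(handle);
 *
 *	err = mlx5e_tc_post_act_offload(post_act, handle);
 *	if (err) {
 *		mlx5e_tc_post_act_del(post_act, handle);
 *		return ERR_PTR(err);
 *	}
 */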
@@ -533,6 +533,9 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
			err = PTR_ERR(post_act_handle);
			goto err_post_act;
		}
		err = mlx5e_tc_post_act_offload(tc_psample->post_act, post_act_handle);
		if (err)
			goto err_post_rule;
		sample_flow->post_act_handle = post_act_handle;
	} else {
		err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
@@ -1823,6 +1823,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
		ct_dbg("Failed to allocate post action handle");
		goto err_post_act_handle;
	}
	err = mlx5e_tc_post_act_offload(ct_priv->post_act, handle);
	if (err)
		goto err_alloc_pre;
	ct_flow->post_act_handle = handle;

	/* Base flow attributes of both rules on original rule attribute */
@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
	struct completion init_done;
	struct completion del_hw_done;
	struct mlx5_flow_attr *attr;
	struct list_head attrs;
};

struct mlx5_flow_handle *
@@ -129,6 +130,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr);

struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);

void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
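
/* Editor's note: the two new helpers pair up around rule updates. Post
 * action rules are offloaded before the first rule, and rolled back if
 * offloading the first rule fails; a condensed sketch of the pattern the
 * encap paths below use:
 *
 *	err = mlx5e_tc_offload_flow_post_acts(flow);
 *	if (err)
 *		return err;
 *
 *	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
 *	if (IS_ERR(rule)) {
 *		mlx5e_tc_unoffload_flow_post_acts(flow);
 *		return PTR_ERR(rule);
 *	}
 */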
@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, flow_list, tmp_list) {
 		if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
 			continue;
-		attr = flow->attr;
-		esw_attr = attr->esw_attr;
-		spec = &attr->parse_attr->spec;

+		spec = &flow->attr->parse_attr->spec;
+
+		attr = mlx5e_tc_get_encap_attr(flow);
+		esw_attr = attr->esw_attr;
 		esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
 		esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;

 		/* Do not offload flows with unresolved neighbors */
 		if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
 			continue;
+
+		err = mlx5e_tc_offload_flow_post_acts(flow);
+		if (err) {
+			mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+				       err);
+			continue;
+		}
+
 		/* update from slow path rule to encap rule */
-		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
 		if (IS_ERR(rule)) {
+			mlx5e_tc_unoffload_flow_post_acts(flow);
 			err = PTR_ERR(rule);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
 				       err);
@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, flow_list, tmp_list) {
 		if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
 			continue;
-		attr = flow->attr;
-		esw_attr = attr->esw_attr;
-		spec = &attr->parse_attr->spec;
+		spec = &flow->attr->parse_attr->spec;

 		/* update from encap rule to slow path rule */
 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);

+		attr = mlx5e_tc_get_encap_attr(flow);
+		esw_attr = attr->esw_attr;
 		/* mark the flow's encap dest as non-valid */
 		esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 			continue;
 		}

-		mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+		mlx5e_tc_unoffload_flow_post_acts(flow);
 		flow->rule[0] = rule;
 		/* was unset when fast path rule removed */
 		flow_flag_set(flow, OFFLOADED);
@@ -495,6 +507,9 @@ void mlx5e_detach_encap(struct mlx5e_priv *priv,
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	if (attr->esw_attr->dests[out_index].flags &
	    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
		mlx5e_detach_encap_route(priv, flow, out_index);
@@ -1360,17 +1375,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
 	list_for_each_entry(flow, encap_flows, tmp_list) {
 		struct mlx5e_tc_flow_parse_attr *parse_attr;
-		struct mlx5_flow_attr *attr = flow->attr;
 		struct mlx5_esw_flow_attr *esw_attr;
 		struct mlx5_flow_handle *rule;
+		struct mlx5_flow_attr *attr;
 		struct mlx5_flow_spec *spec;

 		if (flow_flag_test(flow, FAILED))
 			continue;

+		spec = &flow->attr->parse_attr->spec;
+		attr = mlx5e_tc_get_encap_attr(flow);
 		esw_attr = attr->esw_attr;
 		parse_attr = attr->parse_attr;
-		spec = &parse_attr->spec;

 		err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
 					     e->out_dev, e->route_dev_ifindex,
@@ -1392,9 +1409,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
 		esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
 		if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
 			goto offload_to_slow_path;
+
+		err = mlx5e_tc_offload_flow_post_acts(flow);
+		if (err) {
+			mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+				       err);
+			goto offload_to_slow_path;
+		}
+
 		/* update from slow path rule to encap rule */
-		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
 		if (IS_ERR(rule)) {
+			mlx5e_tc_unoffload_flow_post_acts(flow);
 			err = PTR_ERR(rule);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
 				       err);
@@ -53,7 +53,6 @@
			ESW_FLOW_ATTR_SZ :\
			NIC_FLOW_ATTR_SZ)

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);

struct mlx5e_tc_update_priv {
@@ -84,6 +83,8 @@ struct mlx5_flow_attr {
	u8 tun_ip_version;
	int tunnel_id; /* mapped tunnel id */
	u32 flags;
	struct list_head list;
	struct mlx5e_post_act_handle *post_act_handle;
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];