Commit 92d2c594 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-04-05

From Paul:
 - TC action parsing cleanups
 - Correctly report stats for missed packets
 - Many CT action limitations removed, since flows that miss in
   hardware now continue from the relevant tc ct action in software.

From Adham:
 - RQ/SQ devlink health diagnostics layout fixes

From Gal and Rahul:
 - PTP code cleanup and cyclecounter shift value improvement

* tag 'mlx5-updates-2023-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix SQ SW state layout in SQ devlink health diagnostics
  net/mlx5e: Fix RQ SW state layout in RQ devlink health diagnostics
  net/mlx5e: Rename misleading skb_pc/cc references in ptp code
  net/mlx5: Update cyclecounter shift value to improve ptp free running mode precision
  net/mlx5e: Remove redundant macsec code
  net/mlx5e: TC, Remove sample and ct limitation
  net/mlx5e: TC, Remove mirror and ct limitation
  net/mlx5e: TC, Remove tuple rewrite and ct limitation
  net/mlx5e: TC, Remove multiple ct actions limitation
  net/mlx5e: TC, Remove special handling of CT action
  net/mlx5e: TC, Remove CT action reordering
  net/mlx5e: CT: Use per action stats
  net/mlx5e: TC, Move main flow attribute cleanup to helper func
  net/mlx5e: TC, Remove unused vf_tun variable
  net/mlx5e: Set default can_offload action
====================

Link: https://lore.kernel.org/r/20230406020232.83844-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a9fda7a0 b0d87ed2
......@@ -81,23 +81,23 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_ci, u16 skb_id)
{
return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
return (ptpsq->ts_cqe_ctr_mask && (skb_ci != skb_id));
}
static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
{
u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
u16 skb_pi = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
if (PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci))
return true;
return false;
}
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ci,
u16 skb_id, int budget)
{
struct skb_shared_hwtstamps hwts = {};
......@@ -105,13 +105,13 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_
ptpsq->cq_stats->resync_event++;
while (skb_cc != skb_id) {
while (skb_ci != skb_id) {
skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
skb_tstamp_tx(skb, &hwts);
ptpsq->cq_stats->resync_cqe++;
napi_consume_skb(skb, budget);
skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
}
}
......@@ -120,7 +120,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
int budget)
{
u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
u16 skb_ci = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
struct sk_buff *skb;
ktime_t hwtstamp;
......@@ -131,13 +131,13 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out;
}
if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_ci, skb_id)) {
if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
/* already handled by a previous resync */
ptpsq->cq_stats->ooo_cqe_drop++;
return;
}
mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_ci, skb_id, budget);
}
skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
......
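A note on the in-order check above: PTP_WQE_CTR2IDX(skb_id - skb_ci) >= PTP_WQE_CTR2IDX(skb_pi - skb_ci) is the usual wrap-safe test for whether an id still sits in the fifo window [ci, pi). A minimal standalone sketch of that test with a toy 8-entry mask (illustrative, not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define MASK 0x7                        /* toy 8-entry fifo */
    #define CTR2IDX(v) ((uint16_t)(v) & MASK)

    /* id is still pending iff it falls in [ci, pi) modulo fifo size */
    static int in_flight(uint16_t ci, uint16_t pi, uint16_t id)
    {
            return CTR2IDX(id - ci) < CTR2IDX(pi - ci);
    }

    int main(void)
    {
            /* consumer at 6, producer wrapped to 9: entries 6, 7, 0 pending */
            assert(in_flight(6, 9, 6));
            assert(in_flight(6, 9, 0));
            assert(!in_flight(6, 9, 1));    /* past the producer: already done */
            return 0;
    }

mlx5e_ptp_ts_cqe_ooo() returns true (drop the CQE as already handled by a resync) exactly when this test fails.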
......@@ -259,10 +259,6 @@ static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
"rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
if (err)
return err;
......@@ -274,11 +270,7 @@ static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
return err;
}
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err)
return err;
return devlink_fmsg_obj_nest_end(fmsg);
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
......
......@@ -57,10 +57,6 @@ static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
"sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
if (err)
return err;
......@@ -72,11 +68,7 @@ static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_
return err;
}
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
if (err)
return err;
return devlink_fmsg_obj_nest_end(fmsg);
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
......
......@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -26,7 +17,6 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_accept = {
.can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
.is_terminating_action = true,
};
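This file and the ones that follow all delete the same boilerplate: a can_offload callback that only returns true. The counterpart is in parse_tc_actions() further down, where the check becomes "tc_act->can_offload && !tc_act->can_offload(...)", i.e. a missing callback now means offloadable by default. A toy model of that optional-callback dispatch (names are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct act_ops {
            /* NULL can_offload now means "no restrictions" */
            bool (*can_offload)(int act_index);
            int (*parse)(int act_index);
    };

    static int parse_one(const struct act_ops *ops, int i)
    {
            if (ops->can_offload && !ops->can_offload(i))
                    return -1;              /* -EOPNOTSUPP in the driver */
            return ops->parse(i);
    }

    static int parse_accept(int i)
    {
            printf("parsed accept at index %d\n", i);
            return 0;
    }

    /* no .can_offload stub needed anymore */
    static const struct act_ops accept_ops = { .parse = parse_accept };

    int main(void)
    {
            return parse_one(&accept_ops, 0);
    }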
......@@ -82,26 +82,6 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->flow_action = flow_action;
}
void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
struct mlx5e_tc_flow_action *flow_action_reorder)
{
struct flow_action_entry *act;
int i, j = 0;
flow_action_for_each(i, act, flow_action) {
/* Add CT action to be first. */
if (act->id == FLOW_ACTION_CT)
flow_action_reorder->entries[j++] = act;
}
flow_action_for_each(i, act, flow_action) {
if (act->id == FLOW_ACTION_CT)
continue;
flow_action_reorder->entries[j++] = act;
}
}
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
......
......@@ -17,8 +17,6 @@ struct mlx5e_tc_act_parse_state {
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
bool ct;
bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
......@@ -56,6 +54,8 @@ struct mlx5e_tc_act {
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
bool (*is_missable)(const struct flow_action_entry *act);
int (*offload_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act,
struct flow_action_entry *act);
......@@ -110,10 +110,6 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
struct mlx5e_tc_flow_action *flow_action_reorder);
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
......
......@@ -5,53 +5,22 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
static bool
tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
if (parse_state->ct && !clear_action) {
NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
return true;
}
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
/* It's redundant to do ct clear more than once. */
if (clear_action && parse_state->ct_clear)
return 0;
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr, act, parse_state->extack);
if (err)
return err;
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
if (clear_action) {
parse_state->ct_clear = true;
} else {
attr->flags |= MLX5_ATTR_FLAG_CT;
flow_flag_set(parse_state->flow, CT);
parse_state->ct = true;
}
attr->flags |= MLX5_ATTR_FLAG_CT;
return 0;
}
......@@ -61,27 +30,10 @@ tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts;
int err;
/* If ct action exist, we can ignore previous ct_clear actions */
if (parse_state->ct)
if (!(attr->flags & MLX5_ATTR_FLAG_CT))
return 0;
if (parse_state->ct_clear) {
err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts);
if (err) {
NL_SET_ERR_MSG_MOD(parse_state->extack,
"Failed to set registers for ct clear");
return err;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
/* Prevent handling of additional, redundant clear actions */
parse_state->ct_clear = false;
}
return 0;
return mlx5_tc_ct_flow_offload(parse_state->ct_priv, attr);
}
static bool
......@@ -95,10 +47,16 @@ tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
return true;
}
static bool
tc_act_is_missable_ct(const struct flow_action_entry *act)
{
return !(act->ct.action & TCA_CT_ACT_CLEAR);
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
.is_missable = tc_act_is_missable_ct,
};
......@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -25,7 +16,6 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_drop = {
.can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
.is_terminating_action = true,
};
......@@ -78,15 +78,6 @@ mlx5e_tc_act_pedit_parse_action(struct mlx5e_priv *priv,
return err;
}
static bool
tc_act_can_offload_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -114,6 +105,5 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_pedit = {
.can_offload = tc_act_can_offload_pedit,
.parse_action = tc_act_parse_pedit,
};
......@@ -4,15 +4,6 @@
#include "act.h"
#include "en/tc_priv.h"
static bool
tc_act_can_offload_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -31,6 +22,5 @@ tc_act_parse_ptype(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_ptype = {
.can_offload = tc_act_can_offload_ptype,
.parse_action = tc_act_parse_ptype,
};
......@@ -6,25 +6,6 @@
#include "en/tc_priv.h"
#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
bool ct_nat;
ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
return true;
}
static int
tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -65,7 +46,6 @@ tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
}
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
.is_multi_table_act = tc_act_is_multi_table_act_sample,
};
......@@ -5,15 +5,6 @@
#include "en/tc_priv.h"
#include "eswitch.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -27,6 +18,5 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_trap = {
.can_offload = tc_act_can_offload_trap,
.parse_action = tc_act_parse_trap,
};
......@@ -32,15 +32,6 @@ tc_act_parse_tun_encap(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
}
static bool
tc_act_can_offload_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_tun_decap(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -58,6 +49,5 @@ struct mlx5e_tc_act mlx5e_tc_act_tun_encap = {
};
struct mlx5e_tc_act mlx5e_tc_act_tun_decap = {
.can_offload = tc_act_can_offload_tun_decap,
.parse_action = tc_act_parse_tun_decap,
};
......@@ -141,15 +141,6 @@ mlx5e_tc_act_vlan_add_pop_action(struct mlx5e_priv *priv,
return err;
}
static bool
tc_act_can_offload_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -205,7 +196,6 @@ tc_act_post_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_vlan = {
.can_offload = tc_act_can_offload_vlan,
.parse_action = tc_act_parse_vlan,
.post_parse = tc_act_post_parse_vlan,
};
......@@ -50,15 +50,6 @@ mlx5e_tc_act_vlan_add_rewrite_action(struct mlx5e_priv *priv, int namespace,
return err;
}
static bool
tc_act_can_offload_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
int act_index,
struct mlx5_flow_attr *attr)
{
return true;
}
static int
tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
......@@ -81,6 +72,5 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
}
struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle = {
.can_offload = tc_act_can_offload_vlan_mangle,
.parse_action = tc_act_parse_vlan_mangle,
};
......@@ -83,12 +83,6 @@ struct mlx5_tc_ct_priv {
struct mlx5_tc_ct_debugfs debugfs;
};
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_ct_ft *ft;
};
struct mlx5_ct_zone_rule {
struct mlx5_ct_fs_rule *rule;
struct mlx5e_mod_hdr_handle *mh;
......@@ -598,12 +592,6 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
return 0;
}
int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
}
static int
mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
char *modact)
......@@ -1545,7 +1533,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
......@@ -1555,8 +1542,8 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
attr->ct_attr.zone = act->ct.zone;
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
attr->ct_attr.act_miss_cookie = act->miss_cookie;
......@@ -1892,14 +1879,14 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
* + ft prio (tc chain) +
* + original match +
* +---------------------+
* +-----------------------+
* + rule (either original +
* + or post_act rule) +
* +-----------------------+
* | set act_miss_cookie mapping
* | set fte_id
* | set tunnel_id
* | do decap
* | rest of actions before the CT action (for this orig/post_act rule)
* |
* +-------------+
* | Chain 0 |
......@@ -1924,32 +1911,21 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
* + post_act + original filter actions
* + post_act + rest of parsed filter's actions
* + fte_id match +------------------------>
* +--------------+
*
*/
static struct mlx5_flow_handle *
static int
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_spec *orig_spec,
struct mlx5_flow_attr *attr)
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
int act_miss_mapping = 0, err;
struct mlx5_ct_ft *ft;
u16 zone;
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!ct_flow) {
return ERR_PTR(-ENOMEM);
}
/* Register for CT established events */
ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
attr->ct_attr.nf_ft);
......@@ -1958,23 +1934,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Failed to register to ft callback");
goto err_ft;
}
ct_flow->ft = ft;
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
err = -ENOMEM;
goto err_alloc_pre;
}
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
attr->ct_attr.ft = ft;
err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
&act_miss_mapping);
......@@ -1982,136 +1942,89 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Failed to get register mapping for act miss");
goto err_get_act_miss;
}
attr->ct_attr.act_miss_mapping = act_miss_mapping;
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
MAPPED_OBJ_TO_REG, act_miss_mapping);
err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
ct_priv->ns_type, MAPPED_OBJ_TO_REG, act_miss_mapping);
if (err) {
ct_dbg("Failed to set act miss register mapping");
goto err_mapping;
}
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
attr->tunnel_id);
if (err) {
ct_dbg("Failed to set tunnel register mapping");
goto err_mapping;
}
}
/* Change original rule point to ct table
* Chain 0 sets the zone and jumps to ct table
/* Chain 0 sets the zone and jumps to ct table
* Other chains jump to pre_ct table to align with act_ct cached logic
*/
pre_ct_attr->dest_chain = 0;
if (!attr->chain) {
zone = ft->zone & MLX5_CT_ZONE_MASK;
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
ZONE_TO_REG, zone);
err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
ct_priv->ns_type, ZONE_TO_REG, zone);
if (err) {
ct_dbg("Failed to set zone register mapping");
goto err_mapping;
}
pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
} else {
pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts->num_actions,
pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
goto err_mapping;
}
pre_ct_attr->modify_hdr = mod_hdr;
ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
err = PTR_ERR(ct_flow->pre_ct_rule);
ct_dbg("Failed to add pre ct rule");
goto err_insert_orig;
attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
}
attr->ct_attr.ct_flow = ct_flow;
mlx5e_mod_hdr_dealloc(pre_mod_acts);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
attr->ct_attr.act_miss_mapping = act_miss_mapping;
return ct_flow->pre_ct_rule;
return 0;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
err_get_act_miss:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
return ERR_PTR(err);
return err;
}
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
int
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
{
struct mlx5_flow_handle *rule;
int err;
if (!priv)
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
0, 0, 0, 0);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
return 0;
mutex_lock(&priv->control_lock);
rule = __mlx5_tc_ct_flow_offload(priv, spec, attr);
err = __mlx5_tc_ct_flow_offload(priv, attr);
mutex_unlock(&priv->control_lock);
return rule;
return err;
}
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_flow *ct_flow,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
mlx5_tc_ct_del_ft_cb(ct_priv, attr->ct_attr.ft);
}
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
/* We are called on error to clean up stuff from parsing
* but we don't have anything for now
*/
if (!ct_flow)
if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
return;
mutex_lock(&priv->control_lock);
__mlx5_tc_ct_delete_flow(priv, ct_flow, attr);
__mlx5_tc_ct_delete_flow(priv, attr);
mutex_unlock(&priv->control_lock);
}
......
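Two details of the tc_ct.c rewrite are easy to miss. First, ct_attr.ct_action is now OR-accumulated ("so we can have clear + ct"), so a filter carrying both ct clear and a committing ct action keeps both flags on one attr, while zone and nf_ft follow the last ct action parsed. Second, mlx5_tc_ct_flow_offload() returns 0 early when nf_ft is NULL, the ct-clear-only case. A toy model of the flag accumulation (stand-in constants, not the real uapi values):

    #include <stdint.h>
    #include <stdio.h>

    #define CT_CLEAR (1u << 0)      /* stands in for TCA_CT_ACT_CLEAR */
    #define CT_NAT   (1u << 1)      /* stands in for TCA_CT_ACT_NAT */

    struct ct_attr {
            uint16_t ct_action;
            uint16_t zone;
    };

    static void parse_ct(struct ct_attr *a, uint16_t action, uint16_t zone)
    {
            a->ct_action |= action; /* accumulate: clear + ct can coexist */
            a->zone = zone;         /* zone/nf_ft follow the last ct action */
    }

    int main(void)
    {
            struct ct_attr a = { 0 };

            parse_ct(&a, CT_CLEAR, 0);      /* ct clear */
            parse_ct(&a, CT_NAT, 5);        /* ct nat zone 5 */
            printf("ct_action=%#x zone=%u\n", a.ct_action, a.zone);
            return 0;
    }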
......@@ -25,11 +25,11 @@ struct nf_flowtable;
struct mlx5_ct_attr {
u16 zone;
u16 ct_action;
struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
u32 act_miss_mapping;
u64 act_miss_cookie;
struct mlx5_ct_ft *ft;
};
#define zone_to_reg_ct {\
......@@ -113,15 +113,12 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
struct mlx5_flow_handle *
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
int
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr);
void
mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr);
......@@ -130,10 +127,6 @@ bool
mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
int
mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts);
#else /* CONFIG_MLX5_TC_CT */
static inline struct mlx5_tc_ct_priv *
......@@ -175,17 +168,9 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
return 0;
}
static inline int
mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
return -EOPNOTSUPP;
}
static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
......@@ -193,13 +178,11 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
static inline struct mlx5_flow_handle *
static inline int
mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
struct mlx5_flow_attr *attr)
{
return ERR_PTR(-EOPNOTSUPP);
return -EOPNOTSUPP;
}
static inline void
......
......@@ -25,12 +25,11 @@ enum {
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 12,
MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 7,
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 8,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 10,
MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 11,
};
struct mlx5e_tc_flow_parse_attr {
......
......@@ -292,8 +292,6 @@ static int macsec_fs_tx_create(struct mlx5e_macsec_fs *macsec_fs)
}
/* Tx crypto table MKE rule - MKE packets shouldn't be offloaded */
memset(&flow_act, 0, sizeof(flow_act));
memset(spec, 0, sizeof(*spec));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
......@@ -1109,7 +1107,6 @@ static void macsec_fs_rx_setup_fte(struct mlx5_flow_spec *spec,
static union mlx5e_macsec_rule *
macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs,
const struct macsec_context *macsec_ctx,
struct mlx5_macsec_rule_attrs *attrs,
u32 fs_id)
{
......@@ -1334,7 +1331,7 @@ mlx5e_macsec_fs_add_rule(struct mlx5e_macsec_fs *macsec_fs,
{
return (attrs->action == MLX5_ACCEL_MACSEC_ACTION_ENCRYPT) ?
macsec_fs_tx_add_rule(macsec_fs, macsec_ctx, attrs, sa_fs_id) :
macsec_fs_rx_add_rule(macsec_fs, macsec_ctx, attrs, *sa_fs_id);
macsec_fs_rx_add_rule(macsec_fs, attrs, *sa_fs_id);
}
void mlx5e_macsec_fs_del_rule(struct mlx5e_macsec_fs *macsec_fs,
......
......@@ -177,7 +177,8 @@ static struct lock_class_key tc_ht_wq_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
......@@ -487,15 +488,6 @@ mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
&attr->parse_attr->mod_hdr_acts;
return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
spec, attr,
mod_hdr_acts);
}
if (!is_mdev_switchdev_mode(priv->mdev))
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
......@@ -518,11 +510,6 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (attr->flags & MLX5_ATTR_FLAG_CT) {
mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
return;
}
if (!is_mdev_switchdev_mode(priv->mdev)) {
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
return;
......@@ -1395,13 +1382,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return err;
}
if (attr->flags & MLX5_ATTR_FLAG_CT)
flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
attr, &parse_attr->mod_hdr_acts);
else
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
attr);
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
return PTR_ERR_OR_ZERO(flow->rule[0]);
}
......@@ -1432,9 +1413,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
flow_flag_clear(flow, OFFLOADED);
if (attr->flags & MLX5_ATTR_FLAG_CT)
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
else if (!IS_ERR_OR_NULL(flow->rule[0]))
if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
/* Remove root table if no rules are left to avoid
......@@ -1785,8 +1764,7 @@ set_encap_dests(struct mlx5e_priv *priv,
static void
clean_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
bool *vf_tun)
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr;
int out_index;
......@@ -1795,17 +1773,11 @@ clean_encap_dests(struct mlx5e_priv *priv,
return;
esw_attr = attr->esw_attr;
*vf_tun = false;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
if (esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
!esw_attr->dest_int_port)
*vf_tun = true;
mlx5e_detach_encap(priv, flow, attr, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
......@@ -2028,7 +2000,7 @@ static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *
if (!attr)
return;
mlx5_free_flow_attr(flow, attr);
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->parse_attr);
kfree(attr);
}
......@@ -2039,7 +2011,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
bool vf_tun;
esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
......@@ -2061,18 +2032,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
clean_encap_dests(priv, flow, attr, &vf_tun);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
if (esw_attr->int_port)
mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
......@@ -2085,8 +2046,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
......@@ -3463,114 +3423,59 @@ struct ipv6_hoplimit_word {
};
static bool
is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
bool *modify_ip_header, bool *modify_tuple,
struct netlink_ext_ack *extack)
is_flow_action_modify_ip_header(struct flow_action *flow_action)
{
const struct flow_action_entry *act;
u32 mask, offset;
u8 htype;
int i;
htype = act->mangle.htype;
offset = act->mangle.offset;
mask = ~act->mangle.mask;
/* For IPv4 & IPv6 header check 4 byte word,
* to determine that modified fields
* are NOT ttl & hop_limit only.
*/
if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
struct ip_ttl_word *ttl_word =
(struct ip_ttl_word *)&mask;
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check) {
*modify_ip_header = true;
}
if (offset >= offsetof(struct iphdr, saddr))
*modify_tuple = true;
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv4 address with action ct");
return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
(struct ipv6_hoplimit_word *)&mask;
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr) {
*modify_ip_header = true;
}
if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
*modify_tuple = true;
flow_action_for_each(i, act, flow_action) {
if (act->id != FLOW_ACTION_MANGLE &&
act->id != FLOW_ACTION_ADD)
continue;
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv6 address with action ct");
return false;
htype = act->mangle.htype;
offset = act->mangle.offset;
mask = ~act->mangle.mask;
if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
struct ip_ttl_word *ttl_word =
(struct ip_ttl_word *)&mask;
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check)
return true;
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
(struct ipv6_hoplimit_word *)&mask;
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr)
return true;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
*modify_tuple = true;
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of transport header ports with action ct");
return false;
}
}
return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
bool ct_flow, struct netlink_ext_ack *extack,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec)
{
if (!modify_tuple || ct_clear)
return true;
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload tuple modification with non-clear ct()");
netdev_info(priv->netdev,
"can't offload tuple modification with non-clear ct()");
return false;
}
/* Add ct_state=-trk match so it will be offloaded for non ct flows
* (or after clear action), as otherwise, since the tuple is changed,
* we can't restore ct state
*/
if (mlx5_tc_ct_add_no_trk_match(spec)) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload tuple modification with ct matches and no ct(clear) action");
netdev_info(priv->netdev,
"can't offload tuple modification with ct matches and no ct(clear) action");
return false;
}
return true;
return false;
}
static bool modify_header_match_supported(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions, bool ct_flow,
bool ct_clear,
u32 actions,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
bool modify_ip_header, modify_tuple;
bool modify_ip_header;
void *headers_c;
void *headers_v;
u16 ethertype;
u8 ip_proto;
int i;
headers_c = mlx5e_get_match_headers_criteria(actions, spec);
headers_v = mlx5e_get_match_headers_value(actions, spec);
......@@ -3581,23 +3486,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
goto out_ok;
modify_ip_header = false;
modify_tuple = false;
flow_action_for_each(i, act, flow_action) {
if (act->id != FLOW_ACTION_MANGLE &&
act->id != FLOW_ACTION_ADD)
continue;
if (!is_action_keys_supported(act, ct_flow,
&modify_ip_header,
&modify_tuple, extack))
return false;
}
if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
priv, spec))
return false;
modify_ip_header = is_flow_action_modify_ip_header(flow_action);
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
......@@ -3618,19 +3507,6 @@ actions_match_supported_fdb(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
if (esw_attr->split_count && ct_flow &&
!MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
/* All registers used by ct are cleared when using
* split rules.
*/
NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
return false;
}
if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
......@@ -3651,14 +3527,9 @@ actions_match_supported(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
!modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
extack))
return false;
if (mlx5e_is_eswitch_flow(flow) &&
......@@ -3752,6 +3623,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
attr2->act_id_restore_rule = NULL;
memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
......@@ -3805,9 +3677,7 @@ free_flow_post_acts(struct mlx5e_tc_flow *flow)
if (list_is_last(&attr->list, &flow->attrs))
break;
mlx5_free_flow_attr(flow, attr);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
mlx5_free_flow_attr_actions(flow, attr);
list_del(&attr->list);
kvfree(attr->parse_attr);
......@@ -4065,76 +3935,79 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow_action flow_action_reorder;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_jump_state jump_state = {};
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
struct flow_action_entry *act, **_act;
struct mlx5_flow_attr *prev_attr;
struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
bool is_missable;
int err, i;
flow_action_reorder.num_entries = flow_action->num_entries;
flow_action_reorder.entries = kcalloc(flow_action->num_entries,
sizeof(flow_action), GFP_KERNEL);
if (!flow_action_reorder.entries)
return -ENOMEM;
mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
ns_type = mlx5e_get_flow_namespace(flow);
list_add(&attr->list, &flow->attrs);
flow_action_for_each(i, _act, &flow_action_reorder) {
flow_action_for_each(i, act, flow_action) {
jump_state.jump_target = false;
act = *_act;
is_missable = false;
prev_attr = attr;
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
err = -EOPNOTSUPP;
goto out_free;
goto out_free_post_acts;
}
if (!tc_act->can_offload(parse_state, act, i, attr)) {
if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
err = -EOPNOTSUPP;
goto out_free;
goto out_free_post_acts;
}
err = tc_act->parse_action(parse_state, act, priv, attr);
if (err)
goto out_free;
goto out_free_post_acts;
dec_jump_count(act, tc_act, attr, priv, &jump_state);
err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
if (err)
goto out_free;
goto out_free_post_acts;
parse_state->actions |= attr->action;
if (!tc_act->stats_action)
attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
(tc_act->is_multi_table_act &&
tc_act->is_multi_table_act(priv, act, attr) &&
i < flow_action_reorder.num_entries - 1)) {
i < flow_action->num_entries - 1)) {
is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
goto out_free;
goto out_free_post_acts;
attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
if (!attr) {
err = -ENOMEM;
goto out_free;
goto out_free_post_acts;
}
list_add(&attr->list, &flow->attrs);
}
}
kfree(flow_action_reorder.entries);
if (is_missable) {
/* Add counter to prev, and assign act to new (next) attr */
prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_flag_set(flow, USE_ACT_STATS);
attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
} else if (!tc_act->stats_action) {
prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
}
}
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
......@@ -4146,8 +4019,6 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
out_free:
kfree(flow_action_reorder.entries);
out_free_post_acts:
free_flow_post_acts(flow);
......@@ -4442,10 +4313,9 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
}
static void
mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
bool vf_tun;
if (!attr)
return;
......@@ -4453,7 +4323,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
if (attr->post_act_handle)
mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
clean_encap_dests(flow->priv, flow, attr, &vf_tun);
clean_encap_dests(flow->priv, flow, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(counter_dev, attr->counter);
......@@ -4462,6 +4332,11 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
}
static int
......@@ -4923,7 +4798,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto errout;
}
if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
} else {
......
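The reworked parse_tc_actions() above drops the reorder pass and walks flow_action in order, splitting the filter into per-table attrs at multi-table actions; when the splitting action is missable (CT without clear), the attr before the split gets a counter and the flow is flagged USE_ACT_STATS, so packets that miss to software are still counted per action. A toy walk-through of just the splitting logic (illustrative, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_act {
            const char *name;
            bool multi_table;       /* forces a split into a new attr */
            bool missable;          /* hw may miss mid-chain to software */
    };

    int main(void)
    {
            const struct toy_act acts[] = {
                    { "decap",  false, false },
                    { "ct",     true,  true  },
                    { "mirred", false, false },     /* terminating */
            };
            const int n = 3;
            int attr = 0;

            for (int i = 0; i < n; i++) {
                    printf("attr %d: parse %s\n", attr, acts[i].name);
                    if (acts[i].multi_table && i < n - 1) {
                            if (acts[i].missable)
                                    printf("attr %d: add counter, use act stats\n",
                                           attr);
                            attr++;         /* continue into a fresh attr */
                    }
            }
            return 0;
    }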
......@@ -39,7 +39,7 @@
#include "clock.h"
enum {
MLX5_CYCLES_SHIFT = 23
MLX5_CYCLES_SHIFT = 31
};
enum {
......
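On the MLX5_CYCLES_SHIFT bump from 23 to 31: with a cyclecounter, time advances as ns = (cycles * mult) >> shift, and frequency trimming nudges mult by whole units, so the finest trim step is one part in mult. A larger shift means a larger mult for the same clock rate and therefore finer steps. A standalone sketch of that arithmetic, assuming a 1 GHz free-running clock (the rate is an assumption of this sketch, not taken from the driver):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t shifts[] = { 23, 31 };

            for (unsigned i = 0; i < 2; i++) {
                    /* for a 1 GHz clock, mult = (1e9 << shift) / 1e9 */
                    uint32_t mult = 1u << shifts[i];
                    /* a +/-1 change of mult trims the clock by 1e9/mult ppb */
                    double step_ppb = 1e9 / (double)mult;

                    printf("shift=%u mult=%u -> finest trim ~%.3f ppb\n",
                           shifts[i], mult, step_ppb);
            }
            return 0;
    }

With shift 23 the finest trim is about 119 ppb; with shift 31 it drops to under 0.5 ppb, which is the precision improvement the patch title refers to.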