Commit d6c7fc0c authored by David S. Miller

Merge tag 'mlx5-updates-2020-07-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-07-09

This series provides updates to mlx5 CT (connection tracking) offloads.
For more information please see the tag log below.

Please pull and let me know if there is any problem.

The following conflict is expected when net is merged into net-next;
to resolve it, just use the hunks from net-next:

<<<<<<< HEAD (net-next)
	mlx5_tc_ct_del_ft_entry(ct_priv, entry);
	kfree(entry);
======= (net)
	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
	kfree(entry);
>>>>>>> b1a7d5bdfe54c98eca46e2c997d4e3b1484a49af

mlx5 connection tracking offloads updates:

1) Restore CT state from a lookup in the zone instead of from a tuple id

    On a miss, use the zone plus the 5-tuple taken from the skb to look up
    the CT entry and restore it, instead of using a driver-allocated tuple
    id.

    This improves the flow insertion rate by avoiding the allocation of a
    header rewrite context to maintain the tuple id; a condensed sketch of
    the new miss path follows.
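
    The miss path, condensed from mlx5e_tc_ct_restore_flow() in the diff
    below (error handling trimmed; not a complete function):

	struct mlx5_ct_tuple tuple = {};
	struct mlx5_ct_entry *entry;
	u16 zone;

	/* reg_c1 now carries a small zone-restore id, not a tuple id */
	if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
		return false;

	/* the 5-tuple is taken from the packet itself */
	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
		return false;

	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
				       tuples_ht_params);
	if (!entry)
		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
					       &tuple, tuples_nat_ht_params);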

2) Re-use modify header HW objects for identical modify actions; a usage
   sketch of the new API follows.
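
   A minimal sketch of the shared modify-header API added in en/mod_hdr.c
   below (names are from this series; error handling trimmed):

	struct mlx5e_mod_hdr_handle *mh;

	/* identical action lists hash to the same handle and share one
	 * HW modify_hdr object, refcounted under tbl->lock
	 */
	mh = mlx5e_mod_hdr_attach(mdev, tbl, MLX5_FLOW_NAMESPACE_FDB,
				  &mod_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	...
	mlx5e_mod_hdr_detach(mdev, tbl, mh); /* frees the HW object on last put */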

3) Expand tunnel register mappings
   Reg_c1 is 32 bits wide. Before this patchset, 24 bits were allocated
   for the tuple id, 6 bits for the tunnel mapping and 2 bits for the
   tunnel options mapping.

   Restoring the CT state from a zone lookup instead of a tuple id needs
   only 8 bits of reg_c1 to map the CT zone, leaving 24 bits for tunnel
   mappings.

   Expand the tunnel and tunnel options register mappings to 12 bits each;
   the resulting bit budget is sketched below.
4) Trivial cleanup and fixes.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0ea46047 bbe11249
...
@@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
-		en_rep.o en/rep/bond.o
+		en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
		en/mapping.o esw/chains.o en/tc_tun.o \
		en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
...
@@ -4,6 +4,8 @@
#ifndef __MLX5E_FLOW_STEER_H__
#define __MLX5E_FLOW_STEER_H__

+#include "mod_hdr.h"
+
enum {
	MLX5E_TC_FT_LEVEL = 0,
	MLX5E_TC_TTC_FT_LEVEL,
...
drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c (new file):

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include <linux/jhash.h>
#include "mod_hdr.h"

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_handle {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};

static u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static int cmp_mod_hdr_info(struct mod_hdr_key *a, struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions,
		      a->num_actions * MLX5_MH_ACT_SZ);
}

void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
{
	mutex_init(&tbl->lock);
	hash_init(tbl->hlist);
}

void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
{
	mutex_destroy(&tbl->lock);
}

static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl,
						struct mod_hdr_key *key,
						u32 hash_key)
{
	struct mlx5e_mod_hdr_handle *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}

struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
		     struct mod_hdr_tbl *tbl,
		     enum mlx5_flow_namespace_type namespace,
		     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
	int num_actions, actions_size, err;
	struct mlx5e_mod_hdr_handle *mh;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions = mod_hdr_acts->num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = mod_hdr_acts->actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	mutex_lock(&tbl->lock);
	mh = mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_header;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return ERR_PTR(-ENOMEM);
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}

	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_header:
	return mh;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_detach(mdev, tbl, mh);
	return ERR_PTR(err);
}

void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
			  struct mod_hdr_tbl *tbl,
			  struct mlx5e_mod_hdr_handle *mh)
{
	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(mdev, mh->modify_hdr);

	kfree(mh);
}

struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
{
	return mh->modify_hdr;
}

...
drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h (new file):

/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies */

#ifndef __MLX5E_EN_MOD_HDR_H__
#define __MLX5E_EN_MOD_HDR_H__

#include <linux/hashtable.h>
#include <linux/mlx5/fs.h>

struct mlx5e_mod_hdr_handle;

struct mlx5e_tc_mod_hdr_acts {
	int num_actions;
	int max_actions;
	void *actions;
};

struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
		     struct mod_hdr_tbl *tbl,
		     enum mlx5_flow_namespace_type namespace,
		     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
			  struct mod_hdr_tbl *tbl,
			  struct mlx5e_mod_hdr_handle *mh);
struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);

void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);

#endif /* __MLX5E_EN_MOD_HDR_H__ */

...
@@ -594,7 +594,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
			     struct mlx5e_tc_update_priv *tc_priv)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-	u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+	u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tc_skb_ext *tc_skb_ext;

...
@@ -631,11 +631,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
		tc_skb_ext->chain = chain;

-		tuple_id = reg_c1 & TUPLE_ID_MAX;
+		zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
-		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+					      zone_restore_id))
			return false;
	}
...
@@ -16,6 +16,8 @@
#include "esw/chains.h"
#include "en/tc_ct.h"
+#include "en/mod_hdr.h"
+#include "en/mapping.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"

...
@@ -39,10 +41,13 @@ struct mlx5_tc_ct_priv {
	struct idr fte_ids;
	struct xarray tuple_ids;
	struct rhashtable zone_ht;
+	struct rhashtable ct_tuples_ht;
+	struct rhashtable ct_tuples_nat_ht;
	struct mlx5_flow_table *ct;
	struct mlx5_flow_table *ct_nat;
	struct mlx5_flow_table *post_ct;
	struct mutex control_lock; /* guards parallel adds/dels */
+	struct mapping_ctx *zone_mapping;
};

struct mlx5_ct_flow {
...
@@ -57,8 +62,8 @@ struct mlx5_ct_flow {
struct mlx5_ct_zone_rule {
	struct mlx5_flow_handle *rule;
+	struct mlx5e_mod_hdr_handle *mh;
	struct mlx5_esw_flow_attr attr;
-	int tupleid;
	bool nat;
};

...
@@ -74,6 +79,7 @@ struct mlx5_tc_ct_pre {
struct mlx5_ct_ft {
	struct rhash_head node;
	u16 zone;
+	u32 zone_restore_id;
	refcount_t refcount;
	struct nf_flowtable *nf_ft;
	struct mlx5_tc_ct_priv *ct_priv;

...
@@ -82,12 +88,37 @@ struct mlx5_ct_ft {
	struct mlx5_tc_ct_pre pre_ct_nat;
};

-struct mlx5_ct_entry {
+struct mlx5_ct_tuple {
+	u16 addr_type;
+	__be16 n_proto;
+	u8 ip_proto;
+	struct {
+		union {
+			__be32 src_v4;
+			struct in6_addr src_v6;
+		};
+		union {
+			__be32 dst_v4;
+			struct in6_addr dst_v6;
+		};
+	} ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+
	u16 zone;
+};
+
+struct mlx5_ct_entry {
	struct rhash_head node;
+	struct rhash_head tuple_node;
+	struct rhash_head tuple_nat_node;
	struct mlx5_fc *counter;
	unsigned long cookie;
	unsigned long restore_cookie;
+	struct mlx5_ct_tuple tuple;
+	struct mlx5_ct_tuple tuple_nat;
	struct mlx5_ct_zone_rule zone_rules[2];
};

...
@@ -106,6 +137,22 @@ static const struct rhashtable_params zone_params = {
	.automatic_shrinking = true,
};

+static const struct rhashtable_params tuples_ht_params = {
+	.head_offset = offsetof(struct mlx5_ct_entry, tuple_node),
+	.key_offset = offsetof(struct mlx5_ct_entry, tuple),
+	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple),
+	.automatic_shrinking = true,
+	.min_size = 16 * 1024,
+};
+
+static const struct rhashtable_params tuples_nat_ht_params = {
+	.head_offset = offsetof(struct mlx5_ct_entry, tuple_nat_node),
+	.key_offset = offsetof(struct mlx5_ct_entry, tuple_nat),
+	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple_nat),
+	.automatic_shrinking = true,
+	.min_size = 16 * 1024,
+};
+
static struct mlx5_tc_ct_priv *
mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
{
...
@@ -118,6 +165,115 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
	return uplink_priv->ct_priv;
}

+static int
+mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
+{
+	struct flow_match_control control;
+	struct flow_match_basic basic;
+
+	flow_rule_match_basic(rule, &basic);
+	flow_rule_match_control(rule, &control);
+
+	tuple->n_proto = basic.key->n_proto;
+	tuple->ip_proto = basic.key->ip_proto;
+	tuple->addr_type = control.key->addr_type;
+
+	if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_ipv4_addrs(rule, &match);
+		tuple->ip.src_v4 = match.key->src;
+		tuple->ip.dst_v4 = match.key->dst;
+	} else if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_ipv6_addrs(rule, &match);
+		tuple->ip.src_v6 = match.key->src;
+		tuple->ip.dst_v6 = match.key->dst;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_ports(rule, &match);
+		switch (tuple->ip_proto) {
+		case IPPROTO_TCP:
+		case IPPROTO_UDP:
+			tuple->port.src = match.key->src;
+			tuple->port.dst = match.key->dst;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
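
/* (annotation) builds the post-NAT tuple by replaying the flow's mangle
 * actions on top of a copy of the pre-NAT tuple
 */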
+static int
+mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
+			     struct flow_rule *rule)
+{
+	struct flow_action *flow_action = &rule->action;
+	struct flow_action_entry *act;
+	u32 offset, val, ip6_offset;
+	int i;
+
+	flow_action_for_each(i, act, flow_action) {
+		if (act->id != FLOW_ACTION_MANGLE)
+			continue;
+
+		offset = act->mangle.offset;
+		val = act->mangle.val;
+		switch (act->mangle.htype) {
+		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+			if (offset == offsetof(struct iphdr, saddr))
+				tuple->ip.src_v4 = cpu_to_be32(val);
+			else if (offset == offsetof(struct iphdr, daddr))
+				tuple->ip.dst_v4 = cpu_to_be32(val);
+			else
+				return -EOPNOTSUPP;
+			break;
+
+		case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+			ip6_offset = (offset - offsetof(struct ipv6hdr, saddr));
+			ip6_offset /= 4;
+			if (ip6_offset < 8)
+				tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val);
+			else
+				return -EOPNOTSUPP;
+			break;
+
+		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+			if (offset == offsetof(struct tcphdr, source))
+				tuple->port.src = cpu_to_be16(val);
+			else if (offset == offsetof(struct tcphdr, dest))
+				tuple->port.dst = cpu_to_be16(val);
+			else
+				return -EOPNOTSUPP;
+			break;
+
+		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+			if (offset == offsetof(struct udphdr, source))
+				tuple->port.src = cpu_to_be16(val);
+			else if (offset == offsetof(struct udphdr, dest))
+				tuple->port.dst = cpu_to_be16(val);
+			else
+				return -EOPNOTSUPP;
+			break;
+
+		default:
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return 0;
+}

static int
mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			   struct flow_rule *rule)

...
@@ -243,11 +399,11 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
	struct mlx5_eswitch *esw = ct_priv->esw;

-	ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+	ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);

	mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
-	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
-	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+	mlx5e_mod_hdr_detach(ct_priv->esw->dev,
+			     &esw->offloads.mod_hdr, zone_rule->mh);
}

static void
...
@@ -281,7 +437,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
			       u8 ct_state,
			       u32 mark,
			       u32 label,
-			       u32 tupleid)
+			       u8 zone_restore_id)
{
	struct mlx5_eswitch *esw = ct_priv->esw;
	int err;

...
@@ -302,7 +458,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
		return err;

	err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
-					TUPLEID_TO_REG, tupleid);
+					ZONE_RESTORE_TO_REG, zone_restore_id);
	if (err)
		return err;

...
@@ -429,12 +585,10 @@ static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
				struct mlx5_esw_flow_attr *attr,
				struct flow_rule *flow_rule,
-				u32 tupleid,
-				bool nat)
+				struct mlx5e_mod_hdr_handle **mh,
+				u8 zone_restore_id, bool nat)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
-	struct mlx5_eswitch *esw = ct_priv->esw;
-	struct mlx5_modify_hdr *mod_hdr;
	struct flow_action_entry *meta;
	u16 ct_state = 0;
	int err;

...
@@ -464,18 +618,19 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
					ct_state,
					meta->ct_metadata.mark,
					meta->ct_metadata.labels[0],
-					tupleid);
+					zone_restore_id);
	if (err)
		goto err_mapping;

-	mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
-					   mod_acts.num_actions,
-					   mod_acts.actions);
-	if (IS_ERR(mod_hdr)) {
-		err = PTR_ERR(mod_hdr);
+	*mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
+				   &ct_priv->esw->offloads.mod_hdr,
+				   MLX5_FLOW_NAMESPACE_FDB,
+				   &mod_acts);
+	if (IS_ERR(*mh)) {
+		err = PTR_ERR(*mh);
		goto err_mapping;
	}
-	attr->modify_hdr = mod_hdr;
+	attr->modify_hdr = mlx5e_mod_hdr_get(*mh);

	dealloc_mod_hdr_actions(&mod_acts);
	return 0;

...
@@ -489,13 +644,12 @@ static int
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
			  struct flow_rule *flow_rule,
			  struct mlx5_ct_entry *entry,
-			  bool nat)
+			  bool nat, u8 zone_restore_id)
{
	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
	struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
	struct mlx5_eswitch *esw = ct_priv->esw;
	struct mlx5_flow_spec *spec = NULL;
-	u32 tupleid;
	int err;

	zone_rule->nat = nat;

...
@@ -504,18 +658,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
	if (!spec)
		return -ENOMEM;

-	/* Get tuple unique id */
-	err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
-		       XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
-	if (err) {
-		netdev_warn(ct_priv->netdev,
-			    "Failed to allocate tuple id, err: %d\n", err);
-		goto err_xa_alloc;
-	}
-	zone_rule->tupleid = tupleid;
-
	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
-					      tupleid, nat);
+					      &zone_rule->mh,
+					      zone_restore_id, nat);
	if (err) {
		ct_dbg("Failed to create ct entry mod hdr");
		goto err_mod_hdr;

...
@@ -533,7 +678,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
	mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
-				    entry->zone & MLX5_CT_ZONE_MASK,
+				    entry->tuple.zone & MLX5_CT_ZONE_MASK,
				    MLX5_CT_ZONE_MASK);

	zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

...
@@ -544,15 +689,14 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
	}

	kfree(spec);
-	ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+	ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);

	return 0;

err_rule:
-	mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+	mlx5e_mod_hdr_detach(ct_priv->esw->dev,
+			     &esw->offloads.mod_hdr, zone_rule->mh);
err_mod_hdr:
-	xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
-err_xa_alloc:
	kfree(spec);
	return err;
}

...
@@ -560,7 +704,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
static int
mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
			   struct flow_rule *flow_rule,
-			   struct mlx5_ct_entry *entry)
+			   struct mlx5_ct_entry *entry,
+			   u8 zone_restore_id)
{
	struct mlx5_eswitch *esw = ct_priv->esw;
	int err;

...
@@ -572,11 +717,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
		return err;
	}

-	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
+	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+					zone_restore_id);
	if (err)
		goto err_orig;

-	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
+	err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+					zone_restore_id);
	if (err)
		goto err_nat;

...
@@ -613,11 +760,35 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
	if (!entry)
		return -ENOMEM;

-	entry->zone = ft->zone;
+	entry->tuple.zone = ft->zone;
	entry->cookie = flow->cookie;
	entry->restore_cookie = meta_action->ct_metadata.cookie;

-	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
+	err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
+	if (err)
+		goto err_set;
+
+	memcpy(&entry->tuple_nat, &entry->tuple, sizeof(entry->tuple));
+	err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
+	if (err)
+		goto err_set;
+
+	err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
+				     &entry->tuple_node,
+				     tuples_ht_params);
+	if (err)
+		goto err_tuple;
+
+	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+		err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
+					     &entry->tuple_nat_node,
+					     tuples_nat_ht_params);
+		if (err)
+			goto err_tuple_nat;
+	}
+
+	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
+					 ft->zone_restore_id);
	if (err)
		goto err_rules;

...
@@ -631,12 +802,34 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_insert:
	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
+	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+			       &entry->tuple_nat_node, tuples_nat_ht_params);
+err_tuple_nat:
+	if (entry->tuple_node.next)
+		rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+				       &entry->tuple_node,
+				       tuples_ht_params);
+err_tuple:
+err_set:
	kfree(entry);
	netdev_warn(ct_priv->netdev,
		    "Failed to offload ct entry, err: %d\n", err);
	return err;
}
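
/* (annotation) teardown helper: removes the entry's offload rules and
 * drops it from both tuple tables (the NAT node only if it was inserted)
 */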
+static void
+mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
+			struct mlx5_ct_entry *entry)
+{
+	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+	if (entry->tuple_node.next)
+		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+				       &entry->tuple_nat_node,
+				       tuples_nat_ht_params);
+	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+			       tuples_ht_params);
+}
+
static int
mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
				  struct flow_cls_offload *flow)
...
@@ -649,7 +842,7 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
	if (!entry)
		return -ENOENT;

-	mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+	mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
	WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
				       &entry->node,
				       cts_ht_params));

...
@@ -702,6 +895,66 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
	return -EOPNOTSUPP;
}
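
/* (annotation) on a CT miss, rebuild the zone + 5-tuple key from the skb
 * so the offloaded entry can be found in ct_tuples_ht / ct_tuples_nat_ht
 */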
+static bool
+mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
+			u16 zone)
+{
+	struct flow_keys flow_keys;
+
+	skb_reset_network_header(skb);
+	skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+
+	tuple->zone = zone;
+
+	if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
+	    flow_keys.basic.ip_proto != IPPROTO_UDP)
+		return false;
+
+	tuple->port.src = flow_keys.ports.src;
+	tuple->port.dst = flow_keys.ports.dst;
+	tuple->n_proto = flow_keys.basic.n_proto;
+	tuple->ip_proto = flow_keys.basic.ip_proto;
+
+	switch (flow_keys.basic.n_proto) {
+	case htons(ETH_P_IP):
+		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+		tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src;
+		tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst;
+		break;
+
+	case htons(ETH_P_IPV6):
+		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+		tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src;
+		tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst;
+		break;
+
+	default:
+		goto out;
+	}
+
+	return true;
+
+out:
+	return false;
+}
+
+int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec)
+{
+	u32 ctstate = 0, ctstate_mask = 0;
+
+	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
+					&ctstate, &ctstate_mask);
+	if (ctstate_mask)
+		return -EOPNOTSUPP;
+
+	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
+	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
+				    ctstate, ctstate_mask);
+
+	return 0;
+}

int
mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
		       struct mlx5_flow_spec *spec,

...
@@ -1054,6 +1307,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
	if (!ft)
		return ERR_PTR(-ENOMEM);

+	err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);
+	if (err)
+		goto err_mapping;
+
	ft->zone = zone;
	ft->nf_ft = nf_ft;
	ft->ct_priv = ct_priv;

...
@@ -1086,6 +1343,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
err_init:
	mlx5_tc_ct_free_pre_ct_tables(ft);
err_alloc_pre_ct:
+	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
+err_mapping:
	kfree(ft);
	return ERR_PTR(err);
}

...
@@ -1096,7 +1355,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
	struct mlx5_tc_ct_priv *ct_priv = arg;
	struct mlx5_ct_entry *entry = ptr;

-	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+	mlx5_tc_ct_del_ft_entry(ct_priv, entry);
+	kfree(entry);
}

static void
...
@@ -1112,6 +1372,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
				    mlx5_tc_ct_flush_ft_entry,
				    ct_priv);
	mlx5_tc_ct_free_pre_ct_tables(ft);
+	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
	kfree(ft);
}

...
@@ -1139,6 +1400,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 * | set mark
 * | set label
 * | set established
+ * | set zone_restore
 * | do nat (if needed)
 * v
 * +--------------+
...
@@ -1146,12 +1408,11 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 * + fte_id match +------------------------>
 * +--------------+
 */
-static int
+static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
			  struct mlx5e_tc_flow *flow,
			  struct mlx5_flow_spec *orig_spec,
-			  struct mlx5_esw_flow_attr *attr,
-			  struct mlx5_flow_handle **flow_rule)
+			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
	bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;

...
@@ -1171,7 +1432,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
	if (!post_ct_spec || !ct_flow) {
		kfree(post_ct_spec);
		kfree(ct_flow);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
	}

	/* Register for CT established events */

...
@@ -1292,11 +1553,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
	}

	attr->ct_attr.ct_flow = ct_flow;
-	*flow_rule = ct_flow->post_ct_rule;
	dealloc_mod_hdr_actions(&pre_mod_acts);
	kfree(post_ct_spec);

-	return 0;
+	return rule;

err_insert_orig:
	mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,

...
@@ -1314,16 +1574,14 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
	kfree(post_ct_spec);
	kfree(ct_flow);
	netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
-	return err;
+	return ERR_PTR(err);
}

-static int
+static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
-				struct mlx5e_tc_flow *flow,
				struct mlx5_flow_spec *orig_spec,
				struct mlx5_esw_flow_attr *attr,
-				struct mlx5e_tc_mod_hdr_acts *mod_acts,
-				struct mlx5_flow_handle **flow_rule)
+				struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
	struct mlx5_eswitch *esw = ct_priv->esw;

...
@@ -1335,7 +1593,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
	if (!ct_flow)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);

	/* Base esw attributes on original rule attribute */
	pre_ct_attr = &ct_flow->pre_ct_attr;

...
@@ -1370,16 +1628,14 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
	attr->ct_attr.ct_flow = ct_flow;
	ct_flow->pre_ct_rule = rule;
-	*flow_rule = rule;
-	return 0;
+	return rule;

err_insert:
	mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
err_set_registers:
	netdev_warn(priv->netdev,
		    "Failed to offload ct clear flow, err %d\n", err);
-	return err;
+	return ERR_PTR(err);
}

struct mlx5_flow_handle *
...
@@ -1391,22 +1647,18 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
{
	bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-	struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
-	int err;
+	struct mlx5_flow_handle *rule;

	if (!ct_priv)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&ct_priv->control_lock);
	if (clear_action)
-		err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
-						      mod_hdr_acts, &rule);
+		rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
	else
-		err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
-						&rule);
+		rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
	mutex_unlock(&ct_priv->control_lock);
-	if (err)
-		return ERR_PTR(err);

	return rule;
}

...
@@ -1534,6 +1786,12 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
		goto err_alloc;
	}

+	ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
+	if (IS_ERR(ct_priv->zone_mapping)) {
+		err = PTR_ERR(ct_priv->zone_mapping);
+		goto err_mapping;
+	}
+
	ct_priv->esw = esw;
	ct_priv->netdev = rpriv->netdev;
	ct_priv->ct = mlx5_esw_chains_create_global_table(esw);

...
@@ -1560,9 +1818,10 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
	}

	idr_init(&ct_priv->fte_ids);
-	xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
	mutex_init(&ct_priv->control_lock);
	rhashtable_init(&ct_priv->zone_ht, &zone_params);
+	rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
+	rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);

	/* Done, set ct_priv to know it initializted */
	uplink_priv->ct_priv = ct_priv;

...
@@ -1574,6 +1833,8 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
err_ct_nat_tbl:
	mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
err_ct_tbl:
+	mapping_destroy(ct_priv->zone_mapping);
+err_mapping:
	kfree(ct_priv);
err_alloc:
err_support:

...
@@ -1592,10 +1853,12 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
	mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
	mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
	mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+	mapping_destroy(ct_priv->zone_mapping);
+	rhashtable_destroy(&ct_priv->ct_tuples_ht);
+	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
	rhashtable_destroy(&ct_priv->zone_ht);
	mutex_destroy(&ct_priv->control_lock);
-	xa_destroy(&ct_priv->tuple_ids);
	idr_destroy(&ct_priv->fte_ids);
	kfree(ct_priv);

...
@@ -1604,22 +1867,30 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
bool
mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-			 struct sk_buff *skb, u32 tupleid)
+			 struct sk_buff *skb, u8 zone_restore_id)
{
	struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
-	struct mlx5_ct_zone_rule *zone_rule;
+	struct mlx5_ct_tuple tuple = {};
	struct mlx5_ct_entry *entry;
+	u16 zone;

-	if (!ct_priv || !tupleid)
+	if (!ct_priv || !zone_restore_id)
		return true;

-	zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
-	if (!zone_rule)
+	if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
		return false;

-	entry = container_of(zone_rule, struct mlx5_ct_entry,
-			     zone_rules[zone_rule->nat]);
-	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
+		return false;
+
+	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
+				       tuples_ht_params);
+	if (!entry)
+		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+					       &tuple, tuples_nat_ht_params);
+	if (!entry)
+		return false;
+
+	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);

	return true;
}

...
@@ -67,16 +67,17 @@ struct mlx5_ct_attr {
				 misc_parameters_2.metadata_reg_c_5),\
}

-#define tupleid_to_reg_ct {\
+#define zone_restore_to_reg_ct {\
	.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
	.moffset = 0,\
-	.mlen = 3,\
+	.mlen = 1,\
	.soffset = MLX5_BYTE_OFF(fte_match_param,\
-				 misc_parameters_2.metadata_reg_c_1),\
+				 misc_parameters_2.metadata_reg_c_1) + 3,\
}

-#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
-#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
+#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)

#if IS_ENABLED(CONFIG_MLX5_TC_CT)

...
@@ -91,6 +92,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
		       struct flow_cls_offload *f,
		       struct netlink_ext_ack *extack);
int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec);
+int
mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
			struct mlx5_esw_flow_attr *attr,
			const struct flow_action_entry *act,

...
@@ -109,7 +113,7 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
bool
mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-			 struct sk_buff *skb, u32 tupleid);
+			 struct sk_buff *skb, u8 zone_restore_id);

#else /* CONFIG_MLX5_TC_CT */

...
@@ -140,6 +144,13 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
	return -EOPNOTSUPP;
}

+static inline int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec)
+{
+	return 0;
+}
+
static inline int
mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
			struct mlx5_esw_flow_attr *attr,

...
@@ -170,9 +181,9 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
static inline bool
mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-			 struct sk_buff *skb, u32 tupleid)
+			 struct sk_buff *skb, u8 zone_restore_id)
{
-	if (!tupleid)
+	if (!zone_restore_id)
		return true;

	return false;
...
@@ -63,6 +63,7 @@
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
+#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"

...
@@ -140,8 +141,7 @@ struct mlx5e_tc_flow {
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
-	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
-	struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer; /* flows with peer flow */

...
@@ -180,17 +180,17 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
-		.moffset = 3,
-		.mlen = 1,
+		.moffset = 1,
+		.mlen = 3,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
+	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
-	[TUPLEID_TO_REG] = tupleid_to_reg_ct,
};

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

...
@@ -219,6 +219,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
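
/* (annotation) reads back the value/mask pair that
 * mlx5e_tc_match_to_reg_match() wrote into the spec for a given register
 */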
+void
+mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
+				enum mlx5e_tc_attr_to_reg type,
+				u32 *data,
+				u32 *mask)
+{
+	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+	void *headers_c = spec->match_criteria;
+	void *headers_v = spec->match_value;
+	void *fmask, *fval;
+
+	fmask = headers_c + soffset;
+	fval = headers_v + soffset;
+
+	memcpy(mask, fmask, match_len);
+	memcpy(data, fval, match_len);
+
+	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
+	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
+}

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,

...
@@ -287,29 +309,6 @@ struct mlx5e_hairpin_entry {
	struct completion res_ready;
};

-struct mod_hdr_key {
-	int num_actions;
-	void *actions;
-};
-
-struct mlx5e_mod_hdr_entry {
-	/* a node of a hash table which keeps all the mod_hdr entries */
-	struct hlist_node mod_hdr_hlist;
-
-	/* protects flows list */
-	spinlock_t flows_lock;
-	/* flows sharing the same mod_hdr entry */
-	struct list_head flows;
-
-	struct mod_hdr_key key;
-
-	struct mlx5_modify_hdr *modify_hdr;
-
-	refcount_t refcnt;
-	struct completion res_ready;
-	int compl_result;
-};
-
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

...
@@ -386,148 +385,43 @@ static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
	return flow_flag_test(flow, OFFLOADED);
}

-static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
+static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
-	return jhash(key->actions,
-		     key->num_actions * MLX5_MH_ACT_SZ, 0);
-}
-
-static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
-				   struct mod_hdr_key *b)
-{
-	if (a->num_actions != b->num_actions)
-		return 1;
-
-	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
+	return mlx5e_is_eswitch_flow(flow) ?
+		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
-get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

-	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
+		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

-static struct mlx5e_mod_hdr_entry *
-mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
-{
-	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
-
-	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
-		if (!cmp_mod_hdr_info(&mh->key, key)) {
-			refcount_inc(&mh->refcnt);
-			found = mh;
-			break;
-		}
-	}
-
-	return found;
-}
-
-static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
-			      struct mlx5e_mod_hdr_entry *mh,
-			      int namespace)
-{
-	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
-
-	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
-		return;
-	hash_del(&mh->mod_hdr_hlist);
-	mutex_unlock(&tbl->lock);
-
-	WARN_ON(!list_empty(&mh->flows));
-	if (mh->compl_result > 0)
-		mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
-
-	kfree(mh);
-}
-
-static int get_flow_name_space(struct mlx5e_tc_flow *flow)
-{
-	return mlx5e_is_eswitch_flow(flow) ?
-		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
-}
-
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
-	int num_actions, actions_size, namespace, err;
-	struct mlx5e_mod_hdr_entry *mh;
-	struct mod_hdr_tbl *tbl;
-	struct mod_hdr_key key;
-	u32 hash_key;
-
-	num_actions = parse_attr->mod_hdr_acts.num_actions;
-	actions_size = MLX5_MH_ACT_SZ * num_actions;
-	key.actions = parse_attr->mod_hdr_acts.actions;
-	key.num_actions = num_actions;
-
-	hash_key = hash_mod_hdr_info(&key);
-
-	namespace = get_flow_name_space(flow);
-	tbl = get_mod_hdr_table(priv, namespace);
-
-	mutex_lock(&tbl->lock);
-	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
-	if (mh) {
-		mutex_unlock(&tbl->lock);
-		wait_for_completion(&mh->res_ready);
-
-		if (mh->compl_result < 0) {
-			err = -EREMOTEIO;
-			goto attach_header_err;
-		}
-		goto attach_flow;
-	}
-
-	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
-	if (!mh) {
-		mutex_unlock(&tbl->lock);
-		return -ENOMEM;
-	}
-
-	mh->key.actions = (void *)mh + sizeof(*mh);
-	memcpy(mh->key.actions, key.actions, actions_size);
-	mh->key.num_actions = num_actions;
-	spin_lock_init(&mh->flows_lock);
-	INIT_LIST_HEAD(&mh->flows);
-	refcount_set(&mh->refcnt, 1);
-	init_completion(&mh->res_ready);
-
-	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
-	mutex_unlock(&tbl->lock);
+	struct mlx5_modify_hdr *modify_hdr;
+	struct mlx5e_mod_hdr_handle *mh;

-	mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
-						  mh->key.num_actions,
-						  mh->key.actions);
-	if (IS_ERR(mh->modify_hdr)) {
-		err = PTR_ERR(mh->modify_hdr);
-		mh->compl_result = err;
-		goto alloc_header_err;
-	}
-
-	mh->compl_result = 1;
-	complete_all(&mh->res_ready);
+	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
+				  get_flow_name_space(flow),
+				  &parse_attr->mod_hdr_acts);
+	if (IS_ERR(mh))
+		return PTR_ERR(mh);

-attach_flow:
-	flow->mh = mh;
-	spin_lock(&mh->flows_lock);
-	list_add(&flow->mod_hdr, &mh->flows);
-	spin_unlock(&mh->flows_lock);
+	modify_hdr = mlx5e_mod_hdr_get(mh);
	if (mlx5e_is_eswitch_flow(flow))
-		flow->esw_attr->modify_hdr = mh->modify_hdr;
+		flow->esw_attr->modify_hdr = modify_hdr;
	else
-		flow->nic_attr->modify_hdr = mh->modify_hdr;
+		flow->nic_attr->modify_hdr = modify_hdr;
+	flow->mh = mh;

	return 0;
-
-alloc_header_err:
-	complete_all(&mh->res_ready);
-attach_header_err:
-	mlx5e_mod_hdr_put(priv, mh, namespace);
-	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
...
@@ -537,11 +431,8 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
	if (!flow->mh)
		return;

-	spin_lock(&flow->mh->flows_lock);
-	list_del(&flow->mod_hdr);
-	spin_unlock(&flow->mh->flows_lock);
-
-	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
+			     flow->mh);
	flow->mh = NULL;
}

...
@@ -3086,6 +2977,7 @@ struct ipv6_hoplimit_word {
static int is_action_keys_supported(const struct flow_action_entry *act,
				    bool ct_flow, bool *modify_ip_header,
+				    bool *modify_tuple,
				    struct netlink_ext_ack *extack)
{
	u32 mask, offset;

...
@@ -3108,7 +3000,10 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
			*modify_ip_header = true;
		}

-		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
+		if (offset >= offsetof(struct iphdr, saddr))
+			*modify_tuple = true;
+
+		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return -EOPNOTSUPP;

...
@@ -3123,28 +3018,36 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
			*modify_ip_header = true;
		}

-		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
+		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
+			*modify_tuple = true;
+
+		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return -EOPNOTSUPP;
		}
-	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
-			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
+	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
+		   htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
+		*modify_tuple = true;
+
+		if (ct_flow) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of transport header ports with action ct");
			return -EOPNOTSUPP;
		}
+	}

	return 0;
}

-static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+static bool modify_header_match_supported(struct mlx5e_priv *priv,
+					  struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions, bool ct_flow,
+					  bool ct_clear,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
-	bool modify_ip_header;
+	bool modify_ip_header, modify_tuple;
	void *headers_c;
	void *headers_v;
	u16 ethertype;

...
@@ -3161,23 +3064,39 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
		goto out_ok;

	modify_ip_header = false;
+	modify_tuple = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		err = is_action_keys_supported(act, ct_flow,
-					       &modify_ip_header, extack);
+					       &modify_ip_header,
+					       &modify_tuple, extack);
		if (err)
			return err;
	}

+	/* Add ct_state=-trk match so it will be offloaded for non ct flows
+	 * (or after clear action), as otherwise, since the tuple is changed,
+	 * we can't restore ct state
+	 */
+	if (!ct_clear && modify_tuple &&
+	    mlx5_tc_ct_add_no_trk_match(priv, spec)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload tuple modify header with ct matches");
+		netdev_info(priv->netdev,
+			    "can't offload tuple modify header with ct matches");
+		return false;
+	}
+
	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
-		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+		netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
+			    ip_proto);
		return false;
	}

...
@@ -3191,13 +3110,14 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
-	bool ct_flow;
+	bool ct_flow = false, ct_clear = false;
	u32 actions;

-	ct_flow = flow_flag_test(flow, CT);
	if (mlx5e_is_eswitch_flow(flow)) {
		actions = flow->esw_attr->action;
+		ct_clear = flow->esw_attr->ct_attr.ct_action &
+			   TCA_CT_ACT_CLEAR;
+		ct_flow = flow_flag_test(flow, CT) && !ct_clear;

		if (flow->esw_attr->split_count && ct_flow) {
			/* All registers used by ct are cleared when using
			 * split rules.

...
@@ -3211,9 +3131,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-		return modify_header_match_supported(&parse_attr->spec,
+		return modify_header_match_supported(priv, &parse_attr->spec,
						     flow_action, actions,
-						     ct_flow, extack);
+						     ct_flow, ct_clear,
+						     extack);

	return true;
}
@@ -4408,7 +4329,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
 	flow->priv = priv;
 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
 		INIT_LIST_HEAD(&flow->encaps[out_index].list);
-	INIT_LIST_HEAD(&flow->mod_hdr);
 	INIT_LIST_HEAD(&flow->hairpin);
 	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
 	refcount_set(&flow->refcnt, 1);
@@ -4480,11 +4400,12 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;
 
-	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
+	/* actions validation depends on parsing the ct matches first */
+	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
 	if (err)
 		goto err_free;
 
-	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
+	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
 	if (err)
 		goto err_free;
@@ -5011,9 +4932,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 	int err;
 
+	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
 	mutex_init(&tc->t_lock);
-	mutex_init(&tc->mod_hdr.lock);
-	hash_init(tc->mod_hdr.hlist);
 	mutex_init(&tc->hairpin_tbl_lock);
 	hash_init(tc->hairpin_tbl);
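The open-coded mutex/hashtable setup removed here (and in mlx5_eswitch_init() further down) suggests plausible bodies for the new helpers. A sketch under that assumption, with the mod_hdr_tbl field names taken from the lines being replaced; the real definitions belong to the new en/mod_hdr.c and may differ:

/* Sketch: consolidate per-table mod_hdr bookkeeping behind one init/destroy
 * pair instead of callers touching the lock and hashtable directly.
 */
void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
{
	mutex_init(&tbl->lock);
	hash_init(tbl->hlist);
}

void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
{
	mutex_destroy(&tbl->lock);
}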
@@ -5051,7 +4971,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 					     &tc->netdevice_nb,
 					     &tc->netdevice_nn);
 
-	mutex_destroy(&tc->mod_hdr.lock);
+	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
 	mutex_destroy(&tc->hairpin_tbl_lock);
 	rhashtable_destroy(&tc->ht);
...
@@ -70,9 +70,9 @@ struct tunnel_match_enc_opts {
  * Upper TUNNEL_INFO_BITS for general tunnel info.
  * Lower ENC_OPTS_BITS bits for enc_opts.
  */
-#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS 12
 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS 12
 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
@@ -129,10 +129,10 @@ enum mlx5e_tc_attr_to_reg {
 	TUNNEL_TO_REG,
 	CTSTATE_TO_REG,
 	ZONE_TO_REG,
+	ZONE_RESTORE_TO_REG,
 	MARK_TO_REG,
 	LABELS_TO_REG,
 	FTEID_TO_REG,
-	TUPLEID_TO_REG,
 };
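ZONE_RESTORE_TO_REG takes over the restore role from the removed TUPLEID_TO_REG: on a miss, the zone mapping plus the skb's 5-tuple identifies the CT entry, so no per-flow tuple id needs to be carried. A guess at the shape of its entry in mlx5e_tc_attr_to_reg_mappings[], assuming the 8-bit zone mapping occupies the low byte of metadata register C1; the authoritative entry is in en_tc.c and may differ:

/* Assumed mapping entry, for illustration only. */
[ZONE_RESTORE_TO_REG] = {
	.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
	.moffset = 0,	/* low byte of reg_c_1 */
	.mlen = 1,	/* 1 byte = 8 bits for the zone mapping */
},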
 struct mlx5e_tc_attr_to_reg_mapping {
@@ -148,12 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
 				    struct net_device *out_dev);
 
-struct mlx5e_tc_mod_hdr_acts {
-	int num_actions;
-	int max_actions;
-	void *actions;
-};
-
 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
 			      struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
 			      enum mlx5e_tc_attr_to_reg type,
@@ -164,6 +158,11 @@ void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
 				 u32 data,
 				 u32 mask);
 
+void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
+				     enum mlx5e_tc_attr_to_reg type,
+				     u32 *data,
+				     u32 *mask);
+
 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
 			  int namespace,
 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
...
@@ -42,6 +42,7 @@
 #include "fs_core.h"
 #include "devlink.h"
 #include "ecpf.h"
+#include "en/mod_hdr.h"
 
 enum {
 	MLX5_ACTION_NONE = 0,
@@ -69,7 +70,7 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
 		return -EOPNOTSUPP;
 
 	if (!MLX5_ESWITCH_MANAGER(dev))
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	return 0;
 }
@@ -1748,10 +1749,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	mutex_init(&esw->offloads.encap_tbl_lock);
 	hash_init(esw->offloads.encap_tbl);
-	mutex_init(&esw->offloads.mod_hdr.lock);
-	hash_init(esw->offloads.mod_hdr.hlist);
 	mutex_init(&esw->offloads.decap_tbl_lock);
 	hash_init(esw->offloads.decap_tbl);
+	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
 	atomic64_set(&esw->offloads.num_flows, 0);
 	ida_init(&esw->offloads.vport_metadata_ida);
 	mutex_init(&esw->state_lock);
@@ -1793,7 +1793,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	mutex_destroy(&esw->mode_lock);
 	mutex_destroy(&esw->state_lock);
 	ida_destroy(&esw->offloads.vport_metadata_ida);
-	mutex_destroy(&esw->offloads.mod_hdr.lock);
+	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
 	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	kfree(esw->vports);
...