Commit d6c7fc0c authored by David S. Miller

Merge tag 'mlx5-updates-2020-07-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-07-09

This series provides updates to mlx5 CT (connection tracking) offloads.
For more information, please see the tag log below.

Please pull and let me know if there is any problem.

The following conflict is expected when net is merged into net-next.
To resolve it, just use the hunks from net-next:

<<<<<<< HEAD (net-next)
	mlx5_tc_ct_del_ft_entry(ct_priv, entry);
	kfree(entry);
======= (net)
	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
	kfree(entry);
>>>>>>> b1a7d5bdfe54c98eca46e2c997d4e3b1484a49af

mlx5 connection tracking offloads updates:

1)  Restore CT state from a zone lookup instead of a tuple id

    On a miss, use the zone plus the 5-tuple taken from the skb to look up
    the CT entry and restore it, instead of using a driver-allocated tuple
    id.

    This improves the flow insertion rate by avoiding the allocation of a
    header rewrite context to maintain the tuple id. (A small illustrative
    sketch of this lookup follows this list.)

2) Re-use modify header HW objects for identical modify actions. (A
   userspace sketch of this reuse pattern appears after the new mod_hdr
   header file below.)

3) Expand tunnel register mappings
   Reg_c1 is 32 bits wide. Before this patchset, 24 bits were allocated
   for the tuple id, 6 bits for the tunnel mapping and 2 bits for the
   tunnel options mapping.

   Restoring the CT state from a zone lookup instead of a tuple id requires
   only 8 bits of reg_c1 to map the CT zone, leaving 24 bits for tunnel
   mappings.

   Expand the tunnel and tunnel options register mappings to 12 bits each
   (see the packing sketch after the TUNNEL_INFO_BITS/ENC_OPTS_BITS hunk
   further below).

4) Trivial cleanup and fixes.
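
To make (1) concrete, below is a purely illustrative userspace sketch of
the restore-on-miss idea: the CT entry is found by keying on the zone
together with the packet's 5-tuple, so no per-connection tuple id has to be
allocated and carried in a register. All names and types here are
hypothetical simplifications, not the driver's code (which keys a hash
table in a similar way):

/*
 * Illustrative only: restore-on-miss keyed by (zone, 5-tuple) instead of
 * a driver-allocated tuple id. Compiles with any C99 compiler.
 */
#include <string.h>
#include <stdint.h>
#include <stdio.h>

struct ct_key {
	uint32_t src_ip, dst_ip;	/* IPv4 only, for brevity */
	uint16_t src_port, dst_port;
	uint16_t zone;			/* recovered from reg_c1 on a miss */
	uint8_t proto;
};

struct ct_entry {
	struct ct_key key;
	uint32_t mark;			/* CT state to restore into the skb */
};

static struct ct_entry entries[128];
static int nentries;

/* On a HW miss: the zone comes from the register, the 5-tuple from the skb. */
static struct ct_entry *ct_restore_lookup(const struct ct_key *key)
{
	for (int i = 0; i < nentries; i++)
		if (!memcmp(&entries[i].key, key, sizeof(*key)))
			return &entries[i];
	return NULL;
}

int main(void)
{
	struct ct_key k;

	memset(&k, 0, sizeof(k));	/* zero padding so memcmp is reliable */
	k.src_ip = 0x0a000001;
	k.dst_ip = 0x0a000002;
	k.src_port = 1234;
	k.dst_port = 80;
	k.zone = 1;
	k.proto = 6;			/* TCP */

	entries[nentries].key = k;
	entries[nentries].mark = 0xc0ffee;
	nentries++;

	struct ct_entry *e = ct_restore_lookup(&k);

	printf("restored mark: 0x%x\n", (unsigned int)(e ? e->mark : 0));
	return 0;
}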
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0ea46047 bbe11249
@@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
 mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
 mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
-				en_rep.o en/rep/bond.o
+				en_rep.o en/rep/bond.o en/mod_hdr.o
 mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
 				en/mapping.o esw/chains.o en/tc_tun.o \
 				en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
......
@@ -4,6 +4,8 @@
 #ifndef __MLX5E_FLOW_STEER_H__
 #define __MLX5E_FLOW_STEER_H__
 
+#include "mod_hdr.h"
+
 enum {
 	MLX5E_TC_FT_LEVEL = 0,
 	MLX5E_TC_TTC_FT_LEVEL,
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include <linux/jhash.h>
#include "mod_hdr.h"

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_handle {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	struct mod_hdr_key key;

	struct mlx5_modify_hdr *modify_hdr;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};

static u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static int cmp_mod_hdr_info(struct mod_hdr_key *a, struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions,
		      a->num_actions * MLX5_MH_ACT_SZ);
}

void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
{
	mutex_init(&tbl->lock);
	hash_init(tbl->hlist);
}

void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
{
	mutex_destroy(&tbl->lock);
}

/* Lookup under tbl->lock; takes a reference on the entry if found. */
static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl,
						struct mod_hdr_key *key,
						u32 hash_key)
{
	struct mlx5e_mod_hdr_handle *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}

struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
		     struct mod_hdr_tbl *tbl,
		     enum mlx5_flow_namespace_type namespace,
		     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
	int num_actions, actions_size, err;
	struct mlx5e_mod_hdr_handle *mh;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions = mod_hdr_acts->num_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = mod_hdr_acts->actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	mutex_lock(&tbl->lock);
	mh = mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		/* another flow inserted this entry; wait until its HW
		 * object allocation completes before reusing it
		 */
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_header;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return ERR_PTR(-ENOMEM);
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	mh->modify_hdr = mlx5_modify_header_alloc(mdev, namespace,
						  mh->key.num_actions,
						  mh->key.actions);
	if (IS_ERR(mh->modify_hdr)) {
		err = PTR_ERR(mh->modify_hdr);
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_header:
	return mh;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_detach(mdev, tbl, mh);
	return ERR_PTR(err);
}

void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
			  struct mod_hdr_tbl *tbl,
			  struct mlx5e_mod_hdr_handle *mh)
{
	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	/* compl_result > 0 means the HW object was allocated successfully */
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(mdev, mh->modify_hdr);

	kfree(mh);
}

struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
{
	return mh->modify_hdr;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020 Mellanox Technologies */

#ifndef __MLX5E_EN_MOD_HDR_H__
#define __MLX5E_EN_MOD_HDR_H__

#include <linux/hashtable.h>
#include <linux/mlx5/fs.h>

struct mlx5e_mod_hdr_handle;

struct mlx5e_tc_mod_hdr_acts {
	int num_actions;
	int max_actions;
	void *actions;
};

struct mlx5e_mod_hdr_handle *
mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
		     struct mod_hdr_tbl *tbl,
		     enum mlx5_flow_namespace_type namespace,
		     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
			  struct mod_hdr_tbl *tbl,
			  struct mlx5e_mod_hdr_handle *mh);
struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);

void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);

#endif /* __MLX5E_EN_MOD_HDR_H__ */
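
The new en/mod_hdr.{c,h} above implement (2): identical modify-header
action sequences hash to a single cached, refcounted handle, so the HW
object is created once and shared. As a rough userspace illustration of
that reuse pattern only (hypothetical types, FNV-1a standing in for jhash,
no locking or completion handling, not the mlx5 implementation):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct cached_mod_hdr {
	struct cached_mod_hdr *next;	/* hash-bucket chain */
	int refcnt;
	size_t len;
	unsigned char actions[];	/* the modify-actions blob (the key) */
};

#define NBUCKETS 64
static struct cached_mod_hdr *buckets[NBUCKETS];

static unsigned int hash_blob(const void *p, size_t len)
{
	const unsigned char *c = p;
	unsigned int h = 2166136261u;	/* FNV-1a */

	while (len--)
		h = (h ^ *c++) * 16777619u;
	return h % NBUCKETS;
}

/* Return the cached entry for this blob, creating it on first use. */
static struct cached_mod_hdr *mod_hdr_attach(const void *blob, size_t len)
{
	unsigned int b = hash_blob(blob, len);
	struct cached_mod_hdr *mh;

	for (mh = buckets[b]; mh; mh = mh->next)
		if (mh->len == len && !memcmp(mh->actions, blob, len)) {
			mh->refcnt++;	/* reuse: no new HW object needed */
			return mh;
		}

	mh = malloc(sizeof(*mh) + len);
	if (!mh)
		return NULL;
	mh->refcnt = 1;
	mh->len = len;
	memcpy(mh->actions, blob, len);
	mh->next = buckets[b];
	buckets[b] = mh;	/* the real driver allocates the HW object here */
	return mh;
}

int main(void)
{
	struct cached_mod_hdr *a = mod_hdr_attach("set ttl 63", 10);
	struct cached_mod_hdr *c = mod_hdr_attach("set ttl 63", 10);

	printf("same object: %s, refcnt %d\n", a == c ? "yes" : "no", a->refcnt);
	return 0;
}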
@@ -594,7 +594,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 			     struct mlx5e_tc_update_priv *tc_priv)
 {
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-	u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+	u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id;
 	struct mlx5_rep_uplink_priv *uplink_priv;
 	struct mlx5e_rep_priv *uplink_rpriv;
 	struct tc_skb_ext *tc_skb_ext;
@@ -631,11 +631,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 		tc_skb_ext->chain = chain;
 
-		tuple_id = reg_c1 & TUPLE_ID_MAX;
+		zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
 
 		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 		uplink_priv = &uplink_rpriv->uplink_priv;
-		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+		if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+					      zone_restore_id))
 			return false;
 	}
......
@@ -67,16 +67,17 @@ struct mlx5_ct_attr {
 				 misc_parameters_2.metadata_reg_c_5),\
 }
 
-#define tupleid_to_reg_ct {\
+#define zone_restore_to_reg_ct {\
 	.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
 	.moffset = 0,\
-	.mlen = 3,\
+	.mlen = 1,\
 	.soffset = MLX5_BYTE_OFF(fte_match_param,\
-				 misc_parameters_2.metadata_reg_c_1),\
+				 misc_parameters_2.metadata_reg_c_1) + 3,\
 }
 
-#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
-#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
+#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
 
 #if IS_ENABLED(CONFIG_MLX5_TC_CT)
@@ -91,6 +92,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 		       struct flow_cls_offload *f,
 		       struct netlink_ext_ack *extack);
 int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec);
+int
 mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
 			struct mlx5_esw_flow_attr *attr,
 			const struct flow_action_entry *act,
@@ -109,7 +113,7 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
 
 bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-			 struct sk_buff *skb, u32 tupleid);
+			 struct sk_buff *skb, u8 zone_restore_id);
 
 #else /* CONFIG_MLX5_TC_CT */
@@ -140,6 +144,13 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 	return -EOPNOTSUPP;
 }
 
+static inline int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec)
+{
+	return 0;
+}
+
 static inline int
 mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
 			struct mlx5_esw_flow_attr *attr,
@@ -170,10 +181,10 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
 
 static inline bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-			 struct sk_buff *skb, u32 tupleid)
+			 struct sk_buff *skb, u8 zone_restore_id)
 {
-	if (!tupleid)
-		return true;
+	if (!zone_restore_id)
+		return true;
 
 	return false;
 }
......
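
As a sanity check of the ZONE_RESTORE_* defines in the hunk above: with
mlen = 1 byte from the new zone_restore_to_reg_ct mapping,
ZONE_RESTORE_BITS is 8 and ZONE_RESTORE_MAX is 0xff, i.e. the low byte of
reg_c1. A standalone userspace check (GENMASK expanded by hand, since it
is a kernel macro):

#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for the kernel's GENMASK(h, l), 32-bit only */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	int mlen = 1;				/* bytes, from the mapping */
	int zone_restore_bits = mlen * 8;	/* 8 */
	uint32_t zone_restore_max = GENMASK(zone_restore_bits - 1, 0);

	printf("ZONE_RESTORE_BITS = %d, ZONE_RESTORE_MAX = 0x%x\n",
	       zone_restore_bits, (unsigned int)zone_restore_max);	/* 8, 0xff */
	return 0;
}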
@@ -70,9 +70,9 @@ struct tunnel_match_enc_opts {
  * Upper TUNNEL_INFO_BITS for general tunnel info.
  * Lower ENC_OPTS_BITS bits for enc_opts.
  */
-#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS 12
 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS 12
 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
@@ -129,10 +129,10 @@ enum mlx5e_tc_attr_to_reg {
 	TUNNEL_TO_REG,
 	CTSTATE_TO_REG,
 	ZONE_TO_REG,
+	ZONE_RESTORE_TO_REG,
 	MARK_TO_REG,
 	LABELS_TO_REG,
 	FTEID_TO_REG,
-	TUPLEID_TO_REG,
 };
 
 struct mlx5e_tc_attr_to_reg_mapping {
@@ -148,12 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
 				    struct net_device *out_dev);
 
-struct mlx5e_tc_mod_hdr_acts {
-	int num_actions;
-	int max_actions;
-	void *actions;
-};
-
 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
 			      struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
 			      enum mlx5e_tc_attr_to_reg type,
@@ -164,6 +158,11 @@ void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
 				 u32 data,
 				 u32 mask);
 
+void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
+				     enum mlx5e_tc_attr_to_reg type,
+				     u32 *data,
+				     u32 *mask);
+
 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
 			  int namespace,
 			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
......
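
Tying (1) and (3) together: after this series, reg_c1 carries the 8-bit
zone restore id in its low byte (per the tc_ct.h hunks above) and,
assuming the remaining bits are used as the commit message describes, a
24-bit tunnel id above it, split 12/12 by the TUNNEL_INFO_BITS and
ENC_OPTS_BITS defines above. A small illustrative pack/unpack sketch, not
driver code:

/* Illustrative reg_c1 layout after this series (not driver code):
 * [31..20] tunnel info | [19..8] enc_opts | [7..0] zone restore id
 */
#include <stdint.h>
#include <stdio.h>

#define ZONE_RESTORE_BITS	8
#define ENC_OPTS_BITS		12
#define ENC_OPTS_MASK		((1u << ENC_OPTS_BITS) - 1)
#define TUNNEL_INFO_BITS	12
#define TUNNEL_INFO_MASK	((1u << TUNNEL_INFO_BITS) - 1)

static uint32_t pack_reg_c1(uint32_t tun_info, uint32_t enc_opts, uint8_t zone)
{
	uint32_t tunnel_id = ((tun_info & TUNNEL_INFO_MASK) << ENC_OPTS_BITS) |
			     (enc_opts & ENC_OPTS_MASK);

	return (tunnel_id << ZONE_RESTORE_BITS) | zone;
}

int main(void)
{
	uint32_t reg_c1 = pack_reg_c1(0x123, 0x456, 0x78);

	printf("zone_restore_id = 0x%x\n", (unsigned int)(reg_c1 & 0xff));
	printf("enc_opts        = 0x%x\n",
	       (unsigned int)((reg_c1 >> ZONE_RESTORE_BITS) & ENC_OPTS_MASK));
	printf("tunnel_info     = 0x%x\n",
	       (unsigned int)(reg_c1 >> (ZONE_RESTORE_BITS + ENC_OPTS_BITS)));
	return 0;
}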
@@ -42,6 +42,7 @@
 #include "fs_core.h"
 #include "devlink.h"
 #include "ecpf.h"
+#include "en/mod_hdr.h"
 
 enum {
 	MLX5_ACTION_NONE = 0,
@@ -69,7 +70,7 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
 		return -EOPNOTSUPP;
 
 	if (!MLX5_ESWITCH_MANAGER(dev))
-		return -EPERM;
+		return -EOPNOTSUPP;
 
 	return 0;
 }
@@ -1748,10 +1749,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	mutex_init(&esw->offloads.encap_tbl_lock);
 	hash_init(esw->offloads.encap_tbl);
-	mutex_init(&esw->offloads.mod_hdr.lock);
-	hash_init(esw->offloads.mod_hdr.hlist);
 	mutex_init(&esw->offloads.decap_tbl_lock);
 	hash_init(esw->offloads.decap_tbl);
+	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
 	atomic64_set(&esw->offloads.num_flows, 0);
 	ida_init(&esw->offloads.vport_metadata_ida);
 	mutex_init(&esw->state_lock);
@@ -1793,7 +1793,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	mutex_destroy(&esw->mode_lock);
 	mutex_destroy(&esw->state_lock);
 	ida_destroy(&esw->offloads.vport_metadata_ida);
-	mutex_destroy(&esw->offloads.mod_hdr.lock);
+	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
 	mutex_destroy(&esw->offloads.encap_tbl_lock);
 	mutex_destroy(&esw->offloads.decap_tbl_lock);
 	kfree(esw->vports);
......