Commit d44a919a authored by David S. Miller

Merge tag 'mlx5-updates-2020-07-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-07-16

Fixes:
1) Fix build break when CONFIG_XPS is not set
2) Fix missing switch_id for representors

Updates:
1) IPsec XFRM RX offloads from Raed and Huy.
  - Added IPsec RX steering flow tables to NIC RX
  - Refactored the existing FPGA IPsec code to add support
    for ConnectX IPsec
  - RX data path handling for IPsec traffic
  - Synchronize the offloading device's ESN with the xfrm received SN

2) Parav allows E-Switch to switch to switchdev mode directly without
   the need to go through legacy mode first.

3) From Tariq, Misc updates including:
   3.1) indirect calls for RX and XDP handlers
   3.2) Make MLX5_EN_TLS non-prompt, as it should always be enabled when
        TLS_DEVICE and MLX5_CORE_EN are selected.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 721dab2b 54b154ec
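
The common thread of the IPsec refactor below is a per-device ops table: mdev->ipsec_ops is bound once at init to either the ConnectX offload implementation or the Innova FPGA one, and every accel entry point dispatches through it behind a NULL guard. A condensed sketch of the selection step (pick_ipsec_ops is a hypothetical helper name; the real code inlines this logic in mlx5_accel_ipsec_init, as shown in the accel/ipsec.c hunk below):

/* Condensed sketch, not part of the patch: the ConnectX-native
 * implementation takes precedence over the Innova FPGA one.
 */
static const struct mlx5_accel_ipsec_ops *
pick_ipsec_ops(struct mlx5_core_dev *mdev)
{
	return mlx5_ipsec_offload_ops(mdev) ?: mlx5_fpga_ipsec_ops(mdev);
}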
......@@ -134,12 +134,25 @@ config MLX5_FPGA_IPSEC
mlx5_core driver will include the Innova FPGA core and allow building
sandbox-specific client drivers.
config MLX5_IPSEC
bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
select MLX5_ACCEL
default n
help
Build IPsec support for the Connect-X family of network cards by Mellanox
Technologies.
Note: If you select this option, the mlx5_core driver will include
IPsec support for the Connect-X family.
config MLX5_EN_IPSEC
bool "IPSec XFRM cryptography-offload accelaration"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC
depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
default n
help
Build support for IPsec cryptography-offload acceleration in the NIC.
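Taken together, the dependencies above mean a kernel needs roughly the following to get ConnectX IPsec offload built in (illustrative .config fragment, assuming IPv4 ESP):

CONFIG_MLX5_CORE_EN=y
CONFIG_XFRM_OFFLOAD=y
CONFIG_INET_ESP_OFFLOAD=y
CONFIG_MLX5_IPSEC=y
CONFIG_MLX5_EN_IPSEC=y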
......@@ -150,7 +163,10 @@ config MLX5_FPGA_TLS
bool "Mellanox Technologies TLS Innova support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on MLX5_FPGA
depends on XPS
select MLX5_EN_TLS
default n
help
Build TLS support for the Innova family of network cards by Mellanox
......@@ -161,21 +177,19 @@ config MLX5_FPGA_TLS
config MLX5_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on MLX5_CORE_EN
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on XPS
select MLX5_ACCEL
select MLX5_EN_TLS
default n
help
Build TLS support for the Connect-X family of network cards by Mellanox
Technologies.
config MLX5_EN_TLS
bool "TLS cryptography-offload accelaration"
depends on MLX5_CORE_EN
depends on XPS
depends on MLX5_FPGA_TLS || MLX5_TLS
default y
bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
......
......@@ -64,6 +64,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
......@@ -71,7 +72,7 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
en_accel/ipsec_stats.o en_accel/ipsec_fs.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
......
......@@ -31,37 +31,88 @@
*
*/
#ifdef CONFIG_MLX5_FPGA_IPSEC
#include <linux/mlx5/device.h>
#include "accel/ipsec.h"
#include "mlx5_core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec_offload.h"
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops;
int err = 0;
ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
mlx5_ipsec_offload_ops(mdev) :
mlx5_fpga_ipsec_ops(mdev);
if (!ipsec_ops || !ipsec_ops->init) {
mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
return;
}
err = ipsec_ops->init(mdev);
if (err) {
mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
return;
}
mdev->ipsec_ops = ipsec_ops;
}
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->cleanup)
return;
ipsec_ops->cleanup(mdev);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_device_caps(mdev);
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->device_caps)
return 0;
return ipsec_ops->device_caps(mdev);
}
EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_counters_count(mdev);
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_count)
return -EOPNOTSUPP;
return ipsec_ops->counters_count(mdev);
}
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count)
{
return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->counters_read)
return -EOPNOTSUPP;
return ipsec_ops->counters_read(mdev, counters, count);
}
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
__be32 saddr[4] = {}, daddr[4] = {};
if (!ipsec_ops || !ipsec_ops->create_hw_context)
return ERR_PTR(-EOPNOTSUPP);
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
......@@ -70,29 +121,18 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
daddr, xfrm->attrs.spi,
return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(void *context)
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
mlx5_fpga_ipsec_delete_sa_ctx(context);
}
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_init(mdev);
}
void mlx5_accel_ipsec_build_fs_cmds(void)
{
mlx5_fpga_ipsec_build_fs_cmds();
}
if (!ipsec_ops || !ipsec_ops->free_hw_context)
return;
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
ipsec_ops->free_hw_context(context);
}
struct mlx5_accel_esp_xfrm *
......@@ -100,9 +140,13 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
{
const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
struct mlx5_accel_esp_xfrm *xfrm;
xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
return ERR_PTR(-EOPNOTSUPP);
xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
if (IS_ERR(xfrm))
return xfrm;
......@@ -113,15 +157,23 @@ EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_fpga_esp_destroy_xfrm(xfrm);
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
return;
ipsec_ops->esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
return -EOPNOTSUPP;
return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
#endif
......@@ -37,7 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
#ifdef CONFIG_MLX5_FPGA_IPSEC
#ifdef CONFIG_MLX5_ACCEL
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
......@@ -49,12 +49,30 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(void *context);
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_build_fs_cmds(void);
void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
struct mlx5_accel_ipsec_ops {
u32 (*device_caps)(struct mlx5_core_dev *mdev);
unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
void* (*create_hw_context)(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *sa_handle);
void (*free_hw_context)(void *context);
int (*init)(struct mlx5_core_dev *mdev);
void (*cleanup)(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
};
#else
#define MLX5_IPSEC_DEV(mdev) false
......@@ -67,23 +85,12 @@ mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(void *context)
{
}
static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
static inline void mlx5_accel_ipsec_build_fs_cmds(void)
{
}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
}
static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
#endif
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5_ACCEL_IPSEC_H__ */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include "accel/ipsec.h"
#ifdef CONFIG_MLX5_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return false;
if (!MLX5_CAP_GEN(mdev, log_max_dek))
return false;
if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return false;
return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer);
}
#else
static inline const struct mlx5_accel_ipsec_ops *
mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
{
return false;
}
#endif /* CONFIG_MLX5_IPSEC */
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
......@@ -113,7 +113,9 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
return -EINVAL;
}
return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
return mlx5_create_encryption_key(mdev, key, sz_bytes,
MLX5_ACCEL_OBJ_TLS_KEY,
p_key_id);
}
void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
......
......@@ -129,7 +129,11 @@ enum {
MLX5E_ACCEL_FS_TCP_FT_LEVEL,
#endif
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL
MLX5E_ARFS_FT_LEVEL,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
#endif
};
......
......@@ -32,6 +32,9 @@
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
#define ct_dbg(fmt, args...)\
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
......@@ -48,6 +51,7 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
};
struct mlx5_ct_flow {
......@@ -404,6 +408,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
mlx5e_mod_hdr_detach(ct_priv->esw->dev,
&esw->offloads.mod_hdr, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
}
static void
......@@ -436,7 +441,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts,
u8 ct_state,
u32 mark,
u32 label,
u32 labels_id,
u8 zone_restore_id)
{
struct mlx5_eswitch *esw = ct_priv->esw;
......@@ -453,7 +458,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
return err;
err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
LABELS_TO_REG, label);
LABELS_TO_REG, labels_id);
if (err)
return err;
......@@ -597,13 +602,10 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (!meta)
return -EOPNOTSUPP;
if (meta->ct_metadata.labels[1] ||
meta->ct_metadata.labels[2] ||
meta->ct_metadata.labels[3]) {
ct_dbg("Failed to offload ct entry due to unsupported label");
err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
&attr->ct_attr.ct_labels_id);
if (err)
return -EOPNOTSUPP;
}
if (nat) {
err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
&mod_acts);
......@@ -617,7 +619,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
meta->ct_metadata.mark,
meta->ct_metadata.labels[0],
attr->ct_attr.ct_labels_id,
zone_restore_id);
if (err)
goto err_mapping;
......@@ -637,6 +639,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
err_mapping:
dealloc_mod_hdr_actions(&mod_acts);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
return err;
}
......@@ -959,6 +962,7 @@ int
mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
......@@ -969,6 +973,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
u16 ct_state_on, ct_state_off;
u16 ct_state, ct_state_mask;
struct flow_match_ct match;
u32 ct_labels[4];
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
......@@ -995,12 +1000,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
NL_SET_ERR_MSG_MOD(extack,
"only lower 32bits of ct_labels are supported for offload");
return -EOPNOTSUPP;
}
ct_state_on = ct_state & ct_state_mask;
ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
......@@ -1029,10 +1028,17 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
if (mask->ct_mark)
mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
key->ct_mark, mask->ct_mark);
if (mask->ct_labels[0])
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
key->ct_labels[0],
mask->ct_labels[0]);
if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] ||
mask->ct_labels[3]) {
ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0];
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
}
return 0;
}
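
The motivation for the labels mapping: ct_labels are 128 bits wide (u32[4]) but the register used for matching (LABELS_TO_REG) holds only 32 bits, so the full masked label value is translated to a compact 32-bit id, and that id is what gets programmed and matched. A minimal usage sketch of the mapping API as used here (the label value is illustrative):

/* Illustrative only: translate a 128-bit label to a 32-bit id and back. */
u32 ct_labels[4] = { 0xdeadbeef, 0, 0, 0 };
u32 labels_id;
int err;

err = mapping_add(ct_priv->labels_mapping, ct_labels, &labels_id);
if (err)
	return err;
/* ... match or set LABELS_TO_REG against labels_id ... */
mapping_remove(ct_priv->labels_mapping, labels_id);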
......@@ -1398,7 +1404,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* + tuple + zone match +
* +--------------------+
* | set mark
* | set label
* | set labels_id
* | set established
* | set zone_restore
* | do nat (if needed)
......@@ -1789,7 +1795,13 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
err = PTR_ERR(ct_priv->zone_mapping);
goto err_mapping;
goto err_mapping_zone;
}
ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true);
if (IS_ERR(ct_priv->labels_mapping)) {
err = PTR_ERR(ct_priv->labels_mapping);
goto err_mapping_labels;
}
ct_priv->esw = esw;
......@@ -1833,8 +1845,10 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
err_ct_nat_tbl:
mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
err_ct_tbl:
mapping_destroy(ct_priv->labels_mapping);
err_mapping_labels:
mapping_destroy(ct_priv->zone_mapping);
err_mapping:
err_mapping_zone:
kfree(ct_priv);
err_alloc:
err_support:
......@@ -1854,6 +1868,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
mapping_destroy(ct_priv->labels_mapping);
rhashtable_destroy(&ct_priv->ct_tuples_ht);
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
......
......@@ -25,6 +25,7 @@ struct mlx5_ct_attr {
u16 ct_action;
struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
};
#define zone_to_reg_ct {\
......@@ -90,6 +91,7 @@ int
mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack);
int
mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
......@@ -132,6 +134,7 @@ static inline int
mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
......
......@@ -34,6 +34,7 @@
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
#include <linux/indirect_call_wrapper.h>
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
......@@ -114,7 +115,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
xdpi.page.di = *di;
}
return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);
return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
}
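
INDIRECT_CALL_2 avoids a retpoline-penalized indirect branch by comparing the function pointer against the expected implementations and calling the match directly. Roughly (paraphrased from include/linux/indirect_call_wrapper.h; with CONFIG_RETPOLINE unset it degenerates to a plain indirect call):

#define INDIRECT_CALL_1(f, f1, ...)                                    \
	({                                                             \
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);    \
	})
#define INDIRECT_CALL_2(f, f2, f1, ...)                                \
	({                                                             \
		likely(f == f2) ? f2(__VA_ARGS__) :                    \
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
	})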
/* returns true if packet was consumed by xdp */
......@@ -237,7 +239,7 @@ enum {
MLX5E_XDP_CHECK_START_MPWQE = 2,
};
static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
......@@ -256,10 +258,9 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
return MLX5E_XDP_CHECK_OK;
}
static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
......@@ -293,7 +294,7 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
return true;
}
static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
/* SQ is full, ring doorbell */
......@@ -305,10 +306,9 @@ static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
return MLX5E_XDP_CHECK_OK;
}
static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result)
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
......@@ -506,6 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct xdp_frame *xdpf = frames[i];
struct mlx5e_xdp_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
bool ret;
xdptxd.data = xdpf->data;
xdptxd.len = xdpf->len;
......@@ -522,7 +523,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.frame.xdpf = xdpf;
xdpi.frame.dma_addr = xdptxd.dma_addr;
if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) {
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
if (unlikely(!ret)) {
dma_unmap_single(sq->pdev, xdptxd.dma_addr,
xdptxd.len, DMA_TO_DEVICE);
xdp_return_frame_rx_napi(xdpf);
......
......@@ -32,6 +32,8 @@
#ifndef __MLX5_EN_XDP_H__
#define __MLX5_EN_XDP_H__
#include <linux/indirect_call_wrapper.h>
#include "en.h"
#include "en/txrx.h"
......@@ -70,6 +72,17 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
......
......@@ -6,6 +6,7 @@
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>
#include <linux/indirect_call_wrapper.h>
int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
......@@ -75,8 +76,12 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;
for (; budget; budget--) {
int check_result = sq->xmit_xdp_frame_check(sq);
int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,
mlx5e_xmit_xdp_frame_check_mpwqe,
mlx5e_xmit_xdp_frame_check,
sq);
struct xdp_desc desc;
bool ret;
if (unlikely(check_result < 0)) {
work_done = false;
......@@ -98,7 +103,9 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
if (unlikely(!ret)) {
if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
......
......@@ -148,16 +148,6 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
return true;
}
static inline int mlx5e_accel_sk_get_rxq(struct sock *sk)
{
int rxq = sk_rx_queue_get(sk);
if (unlikely(rxq == -1))
rxq = 0;
return rxq;
}
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
return mlx5e_ktls_init_rx(priv);
......
......@@ -40,7 +40,7 @@
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec_fs.h"
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
......@@ -111,7 +111,7 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct xfrm_replay_state_esn *replay_esn;
u32 seq_bottom;
u32 seq_bottom = 0;
u8 overlap;
u32 *esn;
......@@ -121,7 +121,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
}
replay_esn = sa_entry->x->replay_esn;
if (replay_esn->seq >= replay_esn->replay_window)
seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
overlap = sa_entry->esn_state.overlap;
sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
......@@ -282,6 +284,27 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return 0;
}
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return 0;
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
sa_entry->ipsec_obj_id,
&sa_entry->ipsec_rule);
}
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!mlx5_is_ipsec_device(priv->mdev))
return;
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
&sa_entry->ipsec_rule);
}
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
......@@ -329,10 +352,15 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
goto err_xfrm;
}
sa_entry->ipsec_obj_id = sa_handle;
err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
if (err)
goto err_hw_ctx;
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
if (err)
goto err_hw_ctx;
goto err_add_rule;
} else {
sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
......@@ -341,8 +369,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_add_rule:
mlx5e_xfrm_fs_del_rule(priv, sa_entry);
err_hw_ctx:
mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sa_entry:
......@@ -366,13 +396,15 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
if (!sa_entry)
return;
if (sa_entry->hw_context) {
flush_workqueue(sa_entry->ipsec->wq);
mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
mlx5e_xfrm_fs_del_rule(priv, sa_entry);
mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
......@@ -405,6 +437,8 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
kfree(ipsec);
return -ENOMEM;
}
mlx5e_accel_ipsec_fs_init(priv);
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
}
......@@ -416,6 +450,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
mlx5e_accel_ipsec_fs_cleanup(priv);
destroy_workqueue(ipsec->wq);
ida_destroy(&ipsec->halloc);
......
......@@ -75,6 +75,8 @@ struct mlx5e_ipsec_stats {
u64 ipsec_cmd_drop;
};
struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
......@@ -84,6 +86,7 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
};
struct mlx5e_ipsec_esn_state {
......@@ -92,6 +95,11 @@ struct mlx5e_ipsec_esn_state {
u8 overlap: 1;
};
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *set_modify_hdr;
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
......@@ -102,6 +110,8 @@ struct mlx5e_ipsec_sa_entry {
void *hw_context;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
u32 ipsec_obj_id;
struct mlx5e_ipsec_rule ipsec_rule;
};
void mlx5e_ipsec_build_inverse_table(void);
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_STEERING_H__
#define __MLX5_IPSEC_STEERING_H__
#include "en.h"
#include "ipsec.h"
#include "accel/ipsec_offload.h"
#include "en/fs.h"
#ifdef CONFIG_MLX5_EN_IPSEC
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule);
#else
static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
#endif
#endif /* __MLX5_IPSEC_STEERING_H__ */
......@@ -360,6 +360,62 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
return skb;
}
enum {
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
struct mlx5e_priv *priv;
struct xfrm_offload *xo;
struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
priv = netdev_priv(netdev);
sp = secpath_set(skb);
if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return;
}
xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
if (unlikely(!xs)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return;
}
sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
xo->status = CRYPTO_SUCCESS;
if (WARN_ON_ONCE(priv->ipsec->no_trailer))
xo->flags |= XFRM_ESP_NO_TRAILER;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
break;
case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features)
{
......
......@@ -34,13 +34,17 @@
#ifndef __MLX5E_IPSEC_RXTX_H__
#define __MLX5E_IPSEC_RXTX_H__
#ifdef CONFIG_MLX5_EN_IPSEC
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
#include "en/txrx.h"
#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
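/* Worked decode of the layout above (illustrative value, not from the
 * patch): ft_metadata = 0x00000180
 *   marker   = 0x180 & 0x80        -> set, this is an IPsec RX flow
 *   syndrome = 0x180 & 0x7F        -> 0 (DECRYPTED)
 *   handle   = (0x180 >> 8) & 0xFF -> SA handle 1
 */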
#ifdef CONFIG_MLX5_EN_IPSEC
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
......@@ -55,7 +59,21 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
struct mlx5_wqe_eth_seg *eseg,
struct sk_buff *skb);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
}
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe)
{}
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
......@@ -547,6 +547,16 @@ void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
queue_work(rule->priv->tls->rx_wq, &rule->work);
}
static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
int rxq = sk_rx_queue_get(sk);
if (unlikely(rxq == -1))
rxq = 0;
return rxq;
}
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct tls_crypto_info *crypto_info,
u32 start_offload_tcp_sn)
......@@ -573,7 +583,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
rxq = mlx5e_accel_sk_get_rxq(sk);
rxq = mlx5e_ktls_sk_get_rxq(sk);
priv_rx->rxq = rxq;
priv_rx->sk = sk;
......
......@@ -65,6 +65,7 @@
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
......@@ -231,7 +232,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
cseg->umr_mkey = rq->mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
......@@ -496,7 +496,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
if (c->priv->ipsec)
if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
c->priv->ipsec)
rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
else
#endif
......
......@@ -1196,18 +1196,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
if (rep->vport == MLX5_VPORT_UPLINK) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pfnum;
memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
if (rep->vport == MLX5_VPORT_UPLINK)
devlink_port_attrs_set(&rpriv->dl_port, &attrs);
else if (rep->vport == MLX5_VPORT_PF)
} else if (rep->vport == MLX5_VPORT_PF) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum);
else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
pfnum, rep->vport - 1);
}
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
......
......@@ -973,9 +973,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
u8 ipproto = get_ip_proto(skb, network_depth, proto);
if (unlikely(ipproto == IPPROTO_SCTP))
goto csum_unnecessary;
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
goto csum_none;
stats->csum_complete++;
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
......@@ -1021,6 +1026,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
......@@ -1258,7 +1266,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto free_wqe;
}
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
......
......@@ -4401,7 +4401,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
&flow->esw_attr->ct_attr, extack);
if (err)
goto err_free;
......
......@@ -1652,7 +1652,17 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
return 0;
mutex_lock(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
} else {
enum mlx5_eswitch_vport_event vport_events;
vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
if (!ret)
esw->esw_funcs.num_vfs = num_vfs;
}
mutex_unlock(&esw->mode_lock);
return ret;
}
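
With eswitch_devlink_esw_mode_check gone (see the offloads hunks below) and mlx5_eswitch_enable now handling an already-enabled eswitch, switchdev mode can be entered directly without first enabling legacy SRIOV, e.g. (illustrative device address):

devlink dev eswitch set pci/0000:03:00.0 mode switchdev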
......@@ -1699,6 +1709,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
mutex_lock(&esw->mode_lock);
mlx5_eswitch_disable_locked(esw, clear_vf);
esw->esw_funcs.num_vfs = 0;
mutex_unlock(&esw->mode_lock);
}
......
......@@ -513,16 +513,9 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
/* Ideally device should have the functions changed supported
* capability regardless of it being ECPF or PF wherever such
* event should be processed such as on eswitch manager device.
* However, some ECPF based device might not have this capability
* set. Hence OR for ECPF check to cover such device.
*/
return MLX5_CAP_ESW(dev, esw_functions_changed) ||
mlx5_core_is_ecpf_esw_manager(dev);
return mlx5_core_is_ecpf_esw_manager(dev);
}
static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
......
......@@ -1578,13 +1578,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
{
int err, err1;
if (esw->mode != MLX5_ESWITCH_LEGACY &&
!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set offloads mode, SRIOV legacy not enabled");
return -EINVAL;
}
mlx5_eswitch_disable_locked(esw, false);
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
esw->dev->priv.sriov.num_vfs);
......@@ -2293,7 +2286,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
{
u16 cur_mlx5_mode, mlx5_mode = 0;
struct mlx5_eswitch *esw;
int err;
int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
......@@ -2303,12 +2296,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return -EINVAL;
mutex_lock(&esw->mode_lock);
err = eswitch_devlink_esw_mode_check(esw);
if (err)
goto unlock;
cur_mlx5_mode = esw->mode;
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
......
......@@ -359,7 +359,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
return ret;
}
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
......@@ -370,7 +370,7 @@ unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
number_of_ipsec_counters);
}
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
......@@ -665,12 +665,10 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
return true;
}
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6,
u32 *sa_handle)
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *sa_handle)
{
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
......@@ -862,7 +860,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
mutex_unlock(&fipsec->sa_hash_lock);
}
void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
......@@ -1264,7 +1262,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flo
}
}
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_conn_attr init_attr = {0};
struct mlx5_fpga_device *fdev = mdev->fpga;
......@@ -1346,7 +1344,7 @@ static void destroy_rules_rb(struct rb_root *root)
}
}
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
......@@ -1451,7 +1449,7 @@ mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
return 0;
}
struct mlx5_accel_esp_xfrm *
static struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags)
......@@ -1479,7 +1477,7 @@ mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
return &fpga_xfrm->accel_xfrm;
}
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_fpga_esp_xfrm *fpga_xfrm =
container_of(xfrm, struct mlx5_fpga_esp_xfrm,
......@@ -1488,7 +1486,7 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
kfree(fpga_xfrm);
}
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = xfrm->mdev;
......@@ -1560,3 +1558,24 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
mutex_unlock(&fpga_xfrm->lock);
return err;
}
static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
.device_caps = mlx5_fpga_ipsec_device_caps,
.counters_count = mlx5_fpga_ipsec_counters_count,
.counters_read = mlx5_fpga_ipsec_counters_read,
.create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
.free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
.init = mlx5_fpga_ipsec_init,
.cleanup = mlx5_fpga_ipsec_cleanup,
.esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
.esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
.esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
};
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{
if (!mlx5_fpga_is_ipsec_device(mdev))
return NULL;
return &fpga_ipsec_ops;
}
......@@ -38,44 +38,23 @@
#include "fs_cmd.h"
#ifdef CONFIG_MLX5_FPGA_IPSEC
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int counters_count);
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6,
u32 *sa_handle);
void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
void mlx5_fpga_ipsec_build_fs_cmds(void);
struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 flags);
void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
#else
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{ return NULL; }
static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
static inline const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
return mlx5_fs_cmd_get_default(type);
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */
......@@ -459,6 +459,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
......
......@@ -105,7 +105,7 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {aRFS/accel} */
/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 6
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
......
......@@ -250,6 +250,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
if (MLX5_CAP_GEN(dev, ipsec_offload)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
if (err)
return err;
}
return 0;
}
......
......@@ -6,7 +6,7 @@
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
u32 *p_key_id)
u32 key_type, u32 *p_key_id)
{
u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
......@@ -41,8 +41,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
memcpy(key_p, key, sz_bytes);
MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
MLX5_SET(encryption_key_obj, obj, key_type,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
MLX5_SET(encryption_key_obj, obj, key_type, key_type);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
......
......@@ -80,8 +80,14 @@ void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
/* Crypto */
enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
};
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes, u32 *p_key_id);
void *key, u32 sz_bytes,
u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
......
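After this change every caller passes an explicit key type; the ktls hunk earlier shows the TLS side, and the IPsec offload path is expected to pass the IPsec type. An illustrative call, mirroring the TLS one:

/* Illustrative: create an IPsec encryption key object instead of a TLS key. */
err = mlx5_create_encryption_key(mdev, key, sz_bytes,
				 MLX5_ACCEL_OBJ_IPSEC_KEY, &key_id);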
......@@ -1089,11 +1089,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fpga_start;
}
err = mlx5_accel_ipsec_init(dev);
if (err) {
mlx5_core_err(dev, "IPSec device start failed %d\n", err);
goto err_ipsec_start;
}
mlx5_accel_ipsec_init(dev);
err = mlx5_accel_tls_init(dev);
if (err) {
......@@ -1135,7 +1131,6 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_accel_tls_cleanup(dev);
err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
......@@ -1628,7 +1623,7 @@ static int __init init(void)
get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
mlx5_core_verify_params();
mlx5_accel_ipsec_build_fs_cmds();
mlx5_fpga_ipsec_build_fs_cmds();
mlx5_register_debugfs();
err = pci_register_driver(&mlx5_core_driver);
......
......@@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
};
#ifdef CONFIG_MLX5_FPGA_IPSEC
#ifdef CONFIG_MLX5_ACCEL
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
......@@ -152,5 +152,5 @@ static inline int
mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
#endif
#endif
#endif /* CONFIG_MLX5_ACCEL */
#endif /* __MLX5_ACCEL_H__ */
......@@ -707,6 +707,9 @@ struct mlx5_core_dev {
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
......
......@@ -207,7 +207,10 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
union {
u32 ipsec_obj_id;
uintptr_t esp_id;
};
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
......
......@@ -416,7 +416,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reformat_and_fwd_to_table[0x1];
u8 reserved_at_1a[0x6];
u8 reserved_at_1a[0x2];
u8 ipsec_encrypt[0x1];
u8 ipsec_decrypt[0x1];
u8 reserved_at_1e[0x2];
u8 termination_table_raw_traffic[0x1];
u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
......@@ -2965,6 +2969,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
};
enum {
......@@ -3006,7 +3012,8 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
u8 reserved_at_120[0xe0];
u8 ipsec_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
......@@ -5752,6 +5759,7 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
......