Commit ff254dad authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5-updates-2021-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-04-19

This patchset provides some updates to mlx5e and mlx5 SW steering drivers:

1) Tariq and Vladyslav both provide some trivial updates to the mlx5e netdev.

The next 12 patches in the patchset are focused toward mlx5 SW steering:
2) 3 trivial cleanup patches

3) Dynamic Flex parser support:
   Flex parser is a HW parser that can support protocols that are not
    natively supported by the HCA, such as Geneve (TLV options) and GTP-U.
    There are 8 such parsers, and each of them can be assigned to parse a
    specific set of protocols.

4) Enable matching on Geneve TLV options

5) Use Flex parser for MPLS over UDP/GRE

6) Enable matching on tunnel GTP-U and on the GTP-U first extension
   header, using the dynamic Flex parser

7) Improved QoS for SW steering internal QPair for a better insertion rate
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 316bcffe aeacb52a
...@@ -55,12 +55,17 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) ...@@ -55,12 +55,17 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{ {
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
if (dl_port->registered)
devlink_port_unregister(dl_port); devlink_port_unregister(dl_port);
} }
struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
struct devlink_port *port;
return mlx5e_devlink_get_dl_port(priv); port = mlx5e_devlink_get_dl_port(priv);
if (port->registered)
return port;
return NULL;
} }
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include "en/port.h" #include "en/port.h"
#include "en_accel/en_accel.h" #include "en_accel/en_accel.h"
#include "accel/ipsec.h" #include "accel/ipsec.h"
#include "fpga/ipsec.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params, static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk) struct mlx5e_xsk_param *xsk)
...@@ -89,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params, ...@@ -89,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
return !params->lro_en && linear_frag_sz <= PAGE_SIZE; return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
} }
#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
MLX5_MPWQE_LOG_STRIDE_SZ_BASE) u8 log_stride_sz, u8 log_num_strides)
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{ {
u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk); if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
s8 signed_log_num_strides_param; return false;
u8 log_num_strides;
if (!mlx5e_rx_is_linear_skb(params, xsk)) if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
return false; return false;
if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ) if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
return false; return false;
if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
return true; return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;
return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
{
s8 log_num_strides;
u8 log_stride_sz;
if (!mlx5e_rx_is_linear_skb(params, xsk))
return false;
log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
signed_log_num_strides_param = log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;
(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
return signed_log_num_strides_param >= 0; return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
} }
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params, u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
...@@ -282,7 +292,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, ...@@ -282,7 +292,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false; return false;
if (MLX5_IPSEC_DEV(mdev)) if (mlx5_fpga_is_ipsec_device(mdev))
return false; return false;
if (params->xdp_prog) { if (params->xdp_prog) {
...@@ -364,7 +374,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, ...@@ -364,7 +374,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
u32 buf_size = 0; u32 buf_size = 0;
int i; int i;
if (MLX5_IPSEC_DEV(mdev)) if (mlx5_fpga_is_ipsec_device(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN; byte_count += MLX5E_METADATA_ETHER_LEN;
if (mlx5e_rx_is_linear_skb(params, xsk)) { if (mlx5e_rx_is_linear_skb(params, xsk)) {
...@@ -461,7 +471,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, ...@@ -461,7 +471,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
} }
void mlx5e_build_rq_param(struct mlx5_core_dev *mdev, int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct mlx5e_xsk_param *xsk,
u16 q_counter, u16 q_counter,
...@@ -472,15 +482,25 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev, ...@@ -472,15 +482,25 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
int ndsegs = 1; int ndsegs = 1;
switch (params->rq_wq_type) { switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
log_wqe_num_of_strides)) {
mlx5_core_err(mdev,
"Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
log_wqe_stride_size, log_wqe_num_of_strides);
return -EINVAL;
}
MLX5_SET(wq, wq, log_wqe_num_of_strides, MLX5_SET(wq, wq, log_wqe_num_of_strides,
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) - log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
MLX5_SET(wq, wq, log_wqe_stride_size, MLX5_SET(wq, wq, log_wqe_stride_size,
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) - log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
break; break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */ default: /* MLX5_WQ_TYPE_CYCLIC */
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info); mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
...@@ -498,6 +518,8 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev, ...@@ -498,6 +518,8 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp); mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);
return 0;
} }
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
...@@ -642,14 +664,17 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, ...@@ -642,14 +664,17 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_tx_cq_param(mdev, params, &param->cqp); mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
} }
void mlx5e_build_channel_param(struct mlx5_core_dev *mdev, int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
u16 q_counter, u16 q_counter,
struct mlx5e_channel_param *cparam) struct mlx5e_channel_param *cparam)
{ {
u8 icosq_log_wq_sz, async_icosq_log_wq_sz; u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq); err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
if (err)
return err;
icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev); async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
...@@ -658,4 +683,6 @@ void mlx5e_build_channel_param(struct mlx5_core_dev *mdev, ...@@ -658,4 +683,6 @@ void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq); mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq); mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq); mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
return 0;
} }
...@@ -96,6 +96,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *para ...@@ -96,6 +96,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *para
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk); struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
...@@ -122,7 +124,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, ...@@ -122,7 +124,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */ /* Build queue parameters */
void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c); void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
void mlx5e_build_rq_param(struct mlx5_core_dev *mdev, int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct mlx5e_xsk_param *xsk,
u16 q_counter, u16 q_counter,
...@@ -141,7 +143,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev, ...@@ -141,7 +143,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
struct mlx5e_sq_param *param); struct mlx5e_sq_param *param);
void mlx5e_build_channel_param(struct mlx5_core_dev *mdev, int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, struct mlx5e_params *params,
u16 q_counter, u16 q_counter,
struct mlx5e_channel_param *cparam); struct mlx5e_channel_param *cparam);
......
...@@ -2086,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, ...@@ -2086,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam) if (!chs->c || !cparam)
goto err_free; goto err_free;
mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam); err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
if (err)
goto err_free;
for (i = 0; i < chs->num; i++) { for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL; struct xsk_buff_pool *xsk_pool = NULL;
...@@ -4886,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, ...@@ -4886,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev) struct net_device *netdev)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
struct devlink_port *dl_port;
int err; int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
...@@ -4901,6 +4905,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, ...@@ -4901,6 +4905,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (err) if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
dl_port = mlx5e_devlink_get_dl_port(priv);
if (dl_port->registered)
mlx5e_health_create_reporters(priv); mlx5e_health_create_reporters(priv);
return 0; return 0;
...@@ -4908,6 +4914,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, ...@@ -4908,6 +4914,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{ {
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
if (dl_port->registered)
mlx5e_health_destroy_reporters(priv); mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv); mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv); mlx5e_ipsec_cleanup(priv);
......
...@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev, ...@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
ft_attr.autogroup.max_num_groups = 1; ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) { if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table\n"); esw_warn(dev, "Failed to create termination table (error %d)\n",
IS_ERR(tt->termtbl));
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act, tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1); &tt->dest, 1);
if (IS_ERR(tt->rule)) { if (IS_ERR(tt->rule)) {
esw_warn(dev, "Failed to create termination table rule\n"); esw_warn(dev, "Failed to create termination table rule (error %d)\n",
IS_ERR(tt->rule));
goto add_flow_err; goto add_flow_err;
} }
return 0; return 0;
...@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw, ...@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
memcpy(&tt->flow_act, flow_act, sizeof(*flow_act)); memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));
err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act); err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
if (err) { if (err)
esw_warn(esw->dev, "Failed to create termination table\n");
goto tt_create_err; goto tt_create_err;
}
hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key); hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
tt_add_ref: tt_add_ref:
tt->ref_count++; tt->ref_count++;
...@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw, ...@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act, tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr); &dest[i], attr);
if (IS_ERR(tt)) { if (IS_ERR(tt)) {
esw_warn(esw->dev, "Failed to create termination table\n"); esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
IS_ERR(tt));
goto revert_changes; goto revert_changes;
} }
attr->dests[num_vport_dests].termtbl = tt; attr->dests[num_vport_dests].termtbl = tt;
......
...@@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev, ...@@ -85,15 +85,53 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
return 0; return 0;
} }
static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
u16 vport, bool *roce_en)
{
u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
int err;
MLX5_SET(query_nic_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*roce_en = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.roce_en);
return 0;
}
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
struct mlx5dr_cmd_caps *caps) struct mlx5dr_cmd_caps *caps)
{ {
bool roce_en;
int err;
caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required); caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager); caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
if (MLX5_CAP_GEN(mdev, roce)) {
err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
if (err)
return err;
caps->roce_caps.roce_en = roce_en;
caps->roce_caps.fl_rc_qp_when_roce_disabled =
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
caps->roce_caps.fl_rc_qp_when_roce_enabled =
MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
}
caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
...@@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, ...@@ -106,6 +144,34 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1); MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
} }
if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
caps->flex_parser_id_geneve_tlv_option_0 =
MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
caps->flex_parser_id_mpls_over_gre =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
if (caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
caps->flex_parser_id_mpls_over_udp =
MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
caps->flex_parser_id_gtpu_dw_0 =
MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
caps->flex_parser_id_gtpu_teid =
MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
caps->flex_parser_id_gtpu_dw_2 =
MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
caps->flex_parser_id_gtpu_first_ext_dw_0 =
MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
caps->nic_rx_drop_address = caps->nic_rx_drop_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address); MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
caps->nic_tx_drop_address = caps->nic_tx_drop_address =
......
...@@ -92,15 +92,17 @@ static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc) ...@@ -92,15 +92,17 @@ static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
misc->gre_k_present || misc->gre_s_present); misc->gre_k_present || misc->gre_s_present);
} }
#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET(_misc2, gre_udp) ( \ #define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc2).outer_first_mpls_over_##gre_udp##_label || \ (_misc)->outer_first_mpls_over_gre_label || \
(_misc2).outer_first_mpls_over_##gre_udp##_exp || \ (_misc)->outer_first_mpls_over_gre_exp || \
(_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \ (_misc)->outer_first_mpls_over_gre_s_bos || \
(_misc2).outer_first_mpls_over_##gre_udp##_ttl) (_misc)->outer_first_mpls_over_gre_ttl)
#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \ #define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ (_misc)->outer_first_mpls_over_udp_label || \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) (_misc)->outer_first_mpls_over_udp_exp || \
(_misc)->outer_first_mpls_over_udp_s_bos || \
(_misc)->outer_first_mpls_over_udp_ttl)
static bool static bool
dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
...@@ -133,6 +135,11 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc) ...@@ -133,6 +135,11 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
misc->geneve_opt_len; misc->geneve_opt_len;
} }
static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
{
return misc3->geneve_tlv_option_0_data;
}
static bool static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{ {
...@@ -148,6 +155,109 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask, ...@@ -148,6 +155,109 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
dr_matcher_supp_tnl_geneve(&dmn->info.caps); dr_matcher_supp_tnl_geneve(&dmn->info.caps);
} }
static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
{
return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
}
static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
}
static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
}
static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
}
static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return mask->misc3.gtpu_dw_0 &&
dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
}
static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
}
static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return mask->misc3.gtpu_teid &&
dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
}
static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
}
static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return mask->misc3.gtpu_dw_2 &&
dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
}
static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
}
static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return mask->misc3.gtpu_first_ext_dw_0 &&
dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
}
static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
(dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
(dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
(dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
(dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
(dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
(dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
dr_mask_is_tnl_gtpu(mask, dmn);
}
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{ {
return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) || return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
...@@ -199,6 +309,65 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc) ...@@ -199,6 +309,65 @@ static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
return (misc->source_sqn || misc->source_port); return (misc->source_sqn || misc->source_port);
} }
static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
u32 flex_parser_value)
{
if (flex_parser_id)
return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
/* Using flex_parser 0 means that id is zero, thus value must be set. */
return flex_parser_value;
}
static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
{
return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
misc4->prog_sample_field_value_0) ||
dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
misc4->prog_sample_field_value_1) ||
dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
misc4->prog_sample_field_value_2) ||
dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
misc4->prog_sample_field_value_3));
}
static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
{
return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
}
static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
{
return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
}
static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
}
static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
}
static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
{
return caps->flex_protocols & mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
}
static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn)
{
return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
}
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher, struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv, enum mlx5dr_ipv outer_ipv,
...@@ -251,6 +420,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, ...@@ -251,6 +420,9 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3) if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
mask.misc3 = matcher->mask.misc3; mask.misc3 = matcher->mask.misc3;
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
mask.misc4 = matcher->mask.misc4;
ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria, ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
&matcher->mask, NULL); &matcher->mask, NULL);
if (ret) if (ret)
...@@ -321,9 +493,28 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, ...@@ -321,9 +493,28 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn)) if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++], mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn)) else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++], mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, inner, rx);
if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (dr_mask_is_tnl_gtpu(&mask, dmn))
mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++], mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
...@@ -333,17 +524,20 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, ...@@ -333,17 +524,20 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, &dmn->info.caps,
inner, rx);
else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) { if (dr_mask_is_icmp(&mask, dmn))
ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++], mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps, &mask, &dmn->info.caps,
inner, rx); inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc)) if (dr_mask_is_tnl_gre_set(&mask.misc))
mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++], mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, inner, rx);
...@@ -404,10 +598,26 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, ...@@ -404,10 +598,26 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++], mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++], mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
&mask, inner, rx); &mask, &dmn->info.caps,
inner, rx);
else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
}
if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
&mask, false, rx);
if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
&mask, false, rx);
} }
/* Empty matcher, takes all */ /* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
......
...@@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, ...@@ -952,6 +952,17 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false; return false;
} }
} }
if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
s_idx = offsetof(struct mlx5dr_match_param, misc4);
e_idx = min(s_idx + sizeof(param->misc4), value_size);
if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
mlx5dr_err(matcher->tbl->dmn,
"Rule misc4 parameters contains a value not specified by mask\n");
return false;
}
}
return true; return true;
} }
......
...@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr { ...@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr {
u8 min_rnr_timer; u8 min_rnr_timer;
u8 sgid_index; u8 sgid_index;
u16 udp_src_port; u16 udp_src_port;
u8 fl:1;
}; };
struct dr_qp_rts_attr { struct dr_qp_rts_attr {
...@@ -45,6 +46,7 @@ struct dr_qp_init_attr { ...@@ -45,6 +46,7 @@ struct dr_qp_init_attr {
u32 pdn; u32 pdn;
u32 max_send_wr; u32 max_send_wr;
struct mlx5_uars_page *uar; struct mlx5_uars_page *uar;
u8 isolate_vl_tc:1;
}; };
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64) static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
...@@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev, ...@@ -157,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
MLX5_SET(qpc, qpc, pd, attr->pdn); MLX5_SET(qpc, qpc, pd, attr->pdn);
MLX5_SET(qpc, qpc, uar_page, attr->uar->index); MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
MLX5_SET(qpc, qpc, log_page_size, MLX5_SET(qpc, qpc, log_page_size,
...@@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev, ...@@ -213,7 +216,7 @@ static void dr_destroy_qp(struct mlx5_core_dev *mdev,
static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
{ {
dma_wmb(); dma_wmb();
*dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff); *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
/* After wmb() the hw aware of new work */ /* After wmb() the hw aware of new work */
wmb(); wmb();
...@@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl) ...@@ -223,7 +226,7 @@ static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
u32 rkey, struct dr_data_seg *data_seg, u32 rkey, struct dr_data_seg *data_seg,
u32 opcode, int nreq) u32 opcode, bool notify_hw)
{ {
struct mlx5_wqe_raddr_seg *wq_raddr; struct mlx5_wqe_raddr_seg *wq_raddr;
struct mlx5_wqe_ctrl_seg *wq_ctrl; struct mlx5_wqe_ctrl_seg *wq_ctrl;
...@@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr, ...@@ -255,16 +258,16 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++; dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
if (nreq) if (notify_hw)
dr_cmd_notify_hw(dr_qp, wq_ctrl); dr_cmd_notify_hw(dr_qp, wq_ctrl);
} }
static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info) static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
{ {
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
&send_info->write, MLX5_OPCODE_RDMA_WRITE, 0); &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey, dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
&send_info->read, MLX5_OPCODE_RDMA_READ, 1); &send_info->read, MLX5_OPCODE_RDMA_READ, true);
} }
/** /**
...@@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, ...@@ -650,6 +653,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
attr->udp_src_port); attr->udp_src_port);
MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num); MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
MLX5_SET(qpc, qpc, min_rnr_nak, 1); MLX5_SET(qpc, qpc, min_rnr_nak, 1);
MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP); MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
...@@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev, ...@@ -658,6 +662,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, init2rtr_qp, in); return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
} }
static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
{
/* Check whether RC RoCE QP creation with force loopback is allowed.
* There are two separate capability bits for this:
* - force loopback when RoCE is enabled
* - force loopback when RoCE is disabled
*/
return ((caps->roce_caps.roce_en &&
caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
(!caps->roce_caps.roce_en &&
caps->roce_caps.fl_rc_qp_when_roce_disabled));
}
static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
{ {
struct mlx5dr_qp *dr_qp = dmn->send_ring->qp; struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
...@@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn) ...@@ -676,17 +693,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
} }
/* RTR */ /* RTR */
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
if (ret)
return ret;
rtr_attr.mtu = mtu; rtr_attr.mtu = mtu;
rtr_attr.qp_num = dr_qp->qpn; rtr_attr.qp_num = dr_qp->qpn;
rtr_attr.min_rnr_timer = 12; rtr_attr.min_rnr_timer = 12;
rtr_attr.port_num = port; rtr_attr.port_num = port;
rtr_attr.sgid_index = gid_index;
rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp; rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
/* If QP creation with force loopback is allowed, then there
* is no need for GID index when creating the QP.
* Otherwise we query GID attributes and use GID index.
*/
rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
if (!rtr_attr.fl) {
ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
&rtr_attr.dgid_attr);
if (ret)
return ret;
rtr_attr.sgid_index = gid_index;
}
ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr); ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
if (ret) { if (ret) {
mlx5dr_err(dmn, "Failed modify QP init2rtr\n"); mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
...@@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn) ...@@ -900,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
init_attr.pdn = dmn->pdn; init_attr.pdn = dmn->pdn;
init_attr.uar = dmn->uar; init_attr.uar = dmn->uar;
init_attr.max_send_wr = QUEUE_SIZE; init_attr.max_send_wr = QUEUE_SIZE;
/* Isolated VL is applicable only if force loopback is supported */
if (dr_send_allow_fl(&dmn->info.caps))
init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
spin_lock_init(&dmn->send_ring->lock); spin_lock_init(&dmn->send_ring->lock);
dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr); dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
......
...@@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec) ...@@ -852,6 +852,35 @@ static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code); spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type); spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code); spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
spec->geneve_tlv_option_0_data =
MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
spec->gtpu_first_ext_dw_0 =
MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
}
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
{
spec->prog_sample_field_id_0 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
spec->prog_sample_field_value_0 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
spec->prog_sample_field_id_1 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
spec->prog_sample_field_value_1 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
spec->prog_sample_field_id_2 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
spec->prog_sample_field_value_2 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
spec->prog_sample_field_id_3 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
spec->prog_sample_field_value_3 =
MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
} }
void mlx5dr_ste_copy_param(u8 match_criteria, void mlx5dr_ste_copy_param(u8 match_criteria,
...@@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria, ...@@ -925,6 +954,20 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} }
dr_ste_copy_mask_misc3(buff, &set_param->misc3); dr_ste_copy_mask_misc3(buff, &set_param->misc3);
} }
param_location += sizeof(struct mlx5dr_match_misc3);
if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
if (mask->match_sz < param_location +
sizeof(struct mlx5dr_match_misc4)) {
memcpy(tail_param, data + param_location,
mask->match_sz - param_location);
buff = tail_param;
} else {
buff = data + param_location;
}
dr_ste_copy_mask_misc4(buff, &set_param->misc4);
}
} }
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
...@@ -1051,17 +1094,31 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1051,17 +1094,31 @@ void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_gre_init(sb, mask); ste_ctx->build_tnl_gre_init(sb, mask);
} }
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx) bool inner, bool rx)
{ {
sb->rx = rx; sb->rx = rx;
sb->inner = inner; sb->inner = inner;
ste_ctx->build_tnl_mpls_init(sb, mask); sb->caps = caps;
return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
} }
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps, struct mlx5dr_cmd_caps *caps,
...@@ -1070,7 +1127,7 @@ int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1070,7 +1127,7 @@ int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
sb->rx = rx; sb->rx = rx;
sb->inner = inner; sb->inner = inner;
sb->caps = caps; sb->caps = caps;
return ste_ctx->build_icmp_init(sb, mask); ste_ctx->build_icmp_init(sb, mask);
} }
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
...@@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1113,6 +1170,52 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_tnl_geneve_init(sb, mask); ste_ctx->build_tnl_geneve_init(sb, mask);
} }
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
sb->rx = rx;
sb->caps = caps;
sb->inner = inner;
ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
ste_ctx->build_tnl_gtpu_init(sb, mask);
}
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
sb->rx = rx;
sb->caps = caps;
sb->inner = inner;
ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
sb->rx = rx;
sb->caps = caps;
sb->inner = inner;
ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
...@@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -1148,6 +1251,26 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
ste_ctx->build_src_gvmi_qpn_init(sb, mask); ste_ctx->build_src_gvmi_qpn_init(sb, mask);
} }
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
ste_ctx->build_flex_parser_0_init(sb, mask);
}
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
sb->rx = rx;
sb->inner = inner;
ste_ctx->build_flex_parser_1_init(sb, mask);
}
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = { static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0, [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1, [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
......
...@@ -62,6 +62,13 @@ ...@@ -62,6 +62,13 @@
in_out##_first_mpls_ttl); \ in_out##_first_mpls_ttl); \
} while (0) } while (0)
#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
u8 parser_id = (caps)->flex_parser_id_##fname; \
u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
*(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\
(spec)->fname = 0;\
} while (0)
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\ #define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
(_misc)->outer_first_mpls_over_gre_label || \ (_misc)->outer_first_mpls_over_gre_label || \
(_misc)->outer_first_mpls_over_gre_exp || \ (_misc)->outer_first_mpls_over_gre_exp || \
...@@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 { ...@@ -86,8 +93,22 @@ enum dr_ste_action_modify_type_l4 {
DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2, DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
}; };
enum {
HDR_MPLS_OFFSET_LABEL = 12,
HDR_MPLS_OFFSET_EXP = 9,
HDR_MPLS_OFFSET_S_BOS = 8,
HDR_MPLS_OFFSET_TTL = 0,
};
u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask); u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
static inline u8 *
dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
{
/* Calculate tag byte offset based on flex parser id */
return tag + 4 * (3 - (parser_id % 4));
}
#define DR_STE_CTX_BUILDER(fname) \ #define DR_STE_CTX_BUILDER(fname) \
((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
struct mlx5dr_match_param *mask)) struct mlx5dr_match_param *mask))
...@@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx { ...@@ -106,14 +127,22 @@ struct mlx5dr_ste_ctx {
void DR_STE_CTX_BUILDER(mpls); void DR_STE_CTX_BUILDER(mpls);
void DR_STE_CTX_BUILDER(tnl_gre); void DR_STE_CTX_BUILDER(tnl_gre);
void DR_STE_CTX_BUILDER(tnl_mpls); void DR_STE_CTX_BUILDER(tnl_mpls);
int DR_STE_CTX_BUILDER(icmp); void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
void DR_STE_CTX_BUILDER(icmp);
void DR_STE_CTX_BUILDER(general_purpose); void DR_STE_CTX_BUILDER(general_purpose);
void DR_STE_CTX_BUILDER(eth_l4_misc); void DR_STE_CTX_BUILDER(eth_l4_misc);
void DR_STE_CTX_BUILDER(tnl_vxlan_gpe); void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
void DR_STE_CTX_BUILDER(tnl_geneve); void DR_STE_CTX_BUILDER(tnl_geneve);
void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
void DR_STE_CTX_BUILDER(register_0); void DR_STE_CTX_BUILDER(register_0);
void DR_STE_CTX_BUILDER(register_1); void DR_STE_CTX_BUILDER(register_1);
void DR_STE_CTX_BUILDER(src_gvmi_qpn); void DR_STE_CTX_BUILDER(src_gvmi_qpn);
void DR_STE_CTX_BUILDER(flex_parser_0);
void DR_STE_CTX_BUILDER(flex_parser_1);
void DR_STE_CTX_BUILDER(tnl_gtpu);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
/* Getters and Setters */ /* Getters and Setters */
void (*ste_init)(u8 *hw_ste_p, u16 lu_type, void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
......
...@@ -1248,32 +1248,29 @@ dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value, ...@@ -1248,32 +1248,29 @@ dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
u8 *tag) u8 *tag)
{ {
struct mlx5dr_match_misc2 *misc_2 = &value->misc2; struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
u32 mpls_hdr;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) { if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
misc_2, outer_first_mpls_over_gre_label); misc_2->outer_first_mpls_over_gre_label = 0;
mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, misc_2->outer_first_mpls_over_gre_exp = 0;
misc_2, outer_first_mpls_over_gre_exp); mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
misc_2->outer_first_mpls_over_gre_s_bos = 0;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
misc_2, outer_first_mpls_over_gre_s_bos); misc_2->outer_first_mpls_over_gre_ttl = 0;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2, outer_first_mpls_over_gre_ttl);
} else { } else {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label, mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
misc_2, outer_first_mpls_over_udp_label); misc_2->outer_first_mpls_over_udp_label = 0;
mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp, misc_2->outer_first_mpls_over_udp_exp = 0;
misc_2, outer_first_mpls_over_udp_exp); mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
misc_2->outer_first_mpls_over_udp_s_bos = 0;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos, mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
misc_2, outer_first_mpls_over_udp_s_bos); misc_2->outer_first_mpls_over_udp_ttl = 0;
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
misc_2, outer_first_mpls_over_udp_ttl);
} }
MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
return 0; return 0;
} }
...@@ -1288,6 +1285,91 @@ dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, ...@@ -1288,6 +1285,91 @@ dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag; sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
} }
static int
dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
u8 *parser_ptr;
u8 parser_id;
u32 mpls_hdr;
mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
misc2->outer_first_mpls_over_udp_label = 0;
mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
misc2->outer_first_mpls_over_udp_exp = 0;
mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
misc2->outer_first_mpls_over_udp_s_bos = 0;
mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
misc2->outer_first_mpls_over_udp_ttl = 0;
parser_id = sb->caps->flex_parser_id_mpls_over_udp;
parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
return 0;
}
static void
dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
/* STEs with lookup type FLEX_PARSER_{0/1} includes
* flex parsers_{0-3}/{4-7} respectively.
*/
sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
}
static int
dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
u8 *parser_ptr;
u8 parser_id;
u32 mpls_hdr;
mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
misc2->outer_first_mpls_over_gre_label = 0;
mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
misc2->outer_first_mpls_over_gre_exp = 0;
mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
misc2->outer_first_mpls_over_gre_s_bos = 0;
mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
misc2->outer_first_mpls_over_gre_ttl = 0;
parser_id = sb->caps->flex_parser_id_mpls_over_gre;
parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
return 0;
}
static void
dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
/* STEs with lookup type FLEX_PARSER_{0/1} includes
* flex parsers_{0-3}/{4-7} respectively.
*/
sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
}
#define ICMP_TYPE_OFFSET_FIRST_DW 24 #define ICMP_TYPE_OFFSET_FIRST_DW 24
#define ICMP_CODE_OFFSET_FIRST_DW 16 #define ICMP_CODE_OFFSET_FIRST_DW 16
...@@ -1300,9 +1382,11 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value, ...@@ -1300,9 +1382,11 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
u32 *icmp_header_data; u32 *icmp_header_data;
int dw0_location; int dw0_location;
int dw1_location; int dw1_location;
u8 *parser_ptr;
u8 *icmp_type; u8 *icmp_type;
u8 *icmp_code; u8 *icmp_code;
bool is_ipv4; bool is_ipv4;
u32 icmp_hdr;
is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) { if (is_ipv4) {
...@@ -1319,47 +1403,40 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value, ...@@ -1319,47 +1403,40 @@ dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
dw1_location = sb->caps->flex_parser_id_icmpv6_dw1; dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
} }
switch (dw0_location) { parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
case 4: icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
MLX5_SET(ste_flex_parser_1, tag, flex_parser_4, (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
(*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) | *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
(*icmp_code << ICMP_TYPE_OFFSET_FIRST_DW));
*icmp_type = 0;
*icmp_code = 0; *icmp_code = 0;
break; *icmp_type = 0;
default:
return -EINVAL;
}
switch (dw1_location) { parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
case 5: *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
*icmp_header_data);
*icmp_header_data = 0; *icmp_header_data = 0;
break;
default:
return -EINVAL;
}
return 0; return 0;
} }
static int static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb, dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask) struct mlx5dr_match_param *mask)
{ {
int ret; u8 parser_id;
bool is_ipv4;
ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask); dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
if (ret)
return ret;
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1; /* STEs with lookup type FLEX_PARSER_{0/1} includes
* flex parsers_{0-3}/{4-7} respectively.
*/
is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
sb->caps->flex_parser_id_icmpv6_dw0;
sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag; sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
return 0;
} }
static int static int
...@@ -1595,6 +1672,185 @@ dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, ...@@ -1595,6 +1672,185 @@ dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag; sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
} }
static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
u32 *misc4_field_value,
bool *parser_is_used,
u8 *tag)
{
u32 id = *misc4_field_id;
u8 *parser_ptr;
if (parser_is_used[id])
return;
parser_is_used[id] = true;
parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
*misc4_field_id = 0;
*misc4_field_value = 0;
}
static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
&misc_4_mask->prog_sample_field_value_0,
parser_is_used, tag);
dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
&misc_4_mask->prog_sample_field_value_1,
parser_is_used, tag);
dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
&misc_4_mask->prog_sample_field_value_2,
parser_is_used, tag);
dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
&misc_4_mask->prog_sample_field_value_3,
parser_is_used, tag);
return 0;
}
static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
}
static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
}
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
misc3->geneve_tlv_option_0_data);
misc3->geneve_tlv_option_0_data = 0;
return 0;
}
static void
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
/* STEs with lookup type FLEX_PARSER_{0/1} includes
* flex parsers_{0-3}/{4-7} respectively.
*/
sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
}
static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
gtpu_msg_flags, misc3,
gtpu_msg_flags);
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
gtpu_msg_type, misc3,
gtpu_msg_type);
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
gtpu_teid, misc3,
gtpu_teid);
return 0;
}
static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
}
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
return 0;
}
static void
dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
}
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
return 0;
}
static void
dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v0 = { struct mlx5dr_ste_ctx ste_ctx_v0 = {
/* Builders */ /* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init, .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
...@@ -1609,14 +1865,22 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = { ...@@ -1609,14 +1865,22 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.build_mpls_init = &dr_ste_v0_build_mpls_init, .build_mpls_init = &dr_ste_v0_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init, .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init, .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
.build_tnl_mpls_over_udp_init = &dr_ste_v0_build_tnl_mpls_over_udp_init,
.build_tnl_mpls_over_gre_init = &dr_ste_v0_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v0_build_icmp_init, .build_icmp_init = &dr_ste_v0_build_icmp_init,
.build_general_purpose_init = &dr_ste_v0_build_general_purpose_init, .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init, .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init, .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v0_build_register_0_init, .build_register_0_init = &dr_ste_v0_build_register_0_init,
.build_register_1_init = &dr_ste_v0_build_register_1_init, .build_register_1_init = &dr_ste_v0_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init, .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v0_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v0_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
/* Getters and Setters */ /* Getters and Setters */
.ste_init = &dr_ste_v0_init, .ste_init = &dr_ste_v0_init,
......
...@@ -1306,6 +1306,88 @@ static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb, ...@@ -1306,6 +1306,88 @@ static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag; sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
} }
static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u32 mpls_hdr;
	u8 *dst;

	/* Re-assemble the MPLS label stack entry (label/exp/s-bos/ttl)
	 * into one dword, laid out as it appears on the wire.
	 */
	mpls_hdr  = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;

	/* Zero the consumed fields so they are not matched again. */
	misc2->outer_first_mpls_over_udp_label = 0;
	misc2->outer_first_mpls_over_udp_exp = 0;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	/* Place the dword at the offset of the parser that extracts it. */
	dst = dr_ste_calc_flex_parser_offset(tag, sb->caps->flex_parser_id_mpls_over_udp);
	*(__be32 *)dst = cpu_to_be32(mpls_hdr);

	return 0;
}
static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
						   struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);

	/* A FLEX_PARSER_0 STE carries flex parsers 0-3, a FLEX_PARSER_1 STE
	 * carries parsers 4-7 - pick the lookup type matching the parser id.
	 */
	if (sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID)
		sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
	else
		sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_v1_build_tnl_mpls_over_udp_tag;
}
static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u32 mpls_hdr;
	u8 *dst;

	/* Re-assemble the MPLS label stack entry (label/exp/s-bos/ttl)
	 * into one dword, laid out as it appears on the wire.
	 */
	mpls_hdr  = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;

	/* Zero the consumed fields so they are not matched again. */
	misc2->outer_first_mpls_over_gre_label = 0;
	misc2->outer_first_mpls_over_gre_exp = 0;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	/* Place the dword at the offset of the parser that extracts it. */
	dst = dr_ste_calc_flex_parser_offset(tag, sb->caps->flex_parser_id_mpls_over_gre);
	*(__be32 *)dst = cpu_to_be32(mpls_hdr);

	return 0;
}
static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
						   struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);

	/* A FLEX_PARSER_0 STE carries flex parsers 0-3, a FLEX_PARSER_1 STE
	 * carries parsers 4-7 - pick the lookup type matching the parser id.
	 */
	if (sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID)
		sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
	else
		sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = dr_ste_v1_build_tnl_mpls_over_gre_tag;
}
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
u8 *tag) u8 *tag)
...@@ -1337,7 +1419,7 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value, ...@@ -1337,7 +1419,7 @@ static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
return 0; return 0;
} }
static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask) struct mlx5dr_match_param *mask)
{ {
dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask); dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
...@@ -1345,8 +1427,6 @@ static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb, ...@@ -1345,8 +1427,6 @@ static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O; sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask); sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag; sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
return 0;
} }
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value, static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
...@@ -1571,6 +1651,179 @@ static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb, ...@@ -1571,6 +1651,179 @@ static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag; sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
} }
static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 id = *misc4_field_id;
	u8 *parser_ptr;

	/* The parser id comes straight from the misc4 match param.
	 * parser_is_used[] has exactly DR_NUM_OF_FLEX_PARSERS entries and
	 * the tag offset calculation is only valid for real parsers, so an
	 * out-of-range id must be rejected before it is used as an index.
	 * Also skip a parser already consumed by an earlier misc4 field.
	 */
	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
		return;

	parser_is_used[id] = true;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);

	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);

	/* Zero the consumed field id/value pair in the match param. */
	*misc4_field_id = 0;
	*misc4_field_value = 0;
}
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc4 = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
	struct {
		u32 *id;
		u32 *val;
	} fields[] = {
		{ &misc4->prog_sample_field_id_0, &misc4->prog_sample_field_value_0 },
		{ &misc4->prog_sample_field_id_1, &misc4->prog_sample_field_value_1 },
		{ &misc4->prog_sample_field_id_2, &misc4->prog_sample_field_value_2 },
		{ &misc4->prog_sample_field_id_3, &misc4->prog_sample_field_value_3 },
	};
	int i;

	/* Consume the misc4 fields in declaration order; on a duplicate
	 * parser id only the first occurrence is written, since
	 * dr_ste_v1_set_flex_parser() skips ids already marked as used.
	 */
	for (i = 0; i < 4; i++)
		dr_ste_v1_set_flex_parser(fields[i].id, fields[i].val,
					  parser_is_used, tag);

	return 0;
}
static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 *parser_ptr;
	u8 parser_id;

	/* The Geneve TLV option data is extracted by a dedicated flex
	 * parser; write the value at that parser's dword inside the tag.
	 */
	parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);

	/* Zero the consumed field so it is not matched again. */
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
static void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
/* STEs with lookup type FLEX_PARSER_{0/1} includes
* flex parsers_{0-3}/{4-7} respectively.
*/
sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
}
static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
return 0;
}
static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
}
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
return 0;
}
static void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
}
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
uint8_t *tag)
{
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
return 0;
}
static void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask)
{
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
struct mlx5dr_ste_ctx ste_ctx_v1 = { struct mlx5dr_ste_ctx ste_ctx_v1 = {
/* Builders */ /* Builders */
.build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init, .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
...@@ -1585,14 +1838,23 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { ...@@ -1585,14 +1838,23 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.build_mpls_init = &dr_ste_v1_build_mpls_init, .build_mpls_init = &dr_ste_v1_build_mpls_init,
.build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init, .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
.build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init, .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
.build_tnl_mpls_over_udp_init = &dr_ste_v1_build_tnl_mpls_over_udp_init,
.build_tnl_mpls_over_gre_init = &dr_ste_v1_build_tnl_mpls_over_gre_init,
.build_icmp_init = &dr_ste_v1_build_icmp_init, .build_icmp_init = &dr_ste_v1_build_icmp_init,
.build_general_purpose_init = &dr_ste_v1_build_general_purpose_init, .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
.build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init, .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
.build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init, .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
.build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init, .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
.build_tnl_geneve_tlv_opt_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
.build_register_0_init = &dr_ste_v1_build_register_0_init, .build_register_0_init = &dr_ste_v1_build_register_0_init,
.build_register_1_init = &dr_ste_v1_build_register_1_init, .build_register_1_init = &dr_ste_v1_build_register_1_init,
.build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init, .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
.build_flex_parser_0_init = &dr_ste_v1_build_flex_parser_0_init,
.build_flex_parser_1_init = &dr_ste_v1_build_flex_parser_1_init,
.build_tnl_gtpu_init = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
/* Getters and Setters */ /* Getters and Setters */
.ste_init = &dr_ste_v1_init, .ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type, .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
......
...@@ -12,17 +12,30 @@ ...@@ -12,17 +12,30 @@
#include "mlx5_ifc_dr.h" #include "mlx5_ifc_dr.h"
#include "mlx5dr.h" #include "mlx5dr.h"
#define DR_RULE_MAX_STES 17 #define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5 #define DR_ACTION_MAX_STES 5
#define WIRE_PORT 0xFFFF #define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1 #define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2 #define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
#define DR_NUM_OF_FLEX_PARSERS 8
#define DR_STE_MAX_FLEX_0_ID 3
#define DR_STE_MAX_FLEX_1_ID 7
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg) #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg) #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
/* True if @parser_id belongs to flex parsers 0-3, which are carried in a
 * FLEX_PARSER_0 STE.
 */
static inline bool dr_is_flex_parser_0_id(u8 parser_id)
{
	return parser_id <= DR_STE_MAX_FLEX_0_ID;
}
/* True if @parser_id belongs to flex parsers 4-7, which are carried in a
 * FLEX_PARSER_1 STE. Complements dr_is_flex_parser_0_id().
 * NOTE(review): ids above DR_STE_MAX_FLEX_1_ID also return true here -
 * callers are assumed to pass only valid parser ids.
 */
static inline bool dr_is_flex_parser_1_id(u8 parser_id)
{
	return parser_id > DR_STE_MAX_FLEX_0_ID;
}
enum mlx5dr_icm_chunk_size { enum mlx5dr_icm_chunk_size {
DR_CHUNK_SIZE_1, DR_CHUNK_SIZE_1,
DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */ DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
...@@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria { ...@@ -87,7 +100,8 @@ enum mlx5dr_matcher_criteria {
DR_MATCHER_CRITERIA_INNER = 1 << 2, DR_MATCHER_CRITERIA_INNER = 1 << 2,
DR_MATCHER_CRITERIA_MISC2 = 1 << 3, DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
DR_MATCHER_CRITERIA_MISC3 = 1 << 4, DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
DR_MATCHER_CRITERIA_MAX = 1 << 5, DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
DR_MATCHER_CRITERIA_MAX = 1 << 6,
}; };
enum mlx5dr_action_type { enum mlx5dr_action_type {
...@@ -389,7 +403,17 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -389,7 +403,17 @@ void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
bool inner, bool rx); bool inner, bool rx);
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps, struct mlx5dr_cmd_caps *caps,
...@@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -402,6 +426,25 @@ void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
bool inner, bool rx); bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx, void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb, struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
...@@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx, ...@@ -419,6 +462,14 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_match_param *mask, struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn, struct mlx5dr_domain *dmn,
bool inner, bool rx); bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx); void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */ /* Actions utils */
...@@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 { ...@@ -646,7 +697,24 @@ struct mlx5dr_match_misc3 {
u8 icmpv6_type; u8 icmpv6_type;
u8 icmpv4_code; u8 icmpv4_code;
u8 icmpv4_type; u8 icmpv4_type;
u8 reserved_auto3[0x1c]; u32 geneve_tlv_option_0_data;
u8 gtpu_msg_flags;
u8 gtpu_msg_type;
u32 gtpu_teid;
u32 gtpu_dw_2;
u32 gtpu_first_ext_dw_0;
u32 gtpu_dw_0;
};
struct mlx5dr_match_misc4 {
u32 prog_sample_field_value_0;
u32 prog_sample_field_id_0;
u32 prog_sample_field_value_1;
u32 prog_sample_field_id_1;
u32 prog_sample_field_value_2;
u32 prog_sample_field_id_2;
u32 prog_sample_field_value_3;
u32 prog_sample_field_id_3;
}; };
struct mlx5dr_match_param { struct mlx5dr_match_param {
...@@ -655,6 +723,7 @@ struct mlx5dr_match_param { ...@@ -655,6 +723,7 @@ struct mlx5dr_match_param {
struct mlx5dr_match_spec inner; struct mlx5dr_match_spec inner;
struct mlx5dr_match_misc2 misc2; struct mlx5dr_match_misc2 misc2;
struct mlx5dr_match_misc3 misc3; struct mlx5dr_match_misc3 misc3;
struct mlx5dr_match_misc4 misc4;
}; };
#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ #define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
...@@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap { ...@@ -678,6 +747,12 @@ struct mlx5dr_cmd_vport_cap {
u32 num; u32 num;
}; };
/* RoCE-related device capability bits cached for SW steering.
 * fl_rc_qp_when_roce_{disabled,enabled}: presumably force-loopback RC QP
 * support depending on RoCE state - mirrors the same-named HCA cap bits.
 */
struct mlx5dr_roce_cap {
	u8 roce_en:1;
	u8 fl_rc_qp_when_roce_disabled:1;
	u8 fl_rc_qp_when_roce_enabled:1;
};
struct mlx5dr_cmd_caps { struct mlx5dr_cmd_caps {
u16 gvmi; u16 gvmi;
u64 nic_rx_drop_address; u64 nic_rx_drop_address;
...@@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps { ...@@ -692,6 +767,13 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_icmp_dw1; u8 flex_parser_id_icmp_dw1;
u8 flex_parser_id_icmpv6_dw0; u8 flex_parser_id_icmpv6_dw0;
u8 flex_parser_id_icmpv6_dw1; u8 flex_parser_id_icmpv6_dw1;
u8 flex_parser_id_geneve_tlv_option_0;
u8 flex_parser_id_mpls_over_gre;
u8 flex_parser_id_mpls_over_udp;
u8 flex_parser_id_gtpu_dw_0;
u8 flex_parser_id_gtpu_teid;
u8 flex_parser_id_gtpu_dw_2;
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level; u8 max_ft_level;
u16 roce_min_src_udp; u16 roce_min_src_udp;
u8 num_esw_ports; u8 num_esw_ports;
...@@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps { ...@@ -707,6 +789,8 @@ struct mlx5dr_cmd_caps {
struct mlx5dr_esw_caps esw_caps; struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps; struct mlx5dr_cmd_vport_cap *vports_caps;
bool prio_tag_required; bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
u8 isolate_vl_tc:1;
}; };
struct mlx5dr_domain_rx_tx { struct mlx5dr_domain_rx_tx {
...@@ -1081,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr { ...@@ -1081,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr {
u32 sq_wqe_cnt; u32 sq_wqe_cnt;
u32 rq_wqe_cnt; u32 rq_wqe_cnt;
u32 rq_wqe_shift; u32 rq_wqe_shift;
u8 isolate_vl_tc:1;
}; };
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
......
...@@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits { ...@@ -434,10 +434,7 @@ struct mlx5_ifc_ste_gre_bits {
}; };
struct mlx5_ifc_ste_flex_parser_0_bits { struct mlx5_ifc_ste_flex_parser_0_bits {
u8 parser_3_label[0x14]; u8 flex_parser_3[0x20];
u8 parser_3_exp[0x3];
u8 parser_3_s_bos[0x1];
u8 parser_3_ttl[0x8];
u8 flex_parser_2[0x20]; u8 flex_parser_2[0x20];
...@@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits { ...@@ -488,6 +485,17 @@ struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
u8 reserved_at_40[0x40]; u8 reserved_at_40[0x40];
}; };
/* Bit layout of the GTP-U tunnel-header STE match area (sizes in bits,
 * MLX5_SET/GET style): msg flags and type in the first dword, TEID in the
 * second.
 */
struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
	u8 reserved_at_0[0x5];
	u8 gtpu_msg_flags[0x3];
	u8 gtpu_msg_type[0x8];
	u8 reserved_at_10[0x10];

	u8 gtpu_teid[0x20];

	u8 reserved_at_40[0x40];
};
struct mlx5_ifc_ste_general_purpose_bits { struct mlx5_ifc_ste_general_purpose_bits {
u8 general_purpose_lookup_field[0x20]; u8 general_purpose_lookup_field[0x20];
......
...@@ -911,8 +911,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe) ...@@ -911,8 +911,11 @@ static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF; return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
} }
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9) #define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6) #define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
struct mpwrq_cqe_bc { struct mpwrq_cqe_bc {
__be16 filler_consumed_strides; __be16 filler_consumed_strides;
......
...@@ -622,7 +622,19 @@ struct mlx5_ifc_fte_match_set_misc3_bits { ...@@ -622,7 +622,19 @@ struct mlx5_ifc_fte_match_set_misc3_bits {
u8 geneve_tlv_option_0_data[0x20]; u8 geneve_tlv_option_0_data[0x20];
u8 reserved_at_140[0xc0]; u8 gtpu_teid[0x20];
u8 gtpu_msg_type[0x8];
u8 gtpu_msg_flags[0x8];
u8 reserved_at_170[0x10];
u8 gtpu_dw_2[0x20];
u8 gtpu_first_ext_dw_0[0x20];
u8 gtpu_dw_0[0x20];
u8 reserved_at_1e0[0x20];
}; };
struct mlx5_ifc_fte_match_set_misc4_bits { struct mlx5_ifc_fte_match_set_misc4_bits {
...@@ -949,7 +961,9 @@ struct mlx5_ifc_roce_cap_bits { ...@@ -949,7 +961,9 @@ struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1]; u8 roce_apm[0x1];
u8 reserved_at_1[0x3]; u8 reserved_at_1[0x3];
u8 sw_r_roce_src_udp_port[0x1]; u8 sw_r_roce_src_udp_port[0x1];
u8 reserved_at_5[0x19]; u8 fl_rc_qp_when_roce_disabled[0x1];
u8 fl_rc_qp_when_roce_enabled[0x1];
u8 reserved_at_7[0x17];
u8 qp_ts_format[0x2]; u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60]; u8 reserved_at_20[0x60];
...@@ -1237,9 +1251,17 @@ enum { ...@@ -1237,9 +1251,17 @@ enum {
enum { enum {
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
MLX5_FLEX_PARSER_GTPU_ENABLED = 1 << 11,
MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED = 1 << 16,
MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED = 1 << 18,
MLX5_FLEX_PARSER_GTPU_TEID_ENABLED = 1 << 19,
}; };
enum { enum {
...@@ -1297,7 +1319,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1297,7 +1319,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8]; u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8]; u8 log_max_qp_sz[0x8];
u8 event_cap[0x1]; u8 event_cap[0x1];
u8 reserved_at_91[0x7]; u8 reserved_at_91[0x2];
u8 isolate_vl_tc_new[0x1];
u8 reserved_at_94[0x4];
u8 prio_tag_required[0x1]; u8 prio_tag_required[0x1];
u8 reserved_at_99[0x2]; u8 reserved_at_99[0x2];
u8 log_max_qp[0x5]; u8 log_max_qp[0x5];
...@@ -1637,7 +1661,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1637,7 +1661,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cqe_compression_timeout[0x10]; u8 cqe_compression_timeout[0x10];
u8 cqe_compression_max_num[0x10]; u8 cqe_compression_max_num[0x10];
u8 reserved_at_5e0[0x10]; u8 reserved_at_5e0[0x8];
u8 flex_parser_id_gtpu_dw_0[0x4];
u8 reserved_at_5ec[0x4];
u8 tag_matching[0x1]; u8 tag_matching[0x1];
u8 rndv_offload_rc[0x1]; u8 rndv_offload_rc[0x1];
u8 rndv_offload_dc[0x1]; u8 rndv_offload_dc[0x1];
...@@ -1648,7 +1674,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1648,7 +1674,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 affiliate_nic_vport_criteria[0x8]; u8 affiliate_nic_vport_criteria[0x8];
u8 native_port_num[0x8]; u8 native_port_num[0x8];
u8 num_vhca_ports[0x8]; u8 num_vhca_ports[0x8];
u8 reserved_at_618[0x6]; u8 flex_parser_id_gtpu_teid[0x4];
u8 reserved_at_61c[0x2];
u8 sw_owner_id[0x1]; u8 sw_owner_id[0x1];
u8 reserved_at_61f[0x1]; u8 reserved_at_61f[0x1];
...@@ -1683,7 +1710,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { ...@@ -1683,7 +1710,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_6e0[0x10]; u8 reserved_at_6e0[0x10];
u8 sf_base_id[0x10]; u8 sf_base_id[0x10];
u8 reserved_at_700[0x8]; u8 flex_parser_id_gtpu_dw_2[0x4];
u8 flex_parser_id_gtpu_first_ext_dw_0[0x4];
u8 num_total_dynamic_vf_msix[0x18]; u8 num_total_dynamic_vf_msix[0x18];
u8 reserved_at_720[0x14]; u8 reserved_at_720[0x14];
u8 dynamic_msix_table_size[0xc]; u8 dynamic_msix_table_size[0xc];
...@@ -2918,7 +2946,8 @@ struct mlx5_ifc_qpc_bits { ...@@ -2918,7 +2946,8 @@ struct mlx5_ifc_qpc_bits {
u8 state[0x4]; u8 state[0x4];
u8 lag_tx_port_affinity[0x4]; u8 lag_tx_port_affinity[0x4];
u8 st[0x8]; u8 st[0x8];
u8 reserved_at_10[0x3]; u8 reserved_at_10[0x2];
u8 isolate_vl_tc[0x1];
u8 pm_state[0x2]; u8 pm_state[0x2];
u8 reserved_at_15[0x1]; u8 reserved_at_15[0x1];
u8 req_e2e_credit_mode[0x2]; u8 req_e2e_credit_mode[0x2];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment