Commit aeacb52a authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Add support for isolate_vl_tc QP

When using SW steering, the rule insertion rate depends on the
performance of the RDMA RC QP that is used for writing to the ICM.
Under stress, this QP competes for HW resources with all the other
QPs that are used to send data. To protect the SW steering QP's
performance in such cases, set this QP to use an isolated VL. The VL
number is reserved by FW and is not exposed to the driver.
Running this QP on an isolated VL is supported only when both the
force-loopback and isolate_vl_tc capabilities are set.
Signed-off-by: Alex Vesker <valex@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 7304d603
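
How the pieces fit together (a condensed sketch, not part of the patch;
it only stitches together the hunks below, where dr_send_allow_fl() is
the existing force-loopback helper from the parent commit):

    /* 1) Query time (dr_cmd.c): cache the FW capability */
    caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);

    /* 2) Send-ring setup (dr_send.c): request the isolated VL only
     *    when the force-loopback checks also pass */
    if (dr_send_allow_fl(&dmn->info.caps))
            init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;

    /* 3) QP creation (dr_send.c): set the QPC bit so FW places the
     *    QP on the reserved VL */
    MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);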
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -130,6 +130,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 			MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
 	}
 
+	caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+
 	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
 		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
 		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -46,6 +46,7 @@ struct dr_qp_init_attr {
 	u32 pdn;
 	u32 max_send_wr;
 	struct mlx5_uars_page *uar;
+	u8 isolate_vl_tc:1;
 };
 
 static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
@@ -158,6 +159,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+	MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
 	MLX5_SET(qpc, qpc, pd, attr->pdn);
 	MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
 	MLX5_SET(qpc, qpc, log_page_size,
@@ -924,6 +926,11 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 	init_attr.pdn = dmn->pdn;
 	init_attr.uar = dmn->uar;
 	init_attr.max_send_wr = QUEUE_SIZE;
+
+	/* Isolated VL is applicable only if force loopback is supported */
+	if (dr_send_allow_fl(&dmn->info.caps))
+		init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
 	spin_lock_init(&dmn->send_ring->lock);
 
 	dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
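
For context, dr_send_allow_fl() is the force-loopback check introduced
by parent commit 7304d603; a sketch of its logic (the exact body lives
in dr_send.c and may differ slightly):

    static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
    {
            /* Force-loopback is usable whether RoCE is enabled or
             * disabled, as long as the matching FW cap bit is set */
            return ((caps->roce_caps.roce_en &&
                     caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
                    (!caps->roce_caps.roce_en &&
                     caps->roce_caps.fl_rc_qp_when_roce_disabled));
    }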
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -790,6 +790,7 @@ struct mlx5dr_cmd_caps {
 	struct mlx5dr_cmd_vport_cap *vports_caps;
 	bool prio_tag_required;
 	struct mlx5dr_roce_cap roce_caps;
+	u8 isolate_vl_tc:1;
 };
 
 struct mlx5dr_domain_rx_tx {
@@ -1164,6 +1165,7 @@ struct mlx5dr_cmd_qp_create_attr {
 	u32 sq_wqe_cnt;
 	u32 rq_wqe_cnt;
 	u32 rq_wqe_shift;
+	u8 isolate_vl_tc:1;
 };
 
 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
include/linux/mlx5/mlx5_ifc.h
@@ -1319,7 +1319,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 log_max_srq_sz[0x8];
 	u8 log_max_qp_sz[0x8];
 	u8 event_cap[0x1];
-	u8 reserved_at_91[0x7];
+	u8 reserved_at_91[0x2];
+	u8 isolate_vl_tc_new[0x1];
+	u8 reserved_at_94[0x4];
 	u8 prio_tag_required[0x1];
 	u8 reserved_at_99[0x2];
 	u8 log_max_qp[0x5];
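
The mlx5_ifc change only carves the new bit out of existing reserved
space, so every later field keeps its offset: the old reserved_at_91[0x7]
run is split 0x2 + 0x1 + 0x4 around the new bit. A minimal compile-time
sketch of that accounting (the helper name is hypothetical):

    #include <linux/build_bug.h>

    /* Hypothetical check: the 2 + 1 + 4 split preserves the old
     * 7-bit gap, leaving prio_tag_required at bit 0x98 as before */
    static inline void isolate_vl_tc_layout_check(void)
    {
            BUILD_BUG_ON(0x2 + 0x1 + 0x4 != 0x7); /* width unchanged */
            BUILD_BUG_ON(0x91 + 0x2 != 0x93); /* isolate_vl_tc_new at 0x93 */
            BUILD_BUG_ON(0x94 + 0x4 != 0x98); /* prio_tag_required at 0x98 */
    }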