Commit 7304d603 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Add support for force-loopback QP

When supported by the device, the SW steering RoCE RC QP that is used to
write/read to/from ICM will be created with the force-loopback attribute.
Such a QP doesn't require a GID index upon creation.
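
As a minimal, self-contained sketch of the decision this change introduces
(hypothetical userspace types, not the driver code itself): force loopback is
used only when the capability bit matching the current RoCE state is set, and
only then can the GID query be skipped. This mirrors the intent of the new
dr_send_allow_fl() helper and the branch added in dr_prepare_qp_to_rts().

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-in for the RoCE capability bits queried from FW */
  struct roce_caps {
          bool roce_en;
          bool fl_rc_qp_when_roce_enabled;
          bool fl_rc_qp_when_roce_disabled;
  };

  /* Force loopback is allowed when the capability bit that matches the
   * current RoCE state (enabled or disabled) is set.
   */
  static bool allow_force_loopback(const struct roce_caps *caps)
  {
          return (caps->roce_en && caps->fl_rc_qp_when_roce_enabled) ||
                 (!caps->roce_en && caps->fl_rc_qp_when_roce_disabled);
  }

  int main(void)
  {
          struct roce_caps caps = {
                  .roce_en = true,
                  .fl_rc_qp_when_roce_enabled = true,
          };

          if (allow_force_loopback(&caps))
                  printf("FL QP: skip GID query, set primary_address_path.fl\n");
          else
                  printf("No FL: query GID and use sgid_index\n");
          return 0;
  }
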
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent df9dd15a
@@ -85,15 +85,51 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
         return 0;
 }
 
+static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
+                                          u16 vport, bool *roce_en)
+{
+        u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
+        u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
+        int err;
+
+        MLX5_SET(query_nic_vport_context_in, in, opcode,
+                 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+        MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+        MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
+
+        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+        if (err)
+                return err;
+
+        *roce_en = MLX5_GET(query_nic_vport_context_out, out,
+                            nic_vport_context.roce_en);
+        return 0;
+}
+
 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                             struct mlx5dr_cmd_caps *caps)
 {
+        bool roce_en;
+        int err;
+
         caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
         caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
         caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
         caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
         caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
 
+        if (MLX5_CAP_GEN(mdev, roce)) {
+                err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+                if (err)
+                        return err;
+
+                caps->roce_caps.roce_en = roce_en;
+                caps->roce_caps.fl_rc_qp_when_roce_disabled =
+                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+                caps->roce_caps.fl_rc_qp_when_roce_enabled =
+                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+        }
+
         if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
                 caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
                 caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
...
@@ -32,6 +32,7 @@ struct dr_qp_rtr_attr {
         u8 min_rnr_timer;
         u8 sgid_index;
         u16 udp_src_port;
+        u8 fl:1;
 };
 
 struct dr_qp_rts_attr {
@@ -650,6 +651,7 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
                  attr->udp_src_port);
         MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+        MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
         MLX5_SET(qpc, qpc, min_rnr_nak, 1);
 
         MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
@@ -658,6 +660,19 @@ static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
         return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
 }
 
+static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
+{
+        /* Check whether RC RoCE QP creation with force loopback is allowed.
+         * There are two separate capability bits for this:
+         * - force loopback when RoCE is enabled
+         * - force loopback when RoCE is disabled
+         */
+        return ((caps->roce_caps.roce_en &&
+                 caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
+                (!caps->roce_caps.roce_en &&
+                 caps->roce_caps.fl_rc_qp_when_roce_disabled));
+}
+
 static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
 {
         struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
@@ -676,17 +691,26 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
         }
 
         /* RTR */
-        ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
-        if (ret)
-                return ret;
-
         rtr_attr.mtu = mtu;
         rtr_attr.qp_num = dr_qp->qpn;
         rtr_attr.min_rnr_timer = 12;
         rtr_attr.port_num = port;
-        rtr_attr.sgid_index = gid_index;
         rtr_attr.udp_src_port = dmn->info.caps.roce_min_src_udp;
 
+        /* If QP creation with force loopback is allowed, then there
+         * is no need for GID index when creating the QP.
+         * Otherwise we query GID attributes and use GID index.
+         */
+        rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
+        if (!rtr_attr.fl) {
+                ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
+                                           &rtr_attr.dgid_attr);
+                if (ret)
+                        return ret;
+
+                rtr_attr.sgid_index = gid_index;
+        }
+
         ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
         if (ret) {
                 mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
...
@@ -747,6 +747,12 @@ struct mlx5dr_cmd_vport_cap {
         u32 num;
 };
 
+struct mlx5dr_roce_cap {
+        u8 roce_en:1;
+        u8 fl_rc_qp_when_roce_disabled:1;
+        u8 fl_rc_qp_when_roce_enabled:1;
+};
+
 struct mlx5dr_cmd_caps {
         u16 gvmi;
         u64 nic_rx_drop_address;
@@ -783,6 +789,7 @@ struct mlx5dr_cmd_caps {
         struct mlx5dr_esw_caps esw_caps;
         struct mlx5dr_cmd_vport_cap *vports_caps;
         bool prio_tag_required;
+        struct mlx5dr_roce_cap roce_caps;
 };
 
 struct mlx5dr_domain_rx_tx {
...
@@ -961,7 +961,9 @@ struct mlx5_ifc_roce_cap_bits {
         u8 roce_apm[0x1];
         u8 reserved_at_1[0x3];
         u8 sw_r_roce_src_udp_port[0x1];
-        u8 reserved_at_5[0x19];
+        u8 fl_rc_qp_when_roce_disabled[0x1];
+        u8 fl_rc_qp_when_roce_enabled[0x1];
+        u8 reserved_at_7[0x17];
         u8 qp_ts_format[0x2];
 
         u8 reserved_at_20[0x60];
@@ -2942,7 +2944,8 @@ struct mlx5_ifc_qpc_bits {
         u8 state[0x4];
         u8 lag_tx_port_affinity[0x4];
         u8 st[0x8];
-        u8 reserved_at_10[0x3];
+        u8 reserved_at_10[0x2];
+        u8 isolate_vl_tc[0x1];
         u8 pm_state[0x2];
         u8 reserved_at_15[0x1];
         u8 req_e2e_credit_mode[0x2];
...