Commit f1093940 authored by Ram Amrani, committed by David S. Miller

qed: Add support for QP verbs

Add support for the slowpath configuration of Queue Pair verbs,
which add, delete, modify and query Queue Pairs.
Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c295f86e
@@ -176,6 +176,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type);
u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
enum protocol_type type);
int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_WORKING_MEM 0
#define QED_CTX_FL_MEM 1
@@ -1107,6 +1107,1199 @@ err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
return rc;
}
static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
{
p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}
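For reference, the packing above turns the six MAC bytes into three 16-bit words, each stored little-endian. An illustrative expansion (not part of the patch): for the MAC 00:11:22:33:44:55 the helper yields

/* p_qed_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, so
 * p_fw_mac[0] = cpu_to_le16(0x0011);
 * p_fw_mac[1] = cpu_to_le16(0x2233);
 * p_fw_mac[2] = cpu_to_le16(0x4455);
 */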
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
__le32 *dst_gid)
{
u32 i;
if (qp->roce_mode == ROCE_V2_IPV4) {
/* The IPv4 addresses shall be aligned to the highest word.
* The lower words must be zero.
*/
memset(src_gid, 0, sizeof(union qed_gid));
memset(dst_gid, 0, sizeof(union qed_gid));
src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
} else {
/* GIDs and IPv6 addresses coincide in location and size */
for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
}
}
}
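To make the IPv4 layout concrete, here is an illustrative trace (not part of the patch), assuming the QP carries a host-order IPv4 address in sgid.ipv4_addr:

/* With qp->roce_mode == ROCE_V2_IPV4 and
 * qp->sgid.ipv4_addr == 0xc0a80001 (192.168.0.1), the copy produces
 *
 *	src_gid[0..2] == 0;
 *	src_gid[3]    == cpu_to_le32(0xc0a80001);
 *
 * i.e. the address occupies the most significant dword of the GID and
 * the remaining words are zero, as the firmware expects.
 */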
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
enum roce_flavor flavor;
switch (roce_mode) {
case ROCE_V1:
flavor = PLAIN_ROCE;
break;
case ROCE_V2_IPV4:
flavor = RROCE_IPV4;
break;
case ROCE_V2_IPV6:
flavor = RROCE_IPV6;
break;
default:
flavor = MAX_ROCE_FLAVOR;
break;
}
return flavor;
}
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
u32 requester_icid;
int rc;
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
&responder_icid);
if (rc) {
spin_unlock_bh(&p_rdma_info->lock);
return rc;
}
rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
&requester_icid);
spin_unlock_bh(&p_rdma_info->lock);
if (rc)
goto err;
/* the two icid's should be adjacent */
if ((requester_icid - responder_icid) != 1) {
DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
rc = -EINVAL;
goto err;
}
responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
p_rdma_info->proto);
requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
p_rdma_info->proto);
/* If these icids require a new ILT line, allocate DMA-able context for
* an ILT page
*/
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
if (rc)
goto err;
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
if (rc)
goto err;
*cid = (u16)responder_icid;
return rc;
err:
spin_lock_bh(&p_rdma_info->lock);
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
spin_unlock_bh(&p_rdma_info->lock);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Allocate CID - failed, rc = %d\n", rc);
return rc;
}
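Note that each QP owns a pair of adjacent ICIDs carved from the same cid_map: the lower one backs the responder and the next one backs the requester, which is why every helper below posts its ramrod on either qp->icid or qp->icid + 1. A minimal sketch of how a caller sees the pairing (illustrative only; error handling elided):

/* Sketch, not part of the patch:
 *
 *	u16 icid;
 *
 *	qed_roce_alloc_cid(p_hwfn, &icid);
 *	... responder ramrods then use init_data.cid = icid,
 *	... requester ramrods use init_data.cid = icid + 1.
 */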
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
u16 physical_queue0 = 0;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for IRQ */
qp->irq_num_pages = 1;
qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
RDMA_RING_PAGE_SIZE,
&qp->irq_phys_addr, GFP_KERNEL);
if (!qp->irq) {
rc = -ENOMEM;
DP_NOTICE(p_hwfn,
"qed create responder failed: cannot allocate memory (irq). rc = %d\n",
rc);
return rc;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
goto err;
p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
p_ramrod->flags = 0;
roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
qp->incoming_rdma_read_en);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
qp->incoming_rdma_write_en);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
qp->incoming_atomic_en);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
qp->e2e_flow_control_en);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
qp->fmr_and_reserved_lkey);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
qp->min_rnr_nak_timer);
p_ramrod->max_ird = qp->max_rd_atomic_resp;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
p_ramrod->irq_num_pages = qp->irq_num_pages;
p_ramrod->p_key = cpu_to_le16(qp->pkey);
p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
p_ramrod->mtu = cpu_to_le16(qp->mtu);
p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
p_ramrod->pd = cpu_to_le16(qp->pd);
p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
p_ramrod->udp_src_port = qp->udp_src_port;
p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
rc, physical_queue0);
if (rc)
goto err;
qp->resp_offloaded = true;
return rc;
err:
DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
qp->irq, qp->irq_phys_addr);
return rc;
}
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
u16 physical_queue0 = 0;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* Allocate DMA-able memory for ORQ */
qp->orq_num_pages = 1;
qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
RDMA_RING_PAGE_SIZE,
&qp->orq_phys_addr, GFP_KERNEL);
if (!qp->orq) {
rc = -ENOMEM;
DP_NOTICE(p_hwfn,
"qed create requester failed: cannot allocate memory (orq). rc = %d\n",
rc);
return rc;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid + 1;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ROCE_RAMROD_CREATE_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
goto err;
p_ramrod = &p_ent->ramrod.roce_create_qp_req;
p_ramrod->flags = 0;
roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
qp->fmr_and_reserved_lkey);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
SET_FIELD(p_ramrod->flags,
ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
qp->rnr_retry_cnt);
p_ramrod->max_ord = qp->max_rd_atomic_req;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
p_ramrod->orq_num_pages = qp->orq_num_pages;
p_ramrod->p_key = cpu_to_le16(qp->pkey);
p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
p_ramrod->mtu = cpu_to_le16(qp->mtu);
p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
p_ramrod->pd = cpu_to_le16(qp->pd);
p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->sq_cq_id);
memset(&qm_params, 0, sizeof(qm_params));
qm_params.roce.qpid = qp->icid >> 1;
physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
p_ramrod->udp_src_port = qp->udp_src_port;
p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
qp->stats_queue;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
if (rc)
goto err;
qp->req_offloaded = true;
return rc;
err:
DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
qp->orq, qp->orq_phys_addr);
return rc;
}
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
bool move_to_err, u32 modify_flags)
{
struct roce_modify_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !qp->resp_offloaded)
return 0;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ROCE_EVENT_MODIFY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc) {
DP_NOTICE(p_hwfn, "rc = %d\n", rc);
return rc;
}
p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
p_ramrod->flags = 0;
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
qp->incoming_rdma_read_en);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
qp->incoming_rdma_write_en);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
qp->incoming_atomic_en);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
qp->e2e_flow_control_en);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
GET_FIELD(modify_flags,
QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
GET_FIELD(modify_flags,
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
GET_FIELD(modify_flags,
QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
GET_FIELD(modify_flags,
QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
p_ramrod->fields = 0;
SET_FIELD(p_ramrod->fields,
ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
qp->min_rnr_nak_timer);
p_ramrod->max_ird = qp->max_rd_atomic_resp;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
p_ramrod->p_key = cpu_to_le16(qp->pkey);
p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
p_ramrod->mtu = cpu_to_le16(qp->mtu);
qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
return rc;
}
static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
bool move_to_sqd,
bool move_to_err, u32 modify_flags)
{
struct roce_modify_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (move_to_err && !(qp->req_offloaded))
return 0;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid + 1;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ROCE_EVENT_MODIFY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc) {
DP_NOTICE(p_hwfn, "rc = %d\n", rc);
return rc;
}
p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
p_ramrod->flags = 0;
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
qp->sqd_async);
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
GET_FIELD(modify_flags,
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
GET_FIELD(modify_flags,
QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
GET_FIELD(modify_flags,
QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
SET_FIELD(p_ramrod->flags,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
GET_FIELD(modify_flags,
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
p_ramrod->fields = 0;
SET_FIELD(p_ramrod->fields,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
SET_FIELD(p_ramrod->fields,
ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
qp->rnr_retry_cnt);
p_ramrod->max_ord = qp->max_rd_atomic_req;
p_ramrod->traffic_class = qp->traffic_class_tos;
p_ramrod->hop_limit = qp->hop_limit_ttl;
p_ramrod->p_key = cpu_to_le16(qp->pkey);
p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
p_ramrod->mtu = cpu_to_le16(qp->mtu);
qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
return rc;
}
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
u32 *num_invalidated_mw)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
dma_addr_t ramrod_res_phys;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (!qp->resp_offloaded)
return 0;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ROCE_RAMROD_DESTROY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
&ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
rc = -ENOMEM;
DP_NOTICE(p_hwfn,
"qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
rc);
return rc;
}
DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
qp->irq, qp->irq_phys_addr);
qp->resp_offloaded = false;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
err:
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct roce_destroy_qp_resp_output_params),
p_ramrod_res, ramrod_res_phys);
return rc;
}
static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
u32 *num_bound_mw)
{
struct roce_destroy_qp_req_output_params *p_ramrod_res;
struct roce_destroy_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
dma_addr_t ramrod_res_phys;
int rc = -ENOMEM;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
if (!qp->req_offloaded)
return 0;
p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_ramrod_res),
&ramrod_res_phys, GFP_KERNEL);
if (!p_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed destroy requester failed: cannot allocate memory (ramrod)\n");
return rc;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid + 1;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
goto err;
p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err;
*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
qp->orq, qp->orq_phys_addr);
qp->req_offloaded = false;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
err:
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
p_ramrod_res, ramrod_res_phys);
return rc;
}
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
struct roce_query_qp_req_ramrod_data *p_req_ramrod;
struct qed_sp_init_data init_data;
dma_addr_t resp_ramrod_res_phys;
dma_addr_t req_ramrod_res_phys;
struct qed_spq_entry *p_ent;
bool rq_err_state;
bool sq_err_state;
bool sq_draining;
int rc = -ENOMEM;
if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
/* We can't send ramrod to the fw since this qp wasn't offloaded
* to the fw yet
*/
out_params->draining = false;
out_params->rq_psn = qp->rq_psn;
out_params->sq_psn = qp->sq_psn;
out_params->state = qp->cur_state;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
return 0;
}
if (!(qp->resp_offloaded)) {
DP_NOTICE(p_hwfn,
"The responder's qp should be offloded before requester's\n");
return -EINVAL;
}
/* Send a query responder ramrod to FW to get RQ-PSN and state */
p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_resp_ramrod_res),
&resp_ramrod_res_phys, GFP_KERNEL);
if (!p_resp_ramrod_res) {
DP_NOTICE(p_hwfn,
"qed query qp failed: cannot allocate memory (ramrod)\n");
return rc;
}
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qp->icid;
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
goto err_resp;
p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
p_resp_ramrod_res, resp_ramrod_res_phys);
if (!(qp->req_offloaded)) {
/* Don't send query qp for the requester */
out_params->sq_psn = qp->sq_psn;
out_params->draining = false;
if (rq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
out_params->state = qp->cur_state;
return 0;
}
/* Send a query requester ramrod to FW to get SQ-PSN and state */
p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(*p_req_ramrod_res),
&req_ramrod_res_phys,
GFP_KERNEL);
if (!p_req_ramrod_res) {
rc = -ENOMEM;
DP_NOTICE(p_hwfn,
"qed query qp failed: cannot allocate memory (ramrod)\n");
return rc;
}
/* Get SPQ entry */
init_data.cid = qp->icid + 1;
rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
PROTOCOLID_ROCE, &init_data);
if (rc)
goto err_req;
p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
goto err_req;
out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
sq_draining =
GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
p_req_ramrod_res, req_ramrod_res_phys);
out_params->draining = false;
if (rq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
else if (sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
return 0;
err_req:
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
p_req_ramrod_res, req_ramrod_res_phys);
return rc;
err_resp:
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
p_resp_ramrod_res, resp_ramrod_res_phys);
return rc;
}
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
u32 start_cid;
int rc;
/* Destroys the specified QP */
if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
(qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
(qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
DP_NOTICE(p_hwfn,
"QP must be in error, reset or init state before destroying it\n");
return -EINVAL;
}
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
if (rc)
return rc;
/* Send destroy requester ramrod */
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
if (rc)
return rc;
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
spin_lock_bh(&p_hwfn->p_rdma_info->lock);
start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
p_hwfn->p_rdma_info->proto);
/* Release responder's icid */
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
qp->icid - start_cid);
/* Release requester's icid */
qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
qp->icid + 1 - start_cid);
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
return 0;
}
int qed_rdma_query_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
/* The following fields are filled in from qp and not FW as they can't
* be modified by FW
*/
out_params->mtu = qp->mtu;
out_params->dest_qp = qp->dest_qp;
out_params->incoming_atomic_en = qp->incoming_atomic_en;
out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
out_params->dgid = qp->dgid;
out_params->flow_label = qp->flow_label;
out_params->hop_limit_ttl = qp->hop_limit_ttl;
out_params->traffic_class_tos = qp->traffic_class_tos;
out_params->timeout = qp->ack_timeout;
out_params->rnr_retry = qp->rnr_retry_cnt;
out_params->retry_cnt = qp->retry_cnt;
out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
out_params->pkey_index = 0;
out_params->max_rd_atomic = qp->max_rd_atomic_req;
out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
out_params->sqd_async = qp->sqd_async;
rc = qed_roce_query_qp(p_hwfn, qp, out_params);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
return rc;
}
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
rc = qed_roce_destroy_qp(p_hwfn, qp);
/* free qp params struct */
kfree(qp);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
return rc;
}
struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_qp *qp;
u8 max_stats_queues;
int rc;
if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
DP_ERR(p_hwfn->cdev,
"qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
rdma_cxt, in_params, out_params);
return NULL;
}
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"qed rdma create qp called with qp_handle = %08x%08x\n",
in_params->qp_handle_hi, in_params->qp_handle_lo);
/* Some sanity checks... */
max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
if (in_params->stats_queue >= max_stats_queues) {
DP_ERR(p_hwfn->cdev,
"qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
in_params->stats_queue, max_stats_queues);
return NULL;
}
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp) {
DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
return NULL;
}
rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
if (rc) {
kfree(qp);
return NULL;
}
qp->qpid = ((0xFF << 16) | qp->icid);
DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
qp->cur_state = QED_ROCE_QP_STATE_RESET;
qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
qp->use_srq = in_params->use_srq;
qp->signal_all = in_params->signal_all;
qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
qp->pd = in_params->pd;
qp->dpi = in_params->dpi;
qp->sq_cq_id = in_params->sq_cq_id;
qp->sq_num_pages = in_params->sq_num_pages;
qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
qp->rq_cq_id = in_params->rq_cq_id;
qp->rq_num_pages = in_params->rq_num_pages;
qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
qp->srq_id = in_params->srq_id;
qp->req_offloaded = false;
qp->resp_offloaded = false;
qp->e2e_flow_control_en = qp->use_srq ? false : true;
qp->stats_queue = in_params->stats_queue;
out_params->icid = qp->icid;
out_params->qp_id = qp->qpid;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
return qp;
}
static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
enum qed_roce_qp_state prev_state,
struct qed_rdma_modify_qp_in_params *params)
{
u32 num_invalidated_mw = 0, num_bound_mw = 0;
int rc = 0;
/* Perform additional operations according to the current state and the
* next state
*/
if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
(prev_state == QED_ROCE_QP_STATE_RESET)) &&
(qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
/* Init->RTR or Reset->RTR */
rc = qed_roce_sp_create_responder(p_hwfn, qp);
return rc;
} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
(qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
/* RTR-> RTS */
rc = qed_roce_sp_create_requester(p_hwfn, qp);
if (rc)
return rc;
/* Send modify responder ramrod */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
params->modify_flags);
return rc;
} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
(qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
/* RTS->RTS */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
params->modify_flags);
if (rc)
return rc;
rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
params->modify_flags);
return rc;
} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
(qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
/* RTS->SQD */
rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
params->modify_flags);
return rc;
} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
(qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
/* SQD->SQD */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
params->modify_flags);
if (rc)
return rc;
rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
params->modify_flags);
return rc;
} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
(qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
/* SQD->RTS */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
params->modify_flags);
if (rc)
return rc;
rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
params->modify_flags);
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
qp->cur_state == QED_ROCE_QP_STATE_SQE) {
/* ->ERR */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
params->modify_flags);
if (rc)
return rc;
rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
params->modify_flags);
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
&num_invalidated_mw);
if (rc)
return rc;
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
if (num_invalidated_mw != num_bound_mw) {
DP_NOTICE(p_hwfn,
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
} else {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
}
return rc;
}
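The ladder above follows the standard RoCE bring-up: the responder is offloaded on the move to RTR and the requester on the move to RTS, with later transitions mapped to modify or destroy ramrods. A hedged usage sketch (rdma_cxt, qp and rc are assumed to exist in the caller; all other attributes are set elsewhere):

/* Sketch, not part of the patch: driving a QP from INIT to RTS. */
struct qed_rdma_modify_qp_in_params params = { 0 };

SET_FIELD(params.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

params.new_state = QED_ROCE_QP_STATE_RTR;	/* offloads the responder */
rc = qed_rdma_modify_qp(rdma_cxt, qp, &params);

if (!rc) {
	params.new_state = QED_ROCE_QP_STATE_RTS;	/* offloads the requester,
							 * then modifies the responder
							 */
	rc = qed_rdma_modify_qp(rdma_cxt, qp, &params);
}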
int qed_rdma_modify_qp(void *rdma_cxt,
struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
int rc = 0;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
qp->icid, params->new_state);
if (GET_FIELD(params->modify_flags,
QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
qp->incoming_atomic_en = params->incoming_atomic_en;
}
/* Update QP structure with the updated values */
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
qp->roce_mode = params->roce_mode;
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
qp->pkey = params->pkey;
if (GET_FIELD(params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
qp->e2e_flow_control_en = params->e2e_flow_control_en;
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
qp->dest_qp = params->dest_qp;
if (GET_FIELD(params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
/* Indicates that the following parameters have changed:
* Traffic class, flow label, hop limit, source GID,
* destination GID, loopback indicator
*/
qp->traffic_class_tos = params->traffic_class_tos;
qp->flow_label = params->flow_label;
qp->hop_limit_ttl = params->hop_limit_ttl;
qp->sgid = params->sgid;
qp->dgid = params->dgid;
qp->udp_src_port = 0;
qp->vlan_id = params->vlan_id;
qp->mtu = params->mtu;
qp->lb_indication = params->lb_indication;
memcpy((u8 *)&qp->remote_mac_addr[0],
(u8 *)&params->remote_mac_addr[0], ETH_ALEN);
if (params->use_local_mac) {
memcpy((u8 *)&qp->local_mac_addr[0],
(u8 *)&params->local_mac_addr[0], ETH_ALEN);
} else {
memcpy((u8 *)&qp->local_mac_addr[0],
(u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
}
}
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
qp->rq_psn = params->rq_psn;
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
qp->sq_psn = params->sq_psn;
if (GET_FIELD(params->modify_flags,
QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
qp->max_rd_atomic_req = params->max_rd_atomic_req;
if (GET_FIELD(params->modify_flags,
QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
if (GET_FIELD(params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
qp->ack_timeout = params->ack_timeout;
if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
qp->retry_cnt = params->retry_cnt;
if (GET_FIELD(params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
qp->rnr_retry_cnt = params->rnr_retry_cnt;
if (GET_FIELD(params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
qp->sqd_async = params->sqd_async;
prev_state = qp->cur_state;
if (GET_FIELD(params->modify_flags,
QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
qp->cur_state = params->new_state;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
qp->cur_state);
}
rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
return rc;
}
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
@@ -1201,6 +2394,10 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
.rdma_dealloc_pd = &qed_rdma_free_pd,
.rdma_create_cq = &qed_rdma_create_cq,
.rdma_destroy_cq = &qed_rdma_destroy_cq,
.rdma_create_qp = &qed_rdma_create_qp,
.rdma_modify_qp = &qed_rdma_modify_qp,
.rdma_query_qp = &qed_rdma_query_qp,
.rdma_destroy_qp = &qed_rdma_destroy_qp,
};
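With the four new entries registered, an upper-layer driver such as qedr can run the whole QP life cycle through the ops table. A hypothetical call sequence (the parameter structs are assumed to be filled in by the caller; error handling elided):

/* Sketch, not part of the patch. */
const struct qed_rdma_ops *ops = qed_get_rdma_ops();
struct qed_rdma_qp *qp;

qp = ops->rdma_create_qp(rdma_cxt, &create_in, &create_out);
if (qp) {
	ops->rdma_modify_qp(rdma_cxt, qp, &modify_in);	/* e.g. -> RTR, RTS */
	ops->rdma_query_qp(rdma_cxt, qp, &query_out);
	ops->rdma_destroy_qp(rdma_cxt, qp);		/* also frees qp */
}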
const struct qed_rdma_ops *qed_get_rdma_ops()
@@ -114,6 +114,72 @@ struct qed_rdma_resize_cnq_in_params {
u64 pbl_ptr;
};
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
u32 qpid;
u16 icid;
enum qed_roce_qp_state cur_state;
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u16 pd;
u16 pkey;
u32 dest_qp;
u16 mtu;
u16 srq_id;
u8 traffic_class_tos;
u8 hop_limit_ttl;
u16 dpi;
u32 flow_label;
bool lb_indication;
u16 vlan_id;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
union qed_gid sgid;
union qed_gid dgid;
enum roce_mode roce_mode;
u16 udp_src_port;
u8 stats_queue;
/* requester */
u8 max_rd_atomic_req;
u32 sq_psn;
u16 sq_cq_id;
u16 sq_num_pages;
dma_addr_t sq_pbl_ptr;
void *orq;
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id;
u16 rq_num_pages;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
};
int
qed_rdma_add_user(void *rdma_cxt,
struct qed_rdma_add_user_out_params *out_params);
@@ -135,6 +201,11 @@ void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *params);
int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *out_params);
#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -43,6 +43,17 @@
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
/* rdma interface */
enum qed_roce_qp_state {
QED_ROCE_QP_STATE_RESET,
QED_ROCE_QP_STATE_INIT,
QED_ROCE_QP_STATE_RTR,
QED_ROCE_QP_STATE_RTS,
QED_ROCE_QP_STATE_SQD,
QED_ROCE_QP_STATE_ERR,
QED_ROCE_QP_STATE_SQE
};
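These states intentionally mirror the InfiniBand QP state machine, so a consumer can translate ib_qp_state values one-to-one. A hypothetical mapping helper (illustrative only; the real translation lives in the consuming driver):

static enum qed_roce_qp_state qed_roce_qp_state_from_ib(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return QED_ROCE_QP_STATE_RESET;
	case IB_QPS_INIT:	return QED_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:	return QED_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:	return QED_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:	return QED_ROCE_QP_STATE_SQD;
	case IB_QPS_SQE:	return QED_ROCE_QP_STATE_SQE;
	default:		return QED_ROCE_QP_STATE_ERR;
	}
}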
enum qed_rdma_tid_type {
QED_RDMA_TID_REGISTERED_MR,
QED_RDMA_TID_FMR,
@@ -292,6 +303,128 @@ struct qed_rdma_destroy_cq_out_params {
u16 num_cq_notif;
};
struct qed_rdma_create_qp_in_params {
u32 qp_handle_lo;
u32 qp_handle_hi;
u32 qp_handle_async_lo;
u32 qp_handle_async_hi;
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
u16 pd;
u16 dpi;
u16 sq_cq_id;
u16 sq_num_pages;
u64 sq_pbl_ptr;
u8 max_sq_sges;
u16 rq_cq_id;
u16 rq_num_pages;
u64 rq_pbl_ptr;
u16 srq_id;
u8 stats_queue;
};
struct qed_rdma_create_qp_out_params {
u32 qp_id;
u16 icid;
void *rq_pbl_virt;
dma_addr_t rq_pbl_phys;
void *sq_pbl_virt;
dma_addr_t sq_pbl_phys;
};
struct qed_rdma_modify_qp_in_params {
u32 modify_flags;
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
enum qed_roce_qp_state new_state;
u16 pkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u32 dest_qp;
bool lb_indication;
u16 mtu;
u8 traffic_class_tos;
u8 hop_limit_ttl;
u32 flow_label;
union qed_gid sgid;
union qed_gid dgid;
u16 udp_src_port;
u16 vlan_id;
u32 rq_psn;
u32 sq_psn;
u8 max_rd_atomic_resp;
u8 max_rd_atomic_req;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
bool use_local_mac;
enum roce_mode roce_mode;
};
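Each MASK/SHIFT pair is meant to be driven through SET_FIELD()/GET_FIELD(), as the modify path above does, so callers flag exactly the attributes they are changing. A sketch of composing the flags for an INIT->RTR transition (the attribute values are placeholders):

/* Sketch, not part of the patch. */
struct qed_rdma_modify_qp_in_params in = { 0 };

SET_FIELD(in.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
SET_FIELD(in.modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
SET_FIELD(in.modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);

in.new_state = QED_ROCE_QP_STATE_RTR;
in.rq_psn = 0x1000;		/* placeholder */
in.hop_limit_ttl = 64;		/* placeholder */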
struct qed_rdma_query_qp_out_params {
enum qed_roce_qp_state state;
u32 rq_psn;
u32 sq_psn;
bool draining;
u16 mtu;
u32 dest_qp;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
union qed_gid sgid;
union qed_gid dgid;
u32 flow_label;
u8 hop_limit_ttl;
u8 traffic_class_tos;
u32 timeout;
u8 rnr_retry;
u8 retry_cnt;
u8 min_rnr_nak_timer;
u16 pkey_index;
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
bool sqd_async;
};
struct qed_rdma_create_srq_out_params {
u16 srq_id;
};
@@ -368,6 +501,17 @@ struct qed_rdma_ops {
int (*rdma_destroy_cq)(void *rdma_cxt,
struct qed_rdma_destroy_cq_in_params *iparams,
struct qed_rdma_destroy_cq_out_params *oparams);
struct qed_rdma_qp *
(*rdma_create_qp)(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *iparams,
struct qed_rdma_create_qp_out_params *oparams);
int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_modify_qp_in_params *iparams);
int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
struct qed_rdma_query_qp_out_params *oparams);
int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
};
const struct qed_rdma_ops *qed_get_rdma_ops(void);