Commit 7a01d05c authored by Bart Van Assche, committed by Doug Ledford

IB/srpt: Limit the send and receive queue sizes to what the HCA supports

Additionally, correct the comment about ch->rq_size.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent c70ca389
@@ -1650,7 +1650,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
 	 * both both, as RDMA contexts will also post completions for the
 	 * RDMA READ case.
 	 */
-	qp_init->cap.max_send_wr = srp_sq_size / 2;
+	qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
 	qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
 	qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
 	qp_init->port_num = ch->sport->port;
@@ -1953,10 +1953,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	ch->cm_id = cm_id;
 	cm_id->context = ch;
 	/*
-	 * Avoid QUEUE_FULL conditions by limiting the number of buffers used
-	 * for the SRP protocol to the command queue size.
+	 * ch->rq_size should be at least as large as the initiator queue
+	 * depth to avoid that the initiator driver has to report QUEUE_FULL
+	 * to the SCSI mid-layer.
 	 */
-	ch->rq_size = SRPT_RQ_SIZE;
+	ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
 	spin_lock_init(&ch->spinlock);
 	ch->state = CH_CONNECTING;
 	INIT_LIST_HEAD(&ch->cmd_wait_list);
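
Both hunks apply the same pattern: clamp a requested queue depth to the max_qp_wr limit the HCA reports in struct ib_device_attr before the QP is created. The "+ 0U" in the first hunk is presumably there to give both min() arguments the same unsigned type, since the kernel's min() macro rejects mixed types. The following is a minimal user-space sketch of that clamping pattern, not srpt code; the struct, helper, and numbers below are hypothetical stand-ins for ib_device_attr, the module parameters, and real device limits.

#include <stdio.h>

/* Stand-in for struct ib_device_attr; only max_qp_wr matters here. */
struct hca_attrs {
	unsigned int max_qp_wr;	/* largest work-request count the HCA accepts per QP */
};

/* Clamp a requested work-request count to what the HCA supports. */
static unsigned int clamp_to_hca(unsigned int requested,
				 const struct hca_attrs *attrs)
{
	return requested < attrs->max_qp_wr ? requested : attrs->max_qp_wr;
}

int main(void)
{
	struct hca_attrs attrs = { .max_qp_wr = 4096 };	/* example HCA limit */
	unsigned int srp_sq_size = 16384;	/* requested send queue depth */
	unsigned int rq_size = 8192;		/* requested receive queue depth */

	/* Mirrors qp_init->cap.max_send_wr and ch->rq_size in the patch. */
	printf("max_send_wr = %u\n", clamp_to_hca(srp_sq_size / 2, &attrs));
	printf("rq_size     = %u\n", clamp_to_hca(rq_size, &attrs));
	return 0;
}

Without such a clamp, QP creation can fail on HCAs whose max_qp_wr is smaller than the configured queue sizes, which is what the commit title refers to.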