Commit d157e534 authored by James Smart, committed by Keith Busch

nvme_fc: rework sqsize handling

Corrected four outstanding issues in the transport around sqsize.

1: Create Connection LS is sending the 1's-based sqsize, should be
sending the 0's-based value.

2: allocation of hw queue is using the 0's-based size. It should be
using the 1's-based value.

3: normalization of ctrl.sqsize by MQES is using MQES+1 (1's-based
value). It should be MQES (0's-based value).

4: Missing clause to ensure queue_count not larger than ctrl->sqsize.
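
To make the 0's-based/1's-based mismatch in issues 1-3 concrete, here is a
minimal standalone sketch (hypothetical values, not transport code). NVMe
sqsize-style fields are 0's-based: a queue holding N entries is encoded as
N - 1 on the wire and in registers, while any in-memory allocation needs
the full N.

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t queue_size = 32;		/* 1's-based: actual entry count */
		uint16_t sqsize = queue_size - 1;	/* 0's-based wire/register encoding */

		assert(sqsize == 31);		/* what a Create Connection LS must carry */
		assert(sqsize + 1 == 32);	/* what a hw queue allocation must use */
		return 0;
	}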

Corrected by:
Cleaned up routines that pass queue size around. The queue size value is
the actual count (1's-based) and is determined from ctrl->sqsize + 1.

Routines that send the 0's-based value adapt it from the queue size.

Set ctrl->sqsize properly for MQES.

Added clause to ensure queue_count is not larger than ctrl->sqsize + 1.
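
For illustration, a standalone sketch of the corrected normalization and
clamp, with hypothetical local variables standing in for the ctrl fields
(CAP.MQES is itself 0's-based, so it bounds sqsize directly, and the
1's-based queue_size is bounded by sqsize + 1):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mqes = 127;		/* 0's-based MQES from the CAP register */
		uint32_t sqsize = 255;		/* 0's-based, from connect parameters */
		size_t queue_size = 256;	/* 1's-based, user-requested depth */

		/* MQES and sqsize use the same 0's-based encoding: compare directly. */
		if (sqsize > mqes)
			sqsize = mqes;

		/* queue_size is 1's-based, so clamp it against sqsize + 1. */
		if (queue_size > sqsize + 1) {
			printf("queue_size %zu > ctrl sqsize %u, clamping down\n",
			       queue_size, (unsigned)(sqsize + 1));
			queue_size = sqsize + 1;	/* 128 entries */
		}
		return 0;
	}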
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <keith.busch@intel.com>
parent 0475821e
@@ -1206,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
 			sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
 	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
-	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
 	/* Linux supports only Dynamic controllers */
 	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
 	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
@@ -1321,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 			sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
 	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
 	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
-	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);
 	lsop->queue = queue;
 	lsreq->rqstaddr = conn_rqst;
@@ -2481,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 		goto out_free_tag_set;
 	}
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_cleanup_blk_queue;
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2532,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_free_io_queues;
-	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_free_io_queues;
-	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
 		goto out_delete_hw_queues;
@@ -2632,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	nvme_fc_init_queue(ctrl, 0);
 	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
-				NVME_AQ_BLK_MQ_DEPTH);
+				NVME_AQ_DEPTH);
 	if (ret)
 		goto out_free_queue;
 	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
-				NVME_AQ_BLK_MQ_DEPTH,
-				(NVME_AQ_BLK_MQ_DEPTH / 4));
+				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
 	if (ret)
 		goto out_delete_hw_queue;
@@ -2666,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	}
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
 	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 	if (ret)
@@ -2699,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl sqsize %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	ret = nvme_fc_init_aen_ops(ctrl);
 	if (ret)
 		goto out_term_aen_ops;