Commit 20d0dfe6 authored by Sagi Grimberg's avatar Sagi Grimberg

nvme: move ctrl cap to struct nvme_ctrl

All transports use either a private cache of controller cap or an on-stack
copy, move it to the generic struct nvme_ctrl. In the future it will also
be maintained by the core.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent d858e5f0
...@@ -152,8 +152,6 @@ struct nvme_fc_ctrl { ...@@ -152,8 +152,6 @@ struct nvme_fc_ctrl {
u64 association_id; u64 association_id;
u64 cap;
struct list_head ctrl_list; /* rport->ctrl_list */ struct list_head ctrl_list; /* rport->ctrl_list */
struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set admin_tag_set;
...@@ -2328,7 +2326,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ...@@ -2328,7 +2326,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
* prior connection values * prior connection values
*/ */
ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
if (ret) { if (ret) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"prop_get NVME_REG_CAP failed\n"); "prop_get NVME_REG_CAP failed\n");
...@@ -2336,9 +2334,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ...@@ -2336,9 +2334,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
} }
ctrl->ctrl.sqsize = ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (ret) if (ret)
goto out_disconnect_admin_queue; goto out_disconnect_admin_queue;
......
...@@ -144,6 +144,7 @@ struct nvme_ctrl { ...@@ -144,6 +144,7 @@ struct nvme_ctrl {
u32 ctrl_config; u32 ctrl_config;
u32 queue_count; u32 queue_count;
u64 cap;
u32 page_size; u32 page_size;
u32 max_hw_sectors; u32 max_hw_sectors;
u16 oncs; u16 oncs;
......
...@@ -1144,8 +1144,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) ...@@ -1144,8 +1144,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
if (shutdown) if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl); nvme_shutdown_ctrl(&dev->ctrl);
else else
nvme_disable_ctrl(&dev->ctrl, lo_hi_readq( nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
dev->bar + NVME_REG_CAP));
spin_lock_irq(&nvmeq->q_lock); spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq); nvme_process_cq(nvmeq);
...@@ -1388,7 +1387,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) ...@@ -1388,7 +1387,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
{ {
int result; int result;
u32 aqa; u32 aqa;
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq; struct nvme_queue *nvmeq;
result = nvme_remap_bar(dev, db_bar_size(dev, 0)); result = nvme_remap_bar(dev, db_bar_size(dev, 0));
...@@ -1396,13 +1394,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) ...@@ -1396,13 +1394,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result; return result;
dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0; NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
if (dev->subsystem && if (dev->subsystem &&
(readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO))
writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS);
result = nvme_disable_ctrl(&dev->ctrl, cap); result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
if (result < 0) if (result < 0)
return result; return result;
...@@ -1421,7 +1419,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) ...@@ -1421,7 +1419,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
result = nvme_enable_ctrl(&dev->ctrl, cap); result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap);
if (result) if (result)
return result; return result;
...@@ -1865,7 +1863,6 @@ static int nvme_dev_add(struct nvme_dev *dev) ...@@ -1865,7 +1863,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
static int nvme_pci_enable(struct nvme_dev *dev) static int nvme_pci_enable(struct nvme_dev *dev)
{ {
u64 cap;
int result = -ENOMEM; int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev); struct pci_dev *pdev = to_pci_dev(dev->dev);
...@@ -1892,10 +1889,11 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -1892,10 +1889,11 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (result < 0) if (result < 0)
return result; return result;
cap = lo_hi_readq(dev->bar + NVME_REG_CAP); dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
dev->db_stride = 1 << NVME_CAP_STRIDE(cap); NVME_Q_DEPTH);
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
dev->dbs = dev->bar + 4096; dev->dbs = dev->bar + 4096;
/* /*
...@@ -1909,7 +1907,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -1909,7 +1907,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth); dev->q_depth);
} else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG &&
(pdev->device == 0xa821 || pdev->device == 0xa822) && (pdev->device == 0xa821 || pdev->device == 0xa822) &&
NVME_CAP_MQES(cap) == 0) { NVME_CAP_MQES(dev->ctrl.cap) == 0) {
dev->q_depth = 64; dev->q_depth = 64;
dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
"set queue depth=%u\n", dev->q_depth); "set queue depth=%u\n", dev->q_depth);
......
...@@ -118,7 +118,6 @@ struct nvme_rdma_ctrl { ...@@ -118,7 +118,6 @@ struct nvme_rdma_ctrl {
struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set admin_tag_set;
struct nvme_rdma_device *device; struct nvme_rdma_device *device;
u64 cap;
u32 max_fr_pages; u32 max_fr_pages;
struct sockaddr_storage addr; struct sockaddr_storage addr;
...@@ -728,7 +727,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) ...@@ -728,7 +727,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (ret) if (ret)
goto requeue; goto requeue;
...@@ -1573,7 +1572,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) ...@@ -1573,7 +1572,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP,
&ctrl->ctrl.cap);
if (error) { if (error) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"prop_get NVME_REG_CAP failed\n"); "prop_get NVME_REG_CAP failed\n");
...@@ -1581,9 +1581,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) ...@@ -1581,9 +1581,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
} }
ctrl->ctrl.sqsize = ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_queue;
......
...@@ -48,7 +48,6 @@ struct nvme_loop_ctrl { ...@@ -48,7 +48,6 @@ struct nvme_loop_ctrl {
struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set admin_tag_set;
struct list_head list; struct list_head list;
u64 cap;
struct blk_mq_tag_set tag_set; struct blk_mq_tag_set tag_set;
struct nvme_loop_iod async_event_iod; struct nvme_loop_iod async_event_iod;
struct nvme_ctrl ctrl; struct nvme_ctrl ctrl;
...@@ -387,7 +386,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ...@@ -387,7 +386,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_queue;
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
if (error) { if (error) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"prop_get NVME_REG_CAP failed\n"); "prop_get NVME_REG_CAP failed\n");
...@@ -395,9 +394,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ...@@ -395,9 +394,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
} }
ctrl->ctrl.sqsize = ctrl->ctrl.sqsize =
min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_queue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment