Commit be597e97 authored by Jens Axboe

Merge branch 'nvme-4.15' of git://git.infradead.org/nvme into for-linus

Pull NVMe fixes from Christoph:

"A couple nvme fixes for 4.15:

 - expand the queue ready fix that we only had for RDMA to also cover FC and
   loop by moving it to common code (Sagi)
 - fix an array out of bounds in the PCIe HMB code (Minwoo Im)
 - two new device quirks (Jeff Lien and Kai-Heng Feng)
 - static checker fixes (Keith Busch)
 - FC target refcount fix (James Smart)
 - A trivial spelling fix in new code (Colin Ian King)"
parents f341a4d3 8c97eecc
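
The bulk of the series is the first item above: the queue-ready check that previously lived only in the RDMA transport is moved into fabrics.h as nvmf_check_init_req() and wrapped by small per-transport helpers (nvme_rdma_is_ready(), nvme_fc_is_ready(), nvme_loop_is_ready()), as the diff below shows. As a rough, self-contained illustration of that decision logic only, here is a userspace C model; the struct, enum and function names in this sketch are made-up stand-ins rather than the kernel types, and only the branching mirrors the patch.

/*
 * Userspace model of the queue-ready gating that this series moves into
 * common fabrics code. All types and names here are illustrative stand-ins,
 * not the kernel structures; only the decision logic mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_RECONNECTING, CTRL_DELETING };
enum verdict { STS_OK, STS_RESOURCE, STS_IOERR };

struct model_queue {
        bool live;              /* set once the Connect command succeeded */
        enum ctrl_state state;  /* controller state at submission time */
};

/* Before the queue is live, only the fabrics Connect command may pass. */
static enum verdict check_ready(const struct model_queue *q, bool is_connect)
{
        if (q->live || is_connect)
                return STS_OK;
        /*
         * Reconnecting means transport disruption that may never recover;
         * deleting means the controller will never accept commands again.
         * Fail fast so upper layers can fail over instead of waiting.
         */
        if (q->state == CTRL_RECONNECTING || q->state == CTRL_DELETING)
                return STS_IOERR;
        return STS_RESOURCE;    /* queue not live yet: retry later */
}

int main(void)
{
        struct model_queue q = { .live = false, .state = CTRL_RECONNECTING };

        printf("I/O on reconnecting queue: %d (2 = IOERR)\n", check_ready(&q, false));
        q.state = CTRL_LIVE;
        printf("I/O before Connect:        %d (1 = RESOURCE)\n", check_ready(&q, false));
        printf("Connect command:           %d (0 = OK)\n", check_ready(&q, true));
        return 0;
}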
@@ -1449,18 +1449,18 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 	int srcu_idx, ret;
 	u8 data[16] = { 0, };
 
+	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+	if (unlikely(!ns))
+		return -EWOULDBLOCK;
+
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = op;
-	c.common.nsid = cpu_to_le32(head->ns_id);
+	c.common.nsid = cpu_to_le32(ns->head->ns_id);
 	c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		ret = -EWOULDBLOCK;
-	else
-		ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
 	nvme_put_ns_from_disk(head, srcu_idx);
 	return ret;
@@ -2961,8 +2961,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-	struct nvme_ns_head *head = ns->head;
-
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
@@ -2980,7 +2978,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	mutex_lock(&ns->ctrl->subsys->lock);
 	nvme_mpath_clear_current_path(ns);
-	if (head)
-		list_del_rcu(&ns->siblings);
+	list_del_rcu(&ns->siblings);
 	mutex_unlock(&ns->ctrl->subsys->lock);
@@ -2988,7 +2985,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_init(&ns->list);
 	mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-	synchronize_srcu(&head->srcu);
+	synchronize_srcu(&ns->head->srcu);
 	nvme_put_ns(ns);
 }
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+		struct request *rq)
+{
+	struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+	/*
+	 * We cannot accept any other command until the connect command has
+	 * completed, so only allow connect to pass.
+	 */
+	if (!blk_rq_is_passthrough(rq) ||
+	    cmd->common.opcode != nvme_fabrics_command ||
+	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+		/*
+		 * Reconnecting state means transport disruption, which can take
+		 * a long time and even might fail permanently, fail fast to
+		 * give upper layers a chance to failover.
+		 * Deleting state means that the ctrl will never accept commands
+		 * again, fail it permanently.
+		 */
+		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		    ctrl->state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+			return BLK_STS_IOERR;
+		}
+		return BLK_STS_RESOURCE; /* try again later */
+	}
+
+	return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
@@ -32,6 +32,7 @@
 
 enum nvme_fc_queue_flags {
 	NVME_FC_Q_CONNECTED = (1 << 0),
+	NVME_FC_Q_LIVE = (1 << 1),
 };
 
 #define NVMEFC_QUEUE_DELAY	3	/* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
 		return;
 
+	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
 	/*
 	 * Current implementation never disconnects a single queue.
 	 * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
-	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			break;
+
+		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return ret;
@@ -2320,6 +2323,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	u32 data_len;
 	blk_status_t ret;
 
+	ret = nvme_fc_is_ready(queue, rq);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
 		return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_disconnect_admin_queue;
 
+	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
 	/*
 	 * Check controller capabilities
 	 *
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 		bio->bi_opf |= REQ_NVME_MPATH;
 		ret = direct_make_request(bio);
 	} else if (!list_empty_careful(&head->list)) {
-		dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
 		spin_lock_irq(&head->requeue_lock);
 		bio_list_add(&head->requeue_list, bio);
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
  * found empirically.
  */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
@@ -1787,7 +1787,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	if (!bufs)
 		goto out_free_descs;
 
-	for (size = 0; size < preferred; size += len) {
+	for (size = 0; size < preferred && i < max_entries; size += len) {
 		dma_addr_t dma_addr;
 
 		len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2428,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
 		/*
@@ -2443,6 +2443,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
 			return NVME_QUIRK_NO_DEEPEST_PS;
+	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+		/*
+		 * Samsung SSD 960 EVO drops off the PCIe bus after system
+		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
+		 */
+		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+			return NVME_QUIRK_NO_APST;
 	}
 
 	return 0;
@@ -2482,7 +2490,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto unmap;
 
-	quirks |= check_dell_samsung_bug(pdev);
+	quirks |= check_vendor_combination_bug(pdev);
 
 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
 			quirks);
@@ -2665,6 +2673,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
@@ -1591,31 +1591,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
  * We cannot accept any other command until the Connect command has completed.
  */
 static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
-	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-		struct nvme_command *cmd = nvme_req(rq)->cmd;
-
-		if (!blk_rq_is_passthrough(rq) ||
-		    cmd->common.opcode != nvme_fabrics_command ||
-		    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
-			/*
-			 * reconnecting state means transport disruption, which
-			 * can take a long time and even might fail permanently,
-			 * fail fast to give upper layers a chance to failover.
-			 * deleting state means that the ctrl will never accept
-			 * commands again, fail it permanently.
-			 */
-			if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
-			    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
-				nvme_req(rq)->status = NVME_SC_ABORT_REQ;
-				return BLK_STS_IOERR;
-			}
-			return BLK_STS_RESOURCE; /* try again later */
-		}
-	}
-
-	return 0;
+	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1634,7 +1614,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
-	ret = nvme_rdma_queue_is_ready(queue, rq);
+	ret = nvme_rdma_is_ready(queue, rq);
 	if (unlikely(ret))
 		return ret;
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+	/* release the queue lookup reference on the completed IO */
+	nvmet_fc_tgt_q_put(queue);
+
 	spin_lock_irqsave(&queue->qlock, flags);
 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
 				struct nvmet_fc_defer_fcp_req, req_list);
 	if (!deferfcp) {
 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 		spin_unlock_irqrestore(&queue->qlock, flags);
-
-		/* Release reference taken at queue lookup and fod allocation */
-		nvmet_fc_tgt_q_put(queue);
 		return;
 	}
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
 				deferfcp->fcp_req);
 
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+
 		kfree(deferfcp);
 
 		spin_lock_irqsave(&queue->qlock, flags);
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
 	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+	NVME_LOOP_Q_LIVE	= 0,
+};
+
 struct nvme_loop_queue {
 	struct nvmet_cq		nvme_cq;
 	struct nvmet_sq		nvme_sq;
 	struct nvme_loop_ctrl	*ctrl;
+	unsigned long		flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret;
 
+	ret = nvme_loop_is_ready(queue, req);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
 		return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->ctrl.queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	}
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			return ret;
+		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
+	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
 	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,