Commit 143d28dc authored by Linus Torvalds

Merge tag 'block-5.13-2021-06-03' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "NVMe fixes from Christoph:

   - Fix corruption in RDMA in-capsule SGLs (Sagi Grimberg)

   - nvme-loop reset fixes (Hannes Reinecke)

   - nvmet fix for freeing unallocated p2pmem (Max Gurtovoy)"

* tag 'block-5.13-2021-06-03' of git://git.kernel.dk/linux-block:
  nvmet: fix freeing unallocated p2pmem
  nvme-loop: do not warn for deleted controllers during reset
  nvme-loop: check for NVME_LOOP_Q_LIVE in nvme_loop_destroy_admin_queue()
  nvme-loop: clear NVME_LOOP_Q_LIVE when nvme_loop_configure_admin_queue() fails
  nvme-loop: reset queue count to 1 in nvme_loop_destroy_io_queues()
  nvme-rdma: fix in-capsule data send for chained sgls
parents ec955023 e369edbb
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
 		int count)
 {
 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
 	struct ib_sge *sge = &req->sge[1];
+	struct scatterlist *sgl;
 	u32 len = 0;
 	int i;
 
-	for (i = 0; i < count; i++, sgl++, sge++) {
+	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
 		sge->addr = sg_dma_address(sgl);
 		sge->length = sg_dma_len(sgl);
 		sge->lkey = queue->device->pd->local_dma_lkey;
 		len += sge->length;
+		sge++;
 	}
 
 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
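The removed loop assumed the mapped scatterlist entries form one contiguous array, but a chained scatterlist stores its entries in several chunks linked together, so plain `sgl++` walks past the first chunk and treats the chain link as data; `for_each_sg()` follows the chain instead, and since it only advances the scatterlist cursor, the `ib_sge` pointer is now bumped explicitly with `sge++` in the loop body. A rough userspace analogue of the difference (the `segment`/`chain` names below are invented for illustration, not kernel API):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for a scatterlist entry: the last slot of a
 * chunk acts as a "chain" link to the next chunk and carries no data. */
struct segment {
	size_t len;
	struct segment *chain;	/* non-NULL only in the link slot */
};

int main(void)
{
	struct segment chunk2[2] = { { .len = 30 }, { .len = 40 } };
	struct segment chunk1[3] = {
		{ .len = 10 }, { .len = 20 },
		{ .chain = chunk2 },	/* link slot */
	};
	size_t total = 0;
	int count = 4;		/* four real data segments */

	/* Chain-aware walk, in the spirit of for_each_sg(): follow the
	 * link instead of relying on pointer arithmetic alone. */
	for (struct segment *s = chunk1; count > 0; count--) {
		if (s->chain)		/* skip the link slot */
			s = s->chain;
		total += s->len;
		s++;
	}

	printf("total = %zu\n", total);	/* prints 100 */
	return 0;
}

A naive `s++`-only walk over `chunk1` would read the link slot as a zero-length segment and then run off the end of the array, which is the userspace analogue of the in-capsule data corruption fixed here.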
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
 	return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
 {
-	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
 			nvmet_data_transfer_len(req));
 	if (!req->sg)
 		goto out_err;
 
 	if (req->metadata_len) {
-		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
 				&req->metadata_sg_cnt, req->metadata_len);
 		if (!req->metadata_sg)
 			goto out_free_sg;
 	}
+
+	req->p2p_dev = p2p_dev;
+
 	return 0;
 out_free_sg:
 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1025,25 +1029,19 @@ static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
 	return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-		return false;
-
-	if (req->sq->ctrl && req->sq->qid && req->ns) {
-		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-						 req->ns->nsid);
-		if (req->p2p_dev)
-			return true;
-	}
-
-	req->p2p_dev = NULL;
-	return false;
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+	    !req->sq->ctrl || !req->sq->qid || !req->ns)
+		return NULL;
+	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
 		return 0;
 
 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1072,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
 		if (req->metadata_sg)
 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
 	} else {
 		sgl_free(req->sg);
 		if (req->metadata_sg)
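Previously the lookup helper set req->p2p_dev as a side effect, so when the p2pmem allocation later failed and nvmet_req_alloc_sgls() fell back to sgl_alloc(), the free path could still hand the regular SGL to pci_p2pmem_free_sgl(). After this change req->p2p_dev is recorded only once the p2p allocation has actually succeeded, and cleared again on free, so the free path always matches the allocator that provided the memory. A minimal userspace sketch of that "record the owner only after a successful allocation" pattern (special_pool_alloc and the struct request here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical special-purpose pool; pretend it can be unavailable. */
static void *special_pool_alloc(size_t n, bool pool_available)
{
	return pool_available ? malloc(n) : NULL;
}
static void special_pool_free(void *p) { free(p); }

struct request {
	void *buf;
	bool from_pool;	/* set only after the pool allocation succeeded */
};

static int request_alloc(struct request *req, size_t n, bool pool_available)
{
	void *p = special_pool_alloc(n, pool_available);
	if (p) {
		req->buf = p;
		req->from_pool = true;	/* ownership recorded here, not earlier */
		return 0;
	}
	req->buf = malloc(n);		/* fallback allocator */
	req->from_pool = false;
	return req->buf ? 0 : -1;
}

static void request_free(struct request *req)
{
	if (req->from_pool)
		special_pool_free(req->buf);	/* matches the allocator used */
	else
		free(req->buf);
	req->from_pool = false;		/* mirror of req->p2p_dev = NULL */
	req->buf = NULL;
}

int main(void)
{
	struct request req = { 0 };
	request_alloc(&req, 64, false);	/* pool unavailable: falls back */
	request_free(&req);		/* still frees with the right allocator */
	return 0;
}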
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
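The admin-queue teardown can be reached from more than one error/reset path, so switching from clear_bit() to test_and_clear_bit() makes the NVME_LOOP_Q_LIVE flag act as a guard: only the caller that actually flips the bit performs the teardown, and a second call is a no-op (the later hunk clearing the bit when configure fails is the other half of that bookkeeping). A rough userspace sketch of the same "destroy at most once" idea using C11 atomics rather than the kernel bitops API:

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

struct queue {
	atomic_bool live;
};

static void queue_destroy(struct queue *q)
{
	/* atomic_exchange plays the role of test_and_clear_bit(): only the
	 * caller that observes live == true does the teardown. */
	if (!atomic_exchange(&q->live, false))
		return;
	printf("tearing down queue\n");	/* runs exactly once */
}

int main(void)
{
	struct queue q;
	atomic_init(&q.live, true);
	queue_destroy(&q);	/* performs the teardown */
	queue_destroy(&q);	/* second call is a harmless no-op */
	return 0;
}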
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
 	}
+	ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	return 0;
 
 out_cleanup_queue:
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_loop_shutdown_ctrl(ctrl);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-		/* state change failure should never happen */
-		WARN_ON_ONCE(1);
+		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+			/* state change failure for non-deleted ctrl? */
+			WARN_ON_ONCE(1);
 		return;
 	}
 
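The reset work used to treat any failed transition to CONNECTING as a bug and always warned, but when the controller is already being deleted the state machine legitimately refuses that transition, so the warning was just noise. The fix keeps the WARN only for states where the refusal is genuinely unexpected. A small sketch of the same idea with a made-up state enum (not the nvme controller state machine):

#include <stdio.h>
#include <stdbool.h>

enum ctrl_state { LIVE, CONNECTING, DELETING };

/* Toy transition rule: a deleting controller never goes back to CONNECTING. */
static bool change_state(enum ctrl_state *state, enum ctrl_state next)
{
	if (*state == DELETING && next == CONNECTING)
		return false;
	*state = next;
	return true;
}

static void reset_work(enum ctrl_state *state)
{
	if (!change_state(state, CONNECTING)) {
		/* Only complain when the refusal is genuinely unexpected. */
		if (*state != DELETING)
			fprintf(stderr, "unexpected state change failure\n");
		return;
	}
	printf("reconnecting\n");
}

int main(void)
{
	enum ctrl_state s = DELETING;
	reset_work(&s);	/* bails out silently: deletion is in progress */
	s = LIVE;
	reset_work(&s);	/* proceeds with the reconnect */
	return 0;
}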