Commit eb1bd249 authored by Max Gurtovoy, committed by Christoph Hellwig

nvme-rdma: fix memory leak during queue allocation

In case nvme_rdma_wait_for_cm timeout expires before we get
an established or rejected event (rdma_connect succeeded) from
rdma_cm, we end up with leaking the ib transport resources for
dedicated queue. This scenario can easily be reproduced using a traffic
test during port toggling.
Also, in order to protect against parallel ib queue destruction, which
may be invoked from different contexts, introduce a new flag that
stands for transport readiness. While we're here, also protect against
a situation in which we can receive rdma_cm events during ib queue destruction.
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent f41725bb
...@@ -77,6 +77,7 @@ struct nvme_rdma_request { ...@@ -77,6 +77,7 @@ struct nvme_rdma_request {
enum nvme_rdma_queue_flags { enum nvme_rdma_queue_flags {
NVME_RDMA_Q_ALLOCATED = 0, NVME_RDMA_Q_ALLOCATED = 0,
NVME_RDMA_Q_LIVE = 1, NVME_RDMA_Q_LIVE = 1,
NVME_RDMA_Q_TR_READY = 2,
}; };
struct nvme_rdma_queue { struct nvme_rdma_queue {
...@@ -390,12 +391,23 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) ...@@ -390,12 +391,23 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{ {
struct nvme_rdma_device *dev = queue->device; struct nvme_rdma_device *dev;
struct ib_device *ibdev = dev->dev; struct ib_device *ibdev;
if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
return;
dev = queue->device;
ibdev = dev->dev;
ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
rdma_destroy_qp(queue->cm_id); /*
* The cm_id object might have been destroyed during RDMA connection
* establishment error flow to avoid getting other cma events, thus
* the destruction of the QP shouldn't use rdma_cm API.
*/
ib_destroy_qp(queue->qp);
ib_free_cq(queue->ib_cq); ib_free_cq(queue->ib_cq);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
...@@ -463,6 +475,8 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) ...@@ -463,6 +475,8 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_ring; goto out_destroy_ring;
} }
set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
return 0; return 0;
out_destroy_ring: out_destroy_ring:
...@@ -529,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -529,6 +543,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
out_destroy_cm_id: out_destroy_cm_id:
rdma_destroy_id(queue->cm_id); rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment